repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
apache/beam | sdks/python/apache_beam/runners/__init__.py | 5 | 1352 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Runner objects execute a Pipeline.
This package defines runners, which are used to execute a pipeline.
"""
from apache_beam.runners.direct.direct_runner import DirectRunner
from apache_beam.runners.direct.test_direct_runner import TestDirectRunner
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.runner import create_runner
from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
from apache_beam.runners.dataflow.test_dataflow_runner import TestDataflowRunner
| apache-2.0 | -6,924,085,677,973,655,000 | 44.066667 | 80 | 0.802515 | false |
not-raspberry/pytest_reorder | tests/test_pytest_reorder.py | 1 | 8644 | """Pytest-reorder test suite."""
import re
import pytest
import subprocess
from mock import Mock
from pytest_reorder import (
default_reordering_hook, unpack_test_ordering, make_reordering_hook,
DEFAULT_ORDER
)
from pytest_reorder.reorder import (
EmptyTestsOrderList, UndefinedUnmatchedTestsOrder
)
@pytest.mark.parametrize('function', [
unpack_test_ordering,
make_reordering_hook
])
def test_bad_ordering(function):
"""Check what happens when a malformed list is passed to the function."""
with pytest.raises(EmptyTestsOrderList):
function([])
with pytest.raises(UndefinedUnmatchedTestsOrder):
function(['sth', 'sth_else', 'etc'])
@pytest.mark.parametrize('path_template, matches', [
('test_{}.py:test_aaa', True),
('tests/{}/test_something.py:test_aaa', True),
('tests/{}/test_something.py:test_aaa[parametrization-1]', True),
('a/b/c/d/e/f/tests/{}/test_something.py:test_kkk', True),
('something/test_{}.py:test_aaa', True),
('{}/test_sth.py:test_aaa', True),
('something/test_SOMETHING_HERE{}.py:test_aaa', False),
('some{}.py:test_aaa', False),
('test_something.py:test_aaa[test_{}.py]', False),
('test_something.py:test_aaa[{}]', False),
])
def test_default_order_regexes(path_template, matches):
"""Test path matching with default regexes."""
unit_match, integration_match, ui_match = (
re.compile(match) for match in DEFAULT_ORDER if match is not None)
unit_path, integration_path, ui_path = map(path_template.format, ['unit', 'integration', 'ui'])
assert bool(unit_match.match(unit_path)) is matches
assert unit_match.match(integration_path) is None
assert unit_match.match(ui_path) is None
assert integration_match.match(unit_path) is None
assert bool(integration_match.match(integration_path)) is matches
assert integration_match.match(ui_path) is None
assert ui_match.match(unit_path) is None
assert ui_match.match(integration_path) is None
assert bool(ui_match.match(ui_path)) is matches
@pytest.mark.parametrize('test_names, expected_test_order', [
([
'tests/test_sample.py',
'tests/integration/test_some_integration.py',
'tests/ui/test_some_ui.py',
'tests/unit/test_some_unit.py',
],
[
'tests/unit/test_some_unit.py',
'tests/test_sample.py',
'tests/integration/test_some_integration.py',
'tests/ui/test_some_ui.py',
]),
(['test_other.py', 'test_integration.py', 'test_ui.py', 'test_unit.py'],
['test_unit.py', 'test_other.py', 'test_integration.py', 'test_ui.py']),
# Tests deeply nested:
([
'users/tests/test_sample.py',
'users/tests/test_ui.py',
'users/tests/integration/test_some_integration.py',
'accounts/tests/ui/test_some_ui.py',
'stats/tests/unit/test_some_unit.py',
],
[
'stats/tests/unit/test_some_unit.py',
'users/tests/test_sample.py',
'users/tests/integration/test_some_integration.py',
'users/tests/test_ui.py',
'accounts/tests/ui/test_some_ui.py',
]),
# No common prefix:
(['other/test_sth.py', 'integration/test_sth.py', 'ui/test_sth.py', 'unit/test_sth.py'],
['unit/test_sth.py', 'other/test_sth.py', 'integration/test_sth.py', 'ui/test_sth.py']),
# No integration tests:
(['test_other.py', 'test_ui.py', 'test_unit.py'],
['test_unit.py', 'test_other.py', 'test_ui.py']),
# No other (name not matched) tests:
(['test_integration.py', 'test_ui.py', 'test_unit.py'],
['test_unit.py', 'test_integration.py', 'test_ui.py']),
# No ui tests:
(['test_other.py', 'test_integration.py', 'test_unit.py'],
['test_unit.py', 'test_other.py', 'test_integration.py']),
# No unit tests:
(['test_other.py', 'test_integration.py', 'test_ui.py'],
['test_other.py', 'test_integration.py', 'test_ui.py']),
# No tests at all:
([], []),
])
def test_reordering_default(test_names, expected_test_order):
"""Call library's ``default_reordering_hook`` and check resulting tests order."""
test_items = [Mock(nodeid=test_name) for test_name in test_names]
default_reordering_hook(None, None, test_items)
reordered_test_names = [item.nodeid for item in test_items]
assert reordered_test_names == expected_test_order
def test_reordering_custom_test_order():
"""Test reordering with a custom hook."""
tests_names = [
'test_suite/test_aaa.py',
'test_suite/test_bbb.py',
'test_suite/test_ccc.py',
'test_suite/test_fff.py',
]
test_items = [Mock(nodeid=test_name) for test_name in tests_names]
reorder_hook = make_reordering_hook(['.*/test_c', '.*/test_b', '.*/test_a', None])
reorder_hook(None, None, test_items)
reordered_test_names = [item.nodeid for item in test_items]
assert reordered_test_names == [
'test_suite/test_ccc.py',
'test_suite/test_bbb.py',
'test_suite/test_aaa.py',
'test_suite/test_fff.py',
]
@pytest.mark.parametrize('pytest_invocation, expected_test_order', [
(
['py.test', 'tests/sample_test_suites/flat/'],
[
b'tests/sample_test_suites/flat/unit/test_some_unit.py',
b'tests/sample_test_suites/flat/test_sample.py',
b'tests/sample_test_suites/flat/integration/test_some_integration.py',
b'tests/sample_test_suites/flat/ui/test_some_ui.py',
]
),
(
['py.test', 'tests/sample_test_suites/nested/'],
[
b'tests/sample_test_suites/nested/app_1/tests/unit/test_some_unit.py',
b'tests/sample_test_suites/nested/app_2/tests/test_unit.py',
b'tests/sample_test_suites/nested/app_2/tests/test_sth.py',
b'tests/sample_test_suites/nested/app_1/tests/integration/test_some_integration.py',
b'tests/sample_test_suites/nested/app_1/tests/ui/test_some_ui.py',
]
),
(
# No `--reorder` argument - no reordering.
['py.test', 'tests/sample_test_suites/flat_hook_not_imported/'],
[
b'tests/sample_test_suites/flat_hook_not_imported/test_sample.py',
b'tests/sample_test_suites/flat_hook_not_imported/integration/test_some_integration.py',
b'tests/sample_test_suites/flat_hook_not_imported/ui/test_some_ui.py',
b'tests/sample_test_suites/flat_hook_not_imported/unit/test_some_unit.py',
]
),
(
# `--reorder` with no extra args - default order.
['py.test', 'tests/sample_test_suites/flat_hook_not_imported/', '--reorder'],
[
b'tests/sample_test_suites/flat_hook_not_imported/unit/test_some_unit.py',
b'tests/sample_test_suites/flat_hook_not_imported/test_sample.py',
b'tests/sample_test_suites/flat_hook_not_imported/integration/test_some_integration.py',
b'tests/sample_test_suites/flat_hook_not_imported/ui/test_some_ui.py',
]
),
(
# `--reorder` with custom ordering.
[
'py.test', 'tests/sample_test_suites/flat_hook_not_imported/',
'--reorder', '*', '(test_|.*/)unit', '(test_|.*/)ui', '(test_|.*/)integration',
],
[
b'tests/sample_test_suites/flat_hook_not_imported/test_sample.py',
b'tests/sample_test_suites/flat_hook_not_imported/unit/test_some_unit.py',
b'tests/sample_test_suites/flat_hook_not_imported/ui/test_some_ui.py',
b'tests/sample_test_suites/flat_hook_not_imported/integration/test_some_integration.py',
]
),
])
def test_reordering_invoke_test_suite(pytest_invocation, expected_test_order):
"""Check the order of tests in sample test suites, invoked in a subprocess."""
output = subprocess.check_output(pytest_invocation)
lines_with_test_modules = [line for line in output.split(b'\n')
if line.startswith(b'tests/sample_test_suites/')]
test_modules = [line.split()[0] for line in lines_with_test_modules]
assert test_modules == expected_test_order
def test_commandline_reorder_option_no_unmatched_tests_order():
"""Test invoking pytest with the '--reorder' option and an invalid ordering list."""
with pytest.raises(subprocess.CalledProcessError) as bad_call:
subprocess.check_output([
'py.test', 'tests/sample_test_suites/flat_hook_not_imported/',
'--reorder', 'match_a', 'match_b', 'match_c',
])
assert (b'UndefinedUnmatchedTestsOrder: The list does not specify the order of unmatched tests.'
in bad_call.value.output)
| mit | -9,059,478,554,468,184,000 | 37.936937 | 100 | 0.632693 | false |
wkritzinger/asuswrt-merlin | release/src/router/samba36/lib/testtools/testtools/tests/test_run.py | 20 | 2429 | # Copyright (c) 2010 Testtools authors. See LICENSE for details.
"""Tests for the test runner logic."""
from testtools.helpers import try_import, try_imports
fixtures = try_import('fixtures')
StringIO = try_imports(['StringIO.StringIO', 'io.StringIO'])
import testtools
from testtools import TestCase, run
if fixtures:
class SampleTestFixture(fixtures.Fixture):
"""Creates testtools.runexample temporarily."""
def __init__(self):
self.package = fixtures.PythonPackage(
'runexample', [('__init__.py', """
from testtools import TestCase
class TestFoo(TestCase):
def test_bar(self):
pass
def test_quux(self):
pass
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
""")])
def setUp(self):
super(SampleTestFixture, self).setUp()
self.useFixture(self.package)
testtools.__path__.append(self.package.base)
self.addCleanup(testtools.__path__.remove, self.package.base)
class TestRun(TestCase):
def test_run_list(self):
if fixtures is None:
self.skipTest("Need fixtures")
package = self.useFixture(SampleTestFixture())
out = StringIO()
run.main(['prog', '-l', 'testtools.runexample.test_suite'], out)
self.assertEqual("""testtools.runexample.TestFoo.test_bar
testtools.runexample.TestFoo.test_quux
""", out.getvalue())
def test_run_load_list(self):
if fixtures is None:
self.skipTest("Need fixtures")
package = self.useFixture(SampleTestFixture())
out = StringIO()
# We load two tests - one that exists and one that doesn't, and we
# should get the one that exists and neither the one that doesn't nor
# the unmentioned one that does.
tempdir = self.useFixture(fixtures.TempDir())
tempname = tempdir.path + '/tests.list'
f = open(tempname, 'wb')
try:
f.write("""
testtools.runexample.TestFoo.test_bar
testtools.runexample.missingtest
""")
finally:
f.close()
run.main(['prog', '-l', '--load-list', tempname,
'testtools.runexample.test_suite'], out)
self.assertEqual("""testtools.runexample.TestFoo.test_bar
""", out.getvalue())
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
| gpl-2.0 | -5,781,118,297,902,162,000 | 30.960526 | 77 | 0.637711 | false |
openstack/senlin | senlin/engine/senlin_lock.py | 1 | 5991 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import random
import time
from oslo_config import cfg
from oslo_db import exception
from oslo_log import log as logging
from senlin.common.i18n import _
from senlin.common import utils
from senlin import objects
from senlin.objects import action as ao
from senlin.objects import cluster_lock as cl_obj
from senlin.objects import node_lock as nl_obj
CONF = cfg.CONF
CONF.import_opt('lock_retry_times', 'senlin.conf')
CONF.import_opt('lock_retry_interval', 'senlin.conf')
LOG = logging.getLogger(__name__)
LOCK_SCOPES = (
CLUSTER_SCOPE, NODE_SCOPE,
) = (
-1, 1,
)
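# Lock scope constants: CLUSTER_SCOPE (-1) locks an entire cluster, while
# NODE_SCOPE (1) locks a single node within it.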
def cluster_lock_acquire(context, cluster_id, action_id, engine=None,
scope=CLUSTER_SCOPE, forced=False):
"""Try to lock the specified cluster.
:param context: the context used for DB operations.
:param cluster_id: ID of the cluster to be locked.
:param action_id: ID of the action which wants to lock the cluster.
:param engine: ID of the engine which wants to lock the cluster.
:param scope: scope of lock, could be cluster wide lock, or node-wide
lock.
:param forced: set to True to cancel current action that owns the lock,
if any.
:returns: True if lock is acquired, or False otherwise.
"""
# Step 1: try lock the cluster - if the returned owner_id is the
# action id, it was a success
for retries in range(3):
try:
owners = cl_obj.ClusterLock.acquire(cluster_id, action_id, scope)
if action_id in owners:
return True
except exception.DBDuplicateEntry:
LOG.info('Duplicate entry in cluster_lock table for %(c)s. '
'Retrying cluster lock.',
{'c': cluster_id})
eventlet.sleep(random.randrange(1, 3))
# Step 2: Last resort is 'forced locking', only needed when retry failed
if forced:
owners = cl_obj.ClusterLock.steal(cluster_id, action_id)
return action_id in owners
# Step 3: check if the owner is a dead engine, if so, steal the lock.
# Will reach here only because scope == CLUSTER_SCOPE
action = ao.Action.get(context, owners[0])
if (action and action.owner and action.owner != engine and
utils.is_engine_dead(context, action.owner)):
LOG.info('The cluster %(c)s is locked by dead action %(a)s, '
'try to steal the lock.',
{'c': cluster_id, 'a': owners[0]})
dead_engine = action.owner
owners = cl_obj.ClusterLock.steal(cluster_id, action_id)
# Cleanse locks affected by the dead engine
objects.Service.gc_by_engine(dead_engine)
return action_id in owners
lock_owners = []
for o in owners:
lock_owners.append(o[:8])
LOG.warning('Cluster is already locked by action %(old)s, '
'action %(new)s failed grabbing the lock',
{'old': str(lock_owners), 'new': action_id[:8]})
return False
def cluster_lock_release(cluster_id, action_id, scope):
"""Release the lock on the specified cluster.
:param cluster_id: ID of the cluster to be released.
:param action_id: ID of the action that attempts to release the cluster.
:param scope: The scope of the lock to be released.
"""
return cl_obj.ClusterLock.release(cluster_id, action_id, scope)
def node_lock_acquire(context, node_id, action_id, engine=None,
forced=False):
"""Try to lock the specified node.
:param context: the context used for DB operations.
:param node_id: ID of the node to be locked.
:param action_id: ID of the action that attempts to lock the node.
:param engine: ID of the engine that attempts to lock the node.
:param forced: set to True to cancel current action that owns the lock,
if any.
:returns: True if lock is acquired, or False otherwise.
"""
# Step 1: try lock the node - if the returned owner_id is the
# action id, it was a success
owner = nl_obj.NodeLock.acquire(node_id, action_id)
if action_id == owner:
return True
# Step 2: Last resort is 'forced locking', only needed when retry failed
if forced:
owner = nl_obj.NodeLock.steal(node_id, action_id)
return action_id == owner
# Step 3: Try to steal a lock if it's owner is a dead engine.
# if this node lock by dead engine
action = ao.Action.get(context, owner)
if (action and action.owner and action.owner != engine and
utils.is_engine_dead(context, action.owner)):
LOG.info('The node %(n)s is locked by dead action %(a)s, '
'try to steal the lock.',
{'n': node_id, 'a': owner})
reason = _('Engine died when executing this action.')
nl_obj.NodeLock.steal(node_id, action_id)
ao.Action.mark_failed(context, action.id, time.time(), reason)
return True
LOG.warning('Node is already locked by action %(old)s, '
'action %(new)s failed grabbing the lock',
{'old': owner, 'new': action_id})
return False
def node_lock_release(node_id, action_id):
"""Release the lock on the specified node.
:param node_id: ID of the node to be released.
:param action_id: ID of the action that attempts to release the node.
"""
return nl_obj.NodeLock.release(node_id, action_id)
| apache-2.0 | 2,965,878,022,649,935,400 | 36.917722 | 77 | 0.646804 | false |
patricklaw/pants | src/python/pants/testutil/option_util.py | 4 | 2568 | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import Iterable, Mapping, Type, TypeVar, cast
from pants.engine.goal import GoalSubsystem
from pants.option.option_value_container import OptionValueContainer, OptionValueContainerBuilder
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.ranked_value import Rank, RankedValue, Value
from pants.option.subsystem import Subsystem
def create_options_bootstrapper(
args: Iterable[str] | None = None, *, env: Mapping[str, str] | None = None
) -> OptionsBootstrapper:
return OptionsBootstrapper.create(
args=("--pants-config-files=[]", *(args or [])),
env=env or {},
allow_pantsrc=False,
)
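# Builds an OptionValueContainer from keyword arguments, wrapping any raw
# value in a RankedValue at the given default rank before freezing the
# container.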
def create_option_value_container(
default_rank: Rank = Rank.NONE, **options: RankedValue | Value
) -> OptionValueContainer:
scoped_options = OptionValueContainerBuilder()
for key, value in options.items():
if not isinstance(value, RankedValue):
value = RankedValue(default_rank, value)
setattr(scoped_options, key, value)
return scoped_options.build()
_GS = TypeVar("_GS", bound=GoalSubsystem)
def create_goal_subsystem(
goal_subsystem_type: Type[_GS],
default_rank: Rank = Rank.NONE,
**options: RankedValue | Value,
) -> _GS:
"""Creates a new goal subsystem instance populated with the given option values.
:param goal_subsystem_type: The `GoalSubsystem` type to create.
:param default_rank: The rank to assign any raw option values passed.
:param options: The option values to populate the new goal subsystem instance with.
"""
return goal_subsystem_type(
scope=goal_subsystem_type.name,
options=create_option_value_container(default_rank, **options),
)
_SS = TypeVar("_SS", bound=Subsystem)
def create_subsystem(
subsystem_type: Type[_SS], default_rank: Rank = Rank.NONE, **options: RankedValue | Value
) -> _SS:
"""Creates a new subsystem instance populated with the given option values.
:param subsystem_type: The `Subsystem` type to create.
:param default_rank: The rank to assign any raw option values passed.
:param options: The option values to populate the new subsystem instance with.
"""
options_scope = cast(str, subsystem_type.options_scope)
return subsystem_type(
scope=options_scope,
options=create_option_value_container(default_rank, **options),
)
| apache-2.0 | -8,025,968,511,532,255,000 | 34.666667 | 97 | 0.71067 | false |
osu-cass/whats-fresh-api | whats_fresh/whats_fresh_api/tests/models/test_vendor_product_join_model.py | 2 | 1986 | from django.test import TestCase
from whats_fresh.whats_fresh_api.models import (VendorProduct,
ProductPreparation, Product,
Preparation, Vendor)
from django.contrib.gis.db import models
class VendorProductJoinTestCase(TestCase):
def setUp(self):
self.expected_fields = {
'vendor': models.ForeignKey,
'vendor_id': models.ForeignKey,
'product_preparation': models.ForeignKey,
'product_preparation_id': models.ForeignKey,
'vendor_price': models.TextField,
'available': models.NullBooleanField,
'id': models.AutoField
}
self.optional_fields = {
'vendor_price',
'available'
}
def test_fields_exist(self):
model = models.get_model('whats_fresh_api', 'VendorProduct')
for field, field_type in self.expected_fields.items():
self.assertEqual(
field_type, type(model._meta.get_field_by_name(field)[0]))
def test_no_additional_fields(self):
fields = VendorProduct._meta.get_all_field_names()
self.assertEqual(sorted(fields), sorted(self.expected_fields.keys()))
def test___unicode___method(self):
try:
VendorProduct.__unicode__(
VendorProduct(
vendor=Vendor(name='test'),
product_preparation=ProductPreparation(
product=Product(name='test'),
preparation=Preparation(name='test')
)
))
except AttributeError:
self.fail("No __unicode__ method found")
def test_optional_fields(self):
models.get_model('whats_fresh_api', 'VendorProduct')
for field in self.optional_fields:
self.assertEqual(
VendorProduct._meta.get_field_by_name(field)[0].blank, True)
| apache-2.0 | -5,035,918,078,256,444,000 | 35.777778 | 77 | 0.559416 | false |
hxue920/9001 | server/project/apps/core/migrations/0001_initial.py | 5 | 1956 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-01-16 06:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Favorite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.CharField(max_length=250)),
('track_id', models.CharField(max_length=50)),
],
options={
'verbose_name': 'Favorite',
'verbose_name_plural': 'Favorites',
},
),
migrations.CreateModel(
name='Playlist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('playlist_name', models.CharField(max_length=250)),
('user_id', models.CharField(max_length=250)),
('track_id', models.CharField(max_length=50)),
('title', models.CharField(max_length=250)),
('artist', models.CharField(max_length=250)),
('img_url', models.CharField(max_length=250)),
('stream_url', models.CharField(max_length=250)),
('duration', models.IntegerField()),
('platform', models.CharField(max_length=50)),
],
options={
'verbose_name': 'Playlist',
'verbose_name_plural': 'Playlists',
},
),
migrations.AlterUniqueTogether(
name='playlist',
unique_together=set([('playlist_name', 'user_id')]),
),
migrations.AlterUniqueTogether(
name='favorite',
unique_together=set([('user_id', 'track_id')]),
),
]
| mit | -5,541,745,379,833,490,000 | 34.563636 | 114 | 0.519427 | false |
theo-l/django | tests/defer/tests.py | 33 | 11474 | from django.core.exceptions import FieldError
from django.test import TestCase
from .models import (
BigChild, Child, ChildProxy, Primary, RefreshPrimaryProxy, Secondary,
)
class AssertionMixin:
def assert_delayed(self, obj, num):
"""
Instances with deferred fields look the same as normal instances when
we examine attribute values. Therefore, this method returns the number
of deferred fields on returned instances.
"""
count = len(obj.get_deferred_fields())
self.assertEqual(count, num)
class DeferTests(AssertionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = Secondary.objects.create(first="x1", second="y1")
cls.p1 = Primary.objects.create(name="p1", value="xx", related=cls.s1)
def test_defer(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name")[0], 1)
self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
self.assert_delayed(qs.defer("related__first")[0], 0)
self.assert_delayed(qs.defer("name").defer("value")[0], 2)
def test_only(self):
qs = Primary.objects.all()
self.assert_delayed(qs.only("name")[0], 2)
self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
self.assert_delayed(qs.only("name").only("value")[0], 2)
self.assert_delayed(qs.only("related__first")[0], 2)
# Using 'pk' with only() should result in 3 deferred fields, namely all
# of them except the model's primary key see #15494
self.assert_delayed(qs.only("pk")[0], 3)
# You can use 'pk' with reverse foreign key lookups.
# The related_id is always set even if it's not fetched from the DB,
# so pk and related_id are not deferred.
self.assert_delayed(self.s1.primary_set.all().only('pk')[0], 2)
def test_defer_only_chaining(self):
qs = Primary.objects.all()
self.assert_delayed(qs.only("name", "value").defer("name")[0], 2)
self.assert_delayed(qs.defer("name").only("value", "name")[0], 2)
self.assert_delayed(qs.defer("name").only("value")[0], 2)
self.assert_delayed(qs.only("name").defer("value")[0], 2)
def test_defer_on_an_already_deferred_field(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name")[0], 1)
self.assert_delayed(qs.defer("name").defer("name")[0], 1)
def test_defer_none_to_clear_deferred_set(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name", "value")[0], 2)
self.assert_delayed(qs.defer(None)[0], 0)
self.assert_delayed(qs.only("name").defer(None)[0], 0)
def test_only_none_raises_error(self):
msg = 'Cannot pass None as an argument to only().'
with self.assertRaisesMessage(TypeError, msg):
Primary.objects.only(None)
def test_defer_extra(self):
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name").extra(select={"a": 1})[0], 1)
self.assert_delayed(qs.extra(select={"a": 1}).defer("name")[0], 1)
def test_defer_values_does_not_defer(self):
# User values() won't defer anything (you get the full list of
# dictionaries back), but it still works.
self.assertEqual(Primary.objects.defer("name").values()[0], {
"id": self.p1.id,
"name": "p1",
"value": "xx",
"related_id": self.s1.id,
})
def test_only_values_does_not_defer(self):
self.assertEqual(Primary.objects.only("name").values()[0], {
"id": self.p1.id,
"name": "p1",
"value": "xx",
"related_id": self.s1.id,
})
def test_get(self):
# Using defer() and only() with get() is also valid.
qs = Primary.objects.all()
self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
def test_defer_with_select_related(self):
obj = Primary.objects.select_related().defer("related__first", "related__second")[0]
self.assert_delayed(obj.related, 2)
self.assert_delayed(obj, 0)
def test_only_with_select_related(self):
obj = Primary.objects.select_related().only("related__first")[0]
self.assert_delayed(obj, 2)
self.assert_delayed(obj.related, 1)
self.assertEqual(obj.related_id, self.s1.pk)
self.assertEqual(obj.name, "p1")
def test_defer_select_related_raises_invalid_query(self):
msg = (
'Field Primary.related cannot be both deferred and traversed '
'using select_related at the same time.'
)
with self.assertRaisesMessage(FieldError, msg):
Primary.objects.defer("related").select_related("related")[0]
def test_only_select_related_raises_invalid_query(self):
msg = (
'Field Primary.related cannot be both deferred and traversed using '
'select_related at the same time.'
)
with self.assertRaisesMessage(FieldError, msg):
Primary.objects.only("name").select_related("related")[0]
def test_defer_foreign_keys_are_deferred_and_not_traversed(self):
# select_related() overrides defer().
with self.assertNumQueries(1):
obj = Primary.objects.defer("related").select_related()[0]
self.assert_delayed(obj, 1)
self.assertEqual(obj.related.id, self.s1.pk)
def test_saving_object_with_deferred_field(self):
# Saving models with deferred fields is possible (but inefficient,
# since every field has to be retrieved first).
Primary.objects.create(name="p2", value="xy", related=self.s1)
obj = Primary.objects.defer("value").get(name="p2")
obj.name = "a new name"
obj.save()
self.assertQuerysetEqual(
Primary.objects.all(), [
"p1", "a new name",
],
lambda p: p.name,
ordered=False,
)
def test_defer_baseclass_when_subclass_has_no_added_fields(self):
# Regression for #10572 - A subclass with no extra fields can defer
# fields from the base class
Child.objects.create(name="c1", value="foo", related=self.s1)
# You can defer a field on a baseclass when the subclass has no fields
obj = Child.objects.defer("value").get(name="c1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "c1")
self.assertEqual(obj.value, "foo")
def test_only_baseclass_when_subclass_has_no_added_fields(self):
# You can retrieve a single column on a base class with no fields
Child.objects.create(name="c1", value="foo", related=self.s1)
obj = Child.objects.only("name").get(name="c1")
# on an inherited model, its PK is also fetched, hence '3' deferred fields.
self.assert_delayed(obj, 3)
self.assertEqual(obj.name, "c1")
self.assertEqual(obj.value, "foo")
class BigChildDeferTests(AssertionMixin, TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = Secondary.objects.create(first="x1", second="y1")
BigChild.objects.create(name="b1", value="foo", related=cls.s1, other="bar")
def test_defer_baseclass_when_subclass_has_added_field(self):
# You can defer a field on a baseclass
obj = BigChild.objects.defer("value").get(name="b1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
def test_defer_subclass(self):
# You can defer a field on a subclass
obj = BigChild.objects.defer("other").get(name="b1")
self.assert_delayed(obj, 1)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
def test_defer_subclass_both(self):
# Deferring fields from both superclass and subclass works.
obj = BigChild.objects.defer("other", "value").get(name="b1")
self.assert_delayed(obj, 2)
def test_only_baseclass_when_subclass_has_added_field(self):
# You can retrieve a single field on a baseclass
obj = BigChild.objects.only("name").get(name="b1")
# when inherited model, its PK is also fetched, hence '4' deferred fields.
self.assert_delayed(obj, 4)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
def test_only_subclass(self):
# You can retrieve a single field on a subclass
obj = BigChild.objects.only("other").get(name="b1")
self.assert_delayed(obj, 4)
self.assertEqual(obj.name, "b1")
self.assertEqual(obj.value, "foo")
self.assertEqual(obj.other, "bar")
class TestDefer2(AssertionMixin, TestCase):
def test_defer_proxy(self):
"""
Ensure select_related together with only on a proxy model behaves
as expected. See #17876.
"""
related = Secondary.objects.create(first='x1', second='x2')
ChildProxy.objects.create(name='p1', value='xx', related=related)
children = ChildProxy.objects.all().select_related().only('id', 'name')
self.assertEqual(len(children), 1)
child = children[0]
self.assert_delayed(child, 2)
self.assertEqual(child.name, 'p1')
self.assertEqual(child.value, 'xx')
def test_defer_inheritance_pk_chaining(self):
"""
When an inherited model is fetched from the DB, its PK is also fetched.
When getting the PK of the parent model it is useful to use the already
fetched parent model PK if it happens to be available.
"""
s1 = Secondary.objects.create(first="x1", second="y1")
bc = BigChild.objects.create(name='b1', value='foo', related=s1, other='bar')
bc_deferred = BigChild.objects.only('name').get(pk=bc.pk)
with self.assertNumQueries(0):
bc_deferred.id
self.assertEqual(bc_deferred.pk, bc_deferred.id)
def test_eq(self):
s1 = Secondary.objects.create(first="x1", second="y1")
s1_defer = Secondary.objects.only('pk').get(pk=s1.pk)
self.assertEqual(s1, s1_defer)
self.assertEqual(s1_defer, s1)
def test_refresh_not_loading_deferred_fields(self):
s = Secondary.objects.create()
rf = Primary.objects.create(name='foo', value='bar', related=s)
rf2 = Primary.objects.only('related', 'value').get()
rf.name = 'new foo'
rf.value = 'new bar'
rf.save()
with self.assertNumQueries(1):
rf2.refresh_from_db()
self.assertEqual(rf2.value, 'new bar')
with self.assertNumQueries(1):
self.assertEqual(rf2.name, 'new foo')
def test_custom_refresh_on_deferred_loading(self):
s = Secondary.objects.create()
rf = RefreshPrimaryProxy.objects.create(name='foo', value='bar', related=s)
rf2 = RefreshPrimaryProxy.objects.only('related').get()
rf.name = 'new foo'
rf.value = 'new bar'
rf.save()
with self.assertNumQueries(1):
# Customized refresh_from_db() reloads all deferred fields on
# access of any of them.
self.assertEqual(rf2.name, 'new foo')
self.assertEqual(rf2.value, 'new bar')
| bsd-3-clause | 8,002,776,743,093,707,000 | 41.183824 | 92 | 0.617396 | false |
wisechengyi/pants | src/python/pants/backend/jvm/tasks/coverage/cobertura.py | 2 | 15059 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import functools
import os
import shutil
from collections import defaultdict
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.backend.jvm.tasks.coverage.engine import CoverageEngine
from pants.base.exceptions import TaskError
from pants.build_graph.build_graph import BuildGraph
from pants.build_graph.target_scopes import Scopes
from pants.java.jar.jar_dependency import JarDependency
from pants.java.util import safe_classpath
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import temporary_file
from pants.util.dirutil import relativize_paths, safe_mkdir, safe_mkdir_for, safe_walk, touch
from pants.util.ordered_set import OrderedSet
class Cobertura(CoverageEngine):
"""Subsystem for getting code coverage with cobertura."""
class Factory(Subsystem):
options_scope = "cobertura"
@classmethod
def create(cls, settings, targets, execute_java_for_targets):
"""
:param settings: Generic code coverage settings.
:type settings: :class:`CodeCoverageSettings`
:param list targets: A list of targets to instrument and record code coverage for.
:param execute_java_for_targets: A function that accepts a list of targets whose JVM platform
constraints are used to pick a JVM `Distribution`. The
function should also accept `*args` and `**kwargs` compatible
with the remaining parameters accepted by
`pants.java.util.execute_java`.
"""
return Cobertura(settings, targets, execute_java_for_targets)
# TODO(jtrobec): deprecate these options and move them to subsystem scope
@staticmethod
def register_junit_options(register, register_jvm_tool):
register(
"--coverage-cobertura-include-classes",
advanced=True,
type=list,
fingerprint=True,
help="Regex patterns passed to cobertura specifying which classes should be "
'instrumented. (see the "includeclasses" element description here: '
"https://github.com/cobertura/cobertura/wiki/Ant-Task-Reference)",
)
register(
"--coverage-cobertura-exclude-classes",
advanced=True,
type=list,
fingerprint=True,
help="Regex patterns passed to cobertura specifying which classes should NOT be "
'instrumented. (see the "excludeclasses" element description here: '
"https://github.com/cobertura/cobertura/wiki/Ant-Task-Reference",
)
register(
"--coverage-cobertura-include-user-classpath",
type=bool,
fingerprint=True,
default=True,
help="Use the user classpath to aid in instrumenting classes",
)
def slf4j_jar(name):
return JarDependency(org="org.slf4j", name=name, rev="1.7.5")
def cobertura_jar(**kwargs):
return JarDependency(
org="net.sourceforge.cobertura", name="cobertura", rev="2.1.1", **kwargs
)
# The Cobertura jar needs all its dependencies when instrumenting code.
register_jvm_tool(
register, "cobertura-instrument", classpath=[cobertura_jar(), slf4j_jar("slf4j-simple")]
)
# Instrumented code needs cobertura.jar in the classpath to run, but not most of the
# dependencies; inject the SLF4J API so that Cobertura doesn't crash when it attempts to log
register_jvm_tool(
register,
"cobertura-run",
classpath=[cobertura_jar(intransitive=True), slf4j_jar("slf4j-api")],
)
register_jvm_tool(register, "cobertura-merge", classpath=[cobertura_jar()])
register_jvm_tool(register, "cobertura-report", classpath=[cobertura_jar()])
_DATAFILE_NAME = "cobertura.ser"
def __init__(self, settings, targets, execute_java_for_targets):
"""
:param settings: Generic code coverage settings.
:type settings: :class:`CodeCoverageSettings`
:param list targets: A list of targets to instrument and record code coverage for.
:param execute_java_for_targets: A function that accepts a list of targets whose JVM platform
constraints are used to pick a JVM `Distribution`. The function
should also accept `*args` and `**kwargs` compatible with the
remaining parameters accepted by
`pants.java.util.execute_java`.
"""
self._settings = settings
options = settings.options
self._context = settings.context
self._coverage_force = options.coverage_force
self._canonical_datafile = None
self._rootdirs = defaultdict(OrderedSet)
self._include_classes = options.coverage_cobertura_include_classes
self._exclude_classes = options.coverage_cobertura_exclude_classes
self._include_user_classpath = options.coverage_cobertura_include_user_classpath
self._targets = targets
self._execute_java = functools.partial(execute_java_for_targets, targets)
@staticmethod
def initialize_instrument_classpath(output_dir, settings, targets, instrumentation_classpath):
"""Clones the existing runtime_classpath and corresponding binaries to instrumentation
specific paths.
:param targets: the targets for which we should create an instrumentation_classpath entry based
on their runtime_classpath entry.
"""
instrument_dir = os.path.join(output_dir, "coverage", "classes")
settings.safe_makedir(instrument_dir, clean=True)
for target in targets:
if not Cobertura.is_coverage_target(target):
continue
# Do not instrument transitive dependencies.
paths = instrumentation_classpath.get_for_target(target)
target_instrumentation_path = os.path.join(instrument_dir, target.id)
for (index, (config, path)) in enumerate(paths):
# There are two sorts of classpath entries we see in the compile classpath: jars and dirs.
# The branches below handle the cloning of those respectively.
entry_instrumentation_path = os.path.join(target_instrumentation_path, str(index))
if settings.is_file(path):
settings.safe_makedir(entry_instrumentation_path, clean=True)
settings.copy2(path, entry_instrumentation_path)
new_path = os.path.join(entry_instrumentation_path, os.path.basename(path))
else:
settings.copytree(path, entry_instrumentation_path)
new_path = entry_instrumentation_path
instrumentation_classpath.remove_for_target(target, [(config, path)])
instrumentation_classpath.add_for_target(target, [(config, new_path)])
settings.log.debug(
f"runtime_classpath ({path}) cloned to instrument_classpath ({new_path})"
)
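    # Yields the path of every cobertura.ser datafile found under output_dir.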
def _iter_datafiles(self, output_dir):
for root, _, files in safe_walk(output_dir):
for f in files:
if f == self._DATAFILE_NAME:
yield os.path.join(root, f)
break
@property
def canonical_datafile(self):
if self._canonical_datafile is None:
raise AssertionError(
"Expected a canonical datafile to have been created via a call to" "`instrument`."
)
elif not os.path.exists(self._canonical_datafile):
raise AssertionError(
"The canonical datafile at {} has unexpectedly been deleted.".format(
self._canonical_datafile
)
)
return self._canonical_datafile
def instrument(self, output_dir):
for datafile in self._iter_datafiles(output_dir):
os.unlink(datafile)
self._canonical_datafile = os.path.join(output_dir, f"{self._DATAFILE_NAME}.canonical")
# It's conceivable we'll be executing a test that has no source file dependencies; ie: we'll
# never generate a canonical coverage datafile below. Create an empty one here to allow the
# test run to proceed normally.
touch(self._canonical_datafile)
# Setup an instrumentation classpath based on the existing runtime classpath.
runtime_classpath = self._context.products.get_data("runtime_classpath")
instrumentation_classpath = self._context.products.safe_create_data(
"instrument_classpath", runtime_classpath.copy
)
self.initialize_instrument_classpath(
output_dir, self._settings, self._targets, instrumentation_classpath
)
cobertura_cp = self._settings.tool_classpath("cobertura-instrument")
files_to_instrument = []
for target in self._targets:
if Cobertura.is_coverage_target(target):
paths = instrumentation_classpath.get_for_target(target)
for (name, path) in paths:
files_to_instrument.append(path)
if len(files_to_instrument) > 0:
unique_files = list(set(files_to_instrument))
relativize_paths(unique_files, self._settings.workdir)
args = [
"--basedir",
self._settings.workdir,
"--datafile",
self._canonical_datafile,
]
if self._include_user_classpath:
closure = BuildGraph.closure(
self._targets,
bfs=True,
include_scopes=Scopes.JVM_TEST_SCOPES,
respect_intransitive=True,
)
aux_classpath = safe_classpath(
ClasspathUtil.classpath(closure, runtime_classpath), synthetic_jar_dir=None
)
args.append("--auxClasspath")
args.extend(aux_classpath)
# apply class incl/excl filters
if len(self._include_classes) > 0:
for pattern in self._include_classes:
args += ["--includeClasses", pattern]
else:
args += ["--includeClasses", ".*"] # default to instrumenting all classes
for pattern in self._exclude_classes:
args += ["--excludeClasses", pattern]
with temporary_file(binary_mode=False) as tmp_file:
tmp_file.write("\n".join(unique_files))
tmp_file.flush()
args += ["--listOfFilesToInstrument", tmp_file.name]
main = "net.sourceforge.cobertura.instrument.InstrumentMain"
self._settings.log.debug(
f"executing cobertura instrumentation with the following args: {args}"
)
result = self._execute_java(
classpath=cobertura_cp,
main=main,
jvm_options=self._settings.coverage_jvm_options,
args=args,
workunit_factory=self._context.new_workunit,
workunit_name="cobertura-instrument",
)
if result != 0:
raise TaskError(
"java {0} ... exited non-zero ({1})"
" 'failed to instrument'".format(main, result)
)
def run_modifications(self, output_dir):
datafile = os.path.join(output_dir, self._DATAFILE_NAME)
safe_mkdir_for(datafile)
shutil.copy(self.canonical_datafile, datafile)
datafile_option = f"-Dnet.sourceforge.cobertura.datafile={datafile}"
return self.RunModifications(
classpath_prepend=self._settings.tool_classpath("cobertura-run"),
extra_jvm_options=[datafile_option],
)
def should_report(self, execution_failed_exception=None):
if execution_failed_exception:
self._settings.log.warn(f"Test failed: {execution_failed_exception}")
if self._settings.coverage_force:
self._settings.log.warn("Generating report even though tests failed.")
return True
else:
return False
return True
def _execute_cobertura(self, workunit_name, tool_classpath, main, args):
cobertura_cp = self._settings.tool_classpath(tool_classpath)
result = self._execute_java(
classpath=cobertura_cp,
main=main,
jvm_options=self._settings.coverage_jvm_options,
args=args,
workunit_factory=self._context.new_workunit,
workunit_name=workunit_name,
)
if result != 0:
raise TaskError(
"java {} ... exited non-zero ({}) - failed to {}".format(
main, result, workunit_name
)
)
def report(self, output_dir, execution_failed_exception=None):
if self.should_report(execution_failed_exception):
datafiles = list(self._iter_datafiles(output_dir))
if len(datafiles) == 1:
datafile = datafiles[0]
else:
datafile = os.path.join(output_dir, f"{self._DATAFILE_NAME}.merged")
self._execute_cobertura(
workunit_name="cobertura-merge",
tool_classpath="cobertura-merge",
main="net.sourceforge.cobertura.merge.MergeMain",
args=["--datafile", datafile] + datafiles,
)
base_report_dir = os.path.join(output_dir, "coverage", "reports")
safe_mkdir(base_report_dir, clean=True)
source_roots = {t.target_base for t in self._targets if Cobertura.is_coverage_target(t)}
base_args = list(source_roots) + ["--datafile", datafile]
for report_format in ("xml", "html"):
report_dir = os.path.join(base_report_dir, report_format)
safe_mkdir(report_dir, clean=True)
self._execute_cobertura(
workunit_name=f"cobertura-report-{report_format}",
tool_classpath="cobertura-report",
main="net.sourceforge.cobertura.reporting.ReportMain",
args=base_args + ["--destination", report_dir, "--format", report_format],
)
if self._settings.coverage_open:
return os.path.join(base_report_dir, "html", "index.html")
| apache-2.0 | -9,101,340,946,120,089,000 | 44.772036 | 106 | 0.593134 | false |
eddturtle/WeatherPi | web-api.py | 1 | 1517 | #!/usr/bin/python
import time
import datetime as dt
import sqlite3 as lite
from flask import Flask, jsonify, render_template
app = Flask(__name__)
def connectDB():
connection = lite.connect('weather.db')
connection.row_factory = dictFactory
return connection
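# sqlite3 row factory that returns each row as a dict keyed by column name
# instead of a positional tuple.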
def dictFactory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def getData(begin, end):
connection = connectDB()
with connection:
cursor = connection.cursor()
query = 'SELECT temp_instance, temp_date \
FROM temp \
WHERE temp_date > ? \
AND temp_date < ? \
ORDER BY temp_date DESC'
cursor.execute(query, (begin, end))
data = cursor.fetchall()
return data
def getStats():
connection = connectDB()
with connection:
cursor = connection.cursor()
query = 'SELECT AVG(temp_instance) as temp_average, \
MAX(temp_instance) as temp_max, \
MIN(temp_instance) as temp_min, \
COUNT(temp_instance) as temp_count \
FROM temp'
cursor.execute(query,)
data = cursor.fetchall()
return data
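# GET /api/v1/<begin>/<end> returns the readings between two Unix timestamps
# as JSON, together with the overall stats; passing 0 for <end> means "up to
# now". A hypothetical example request (host is illustrative only):
#   curl http://raspberrypi.local/api/v1/1388534400/0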
@app.route('/api/v1/<int:begin>/<int:end>', methods = ['GET'])
def getTemperatures(begin, end):
if end == 0:
end = time.time()
begin = dt.datetime.fromtimestamp(begin)
end = dt.datetime.fromtimestamp(end)
return jsonify({ "core-stats": getStats(), "instances": getData(begin, end) })
@app.route('/')
def home():
return render_template('index.htm')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=False, port=80)
| mit | -3,343,943,770,565,058,600 | 23.467742 | 79 | 0.661173 | false |
kenmcc/mypywws | src/pywws/Logger.py | 2 | 1838 | #!/usr/bin/env python
# pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2008-14 Jim Easterbrook [email protected]
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Common code for logging info and errors.
"""
from __future__ import absolute_import
import logging
import logging.handlers
import sys
from . import __version__
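# Configures the root logger: with a logfile it uses a size-rotating file
# handler (128 KiB, 3 backups) and a base level of ERROR, otherwise it logs
# to stderr with a base level of WARNING; each increment of `verbose` lowers
# the threshold by 10.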
def ApplicationLogger(verbose, logfile=None):
logger = logging.getLogger('')
if logfile:
logger.setLevel(max(logging.ERROR - (verbose * 10), 1))
handler = logging.handlers.RotatingFileHandler(
logfile, maxBytes=128*1024, backupCount=3)
datefmt = '%Y-%m-%d %H:%M:%S'
else:
logger.setLevel(max(logging.WARNING - (verbose * 10), 1))
handler = logging.StreamHandler()
datefmt = '%H:%M:%S'
handler.setFormatter(
logging.Formatter('%(asctime)s:%(name)s:%(message)s', datefmt))
logger.addHandler(handler)
pywws_logger = logging.getLogger('pywws.Logger')
pywws_logger.warning('pywws version %s', __version__)
pywws_logger.info('Python version %s', sys.version)
return logger
| gpl-2.0 | -4,124,203,621,850,422,000 | 35.76 | 81 | 0.708923 | false |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/sqlalchemy/sql/dml.py | 32 | 33439 | # sql/dml.py
# Copyright (C) 2009-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provide :class:`.Insert`, :class:`.Update` and :class:`.Delete`.
"""
from .base import Executable, _generative, _from_objects, DialectKWArgs, \
ColumnCollection
from .elements import ClauseElement, _literal_as_text, Null, and_, _clone, \
_column_as_key
from .selectable import _interpret_as_from, _interpret_as_select, \
HasPrefixes, HasCTE
from .. import util
from .. import exc
class UpdateBase(
HasCTE, DialectKWArgs, HasPrefixes, Executable, ClauseElement):
"""Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements.
"""
__visit_name__ = 'update_base'
_execution_options = \
Executable._execution_options.union({'autocommit': True})
_hints = util.immutabledict()
_parameter_ordering = None
_prefixes = ()
named_with_column = False
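    # Normalizes values passed to an INSERT/UPDATE: a single tuple or list is
    # zipped against the table's columns into a dict, while a list of
    # dicts/tuples is flagged as a "multiple values" construct.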
def _process_colparams(self, parameters):
def process_single(p):
if isinstance(p, (list, tuple)):
return dict(
(c.key, pval)
for c, pval in zip(self.table.c, p)
)
else:
return p
if self._preserve_parameter_order and parameters is not None:
if not isinstance(parameters, list) or \
(parameters and not isinstance(parameters[0], tuple)):
raise ValueError(
"When preserve_parameter_order is True, "
"values() only accepts a list of 2-tuples")
self._parameter_ordering = [key for key, value in parameters]
return dict(parameters), False
if (isinstance(parameters, (list, tuple)) and parameters and
isinstance(parameters[0], (list, tuple, dict))):
if not self._supports_multi_parameters:
raise exc.InvalidRequestError(
"This construct does not support "
"multiple parameter sets.")
return [process_single(p) for p in parameters], True
else:
return process_single(parameters), False
def params(self, *arg, **kw):
"""Set the parameters for the statement.
This method raises ``NotImplementedError`` on the base class,
and is overridden by :class:`.ValuesBase` to provide the
SET/VALUES clause of UPDATE and INSERT.
"""
raise NotImplementedError(
"params() is not supported for INSERT/UPDATE/DELETE statements."
" To set the values for an INSERT or UPDATE statement, use"
" stmt.values(**parameters).")
def bind(self):
"""Return a 'bind' linked to this :class:`.UpdateBase`
or a :class:`.Table` associated with it.
"""
return self._bind or self.table.bind
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
@_generative
def returning(self, *cols):
r"""Add a :term:`RETURNING` or equivalent clause to this statement.
e.g.::
stmt = table.update().\
where(table.c.data == 'value').\
values(status='X').\
returning(table.c.server_flag,
table.c.updated_timestamp)
for server_flag, updated_timestamp in connection.execute(stmt):
print(server_flag, updated_timestamp)
The given collection of column expressions should be derived from
the table that is
the target of the INSERT, UPDATE, or DELETE. While :class:`.Column`
objects are typical, the elements can also be expressions::
stmt = table.insert().returning(
(table.c.first_name + " " + table.c.last_name).
label('fullname'))
Upon compilation, a RETURNING clause, or database equivalent,
will be rendered within the statement. For INSERT and UPDATE,
the values are the newly inserted/updated values. For DELETE,
the values are those of the rows which were deleted.
Upon execution, the values of the columns to be returned are made
available via the result set and can be iterated using
:meth:`.ResultProxy.fetchone` and similar. For DBAPIs which do not
natively support returning values (i.e. cx_oracle), SQLAlchemy will
approximate this behavior at the result level so that a reasonable
amount of behavioral neutrality is provided.
Note that not all databases/DBAPIs
support RETURNING. For those backends with no support,
an exception is raised upon compilation and/or execution.
For those who do support it, the functionality across backends
varies greatly, including restrictions on executemany()
and other statements which return multiple rows. Please
read the documentation notes for the database in use in
order to determine the availability of RETURNING.
.. seealso::
:meth:`.ValuesBase.return_defaults` - an alternative method tailored
towards efficient fetching of server-side defaults and triggers
for single-row INSERTs or UPDATEs.
"""
self._returning = cols
@_generative
def with_hint(self, text, selectable=None, dialect_name="*"):
"""Add a table hint for a single table to this
INSERT/UPDATE/DELETE statement.
.. note::
:meth:`.UpdateBase.with_hint` currently applies only to
Microsoft SQL Server. For MySQL INSERT/UPDATE/DELETE hints, use
:meth:`.UpdateBase.prefix_with`.
The text of the hint is rendered in the appropriate
location for the database backend in use, relative
to the :class:`.Table` that is the subject of this
statement, or optionally to that of the given
:class:`.Table` passed as the ``selectable`` argument.
The ``dialect_name`` option will limit the rendering of a particular
hint to a particular backend. Such as, to add a hint
that only takes effect for SQL Server::
mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql")
.. versionadded:: 0.7.6
:param text: Text of the hint.
:param selectable: optional :class:`.Table` that specifies
an element of the FROM clause within an UPDATE or DELETE
to be the subject of the hint - applies only to certain backends.
:param dialect_name: defaults to ``*``, if specified as the name
of a particular dialect, will apply these hints only when
that dialect is in use.
"""
if selectable is None:
selectable = self.table
self._hints = self._hints.union(
{(selectable, dialect_name): text})
class ValuesBase(UpdateBase):
"""Supplies support for :meth:`.ValuesBase.values` to
INSERT and UPDATE constructs."""
__visit_name__ = 'values_base'
_supports_multi_parameters = False
_has_multi_parameters = False
_preserve_parameter_order = False
select = None
_post_values_clause = None
def __init__(self, table, values, prefixes):
self.table = _interpret_as_from(table)
self.parameters, self._has_multi_parameters = \
self._process_colparams(values)
if prefixes:
self._setup_prefixes(prefixes)
@_generative
def values(self, *args, **kwargs):
r"""specify a fixed VALUES clause for an INSERT statement, or the SET
clause for an UPDATE.
Note that the :class:`.Insert` and :class:`.Update` constructs support
per-execution time formatting of the VALUES and/or SET clauses,
based on the arguments passed to :meth:`.Connection.execute`.
However, the :meth:`.ValuesBase.values` method can be used to "fix" a
particular set of parameters into the statement.
Multiple calls to :meth:`.ValuesBase.values` will produce a new
construct, each one with the parameter list modified to include
the new parameters sent. In the typical case of a single
dictionary of parameters, the newly passed keys will replace
the same keys in the previous construct. In the case of a list-based
"multiple values" construct, each new list of values is extended
onto the existing list of values.
:param \**kwargs: key value pairs representing the string key
of a :class:`.Column` mapped to the value to be rendered into the
VALUES or SET clause::
users.insert().values(name="some name")
users.update().where(users.c.id==5).values(name="some name")
:param \*args: As an alternative to passing key/value parameters,
a dictionary, tuple, or list of dictionaries or tuples can be passed
as a single positional argument in order to form the VALUES or
SET clause of the statement. The forms that are accepted vary
based on whether this is an :class:`.Insert` or an :class:`.Update`
construct.
For either an :class:`.Insert` or :class:`.Update` construct, a
single dictionary can be passed, which works the same as that of
the kwargs form::
users.insert().values({"name": "some name"})
users.update().values({"name": "some new name"})
Also for either form but more typically for the :class:`.Insert`
construct, a tuple that contains an entry for every column in the
table is also accepted::
users.insert().values((5, "some name"))
The :class:`.Insert` construct also supports being passed a list
of dictionaries or full-table-tuples, which on the server will
render the less common SQL syntax of "multiple values" - this
syntax is supported on backends such as SQLite, PostgreSQL, MySQL,
but not necessarily others::
users.insert().values([
{"name": "some name"},
{"name": "some other name"},
{"name": "yet another name"},
])
The above form would render a multiple VALUES statement similar to::
INSERT INTO users (name) VALUES
(:name_1),
(:name_2),
(:name_3)
It is essential to note that **passing multiple values is
NOT the same as using traditional executemany() form**. The above
syntax is a **special** syntax not typically used. To emit an
INSERT statement against multiple rows, the normal method is
to pass a multiple values list to the :meth:`.Connection.execute`
method, which is supported by all database backends and is generally
more efficient for a very large number of parameters.
.. seealso::
:ref:`execute_multiple` - an introduction to
the traditional Core method of multiple parameter set
invocation for INSERTs and other statements.
.. versionchanged:: 1.0.0 an INSERT that uses a multiple-VALUES
clause, even a list of length one,
implies that the :paramref:`.Insert.inline` flag is set to
True, indicating that the statement will not attempt to fetch
the "last inserted primary key" or other defaults. The
statement deals with an arbitrary number of rows, so the
:attr:`.ResultProxy.inserted_primary_key` accessor does not
apply.
.. versionchanged:: 1.0.0 A multiple-VALUES INSERT now supports
columns with Python side default values and callables in the
same way as that of an "executemany" style of invocation; the
callable is invoked for each row. See :ref:`bug_3288`
for other details.
The :class:`.Update` construct supports a special form which is a
list of 2-tuples, which when provided must be passed in conjunction
with the
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
parameter.
This form causes the UPDATE statement to render the SET clauses
using the order of parameters given to :meth:`.Update.values`, rather
than the ordering of columns given in the :class:`.Table`.
.. versionadded:: 1.0.10 - added support for parameter-ordered
UPDATE statements via the
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
flag.
.. seealso::
:ref:`updates_order_parameters` - full example of the
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
flag
.. seealso::
:ref:`inserts_and_updates` - SQL Expression
Language Tutorial
:func:`~.expression.insert` - produce an ``INSERT`` statement
:func:`~.expression.update` - produce an ``UPDATE`` statement
"""
if self.select is not None:
raise exc.InvalidRequestError(
"This construct already inserts from a SELECT")
if self._has_multi_parameters and kwargs:
raise exc.InvalidRequestError(
"This construct already has multiple parameter sets.")
if args:
if len(args) > 1:
raise exc.ArgumentError(
"Only a single dictionary/tuple or list of "
"dictionaries/tuples is accepted positionally.")
v = args[0]
else:
v = {}
if self.parameters is None:
self.parameters, self._has_multi_parameters = \
self._process_colparams(v)
else:
if self._has_multi_parameters:
self.parameters = list(self.parameters)
p, self._has_multi_parameters = self._process_colparams(v)
if not self._has_multi_parameters:
raise exc.ArgumentError(
"Can't mix single-values and multiple values "
"formats in one statement")
self.parameters.extend(p)
else:
self.parameters = self.parameters.copy()
p, self._has_multi_parameters = self._process_colparams(v)
if self._has_multi_parameters:
raise exc.ArgumentError(
"Can't mix single-values and multiple values "
"formats in one statement")
self.parameters.update(p)
if kwargs:
if self._has_multi_parameters:
raise exc.ArgumentError(
"Can't pass kwargs and multiple parameter sets "
"simultaneously")
else:
self.parameters.update(kwargs)
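    # Illustrative sketch (added; not part of the original module). The docstring
    # above contrasts the special multiple-VALUES form with a traditional
    # executemany()-style invocation; assuming a `users` Table and a `conn`
    # Connection exist elsewhere, the latter looks like:
    #
    #     conn.execute(users.insert(), [
    #         {"name": "some name"},
    #         {"name": "some other name"},
    #     ])
    #
    # and the parameter-ordered UPDATE form (list of 2-tuples; column names here
    # are only placeholders) looks like:
    #
    #     users.update(preserve_parameter_order=True).values(
    #         [("name", "some name"), ("fullname", "some fullname")])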
@_generative
def return_defaults(self, *cols):
"""Make use of a :term:`RETURNING` clause for the purpose
of fetching server-side expressions and defaults.
E.g.::
stmt = table.insert().values(data='newdata').return_defaults()
result = connection.execute(stmt)
server_created_at = result.returned_defaults['created_at']
When used against a backend that supports RETURNING, all column
values generated by SQL expression or server-side-default will be
added to any existing RETURNING clause, provided that
:meth:`.UpdateBase.returning` is not used simultaneously. The column
values will then be available on the result using the
:attr:`.ResultProxy.returned_defaults` accessor as a dictionary,
referring to values keyed to the :class:`.Column` object as well as
its ``.key``.
This method differs from :meth:`.UpdateBase.returning` in these ways:
1. :meth:`.ValuesBase.return_defaults` is only intended for use with
an INSERT or an UPDATE statement that matches exactly one row.
While the RETURNING construct in the general sense supports
multiple rows for a multi-row UPDATE or DELETE statement, or for
special cases of INSERT that return multiple rows (e.g. INSERT from
SELECT, multi-valued VALUES clause),
:meth:`.ValuesBase.return_defaults` is intended only for an
"ORM-style" single-row INSERT/UPDATE statement. The row returned
by the statement is also consumed implicitly when
:meth:`.ValuesBase.return_defaults` is used. By contrast,
:meth:`.UpdateBase.returning` leaves the RETURNING result-set
intact with a collection of any number of rows.
2. It is compatible with the existing logic to fetch auto-generated
primary key values, also known as "implicit returning". Backends
that support RETURNING will automatically make use of RETURNING in
order to fetch the value of newly generated primary keys; while the
:meth:`.UpdateBase.returning` method circumvents this behavior,
:meth:`.ValuesBase.return_defaults` leaves it intact.
3. It can be called against any backend. Backends that don't support
RETURNING will skip the usage of the feature, rather than raising
an exception. The return value of
:attr:`.ResultProxy.returned_defaults` will be ``None``
:meth:`.ValuesBase.return_defaults` is used by the ORM to provide
an efficient implementation for the ``eager_defaults`` feature of
:func:`.mapper`.
:param cols: optional list of column key names or :class:`.Column`
objects. If omitted, all column expressions evaluated on the server
are added to the returning list.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.UpdateBase.returning`
:attr:`.ResultProxy.returned_defaults`
"""
self._return_defaults = cols or True
class Insert(ValuesBase):
"""Represent an INSERT construct.
The :class:`.Insert` object is created using the
:func:`~.expression.insert()` function.
.. seealso::
:ref:`coretutorial_insert_expressions`
"""
__visit_name__ = 'insert'
_supports_multi_parameters = True
def __init__(self,
table,
values=None,
inline=False,
bind=None,
prefixes=None,
returning=None,
return_defaults=False,
**dialect_kw):
"""Construct an :class:`.Insert` object.
Similar functionality is available via the
:meth:`~.TableClause.insert` method on
:class:`~.schema.Table`.
:param table: :class:`.TableClause` which is the subject of the
insert.
:param values: collection of values to be inserted; see
:meth:`.Insert.values` for a description of allowed formats here.
Can be omitted entirely; a :class:`.Insert` construct will also
dynamically render the VALUES clause at execution time based on
the parameters passed to :meth:`.Connection.execute`.
:param inline: if True, no attempt will be made to retrieve the
SQL-generated default values to be provided within the statement;
in particular,
this allows SQL expressions to be rendered 'inline' within the
statement without the need to pre-execute them beforehand; for
backends that support "returning", this turns off the "implicit
returning" feature for the statement.
If both `values` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within `values` on a per-key basis.
The keys within `values` can be either
:class:`~sqlalchemy.schema.Column` objects or their string
identifiers. Each key may reference one of:
* a literal data value (i.e. string, number, etc.);
* a Column object;
* a SELECT statement.
If a ``SELECT`` statement is specified which references this
``INSERT`` statement's table, the statement will be correlated
against the ``INSERT`` statement.
.. seealso::
:ref:`coretutorial_insert_expressions` - SQL Expression Tutorial
:ref:`inserts_and_updates` - SQL Expression Tutorial
"""
ValuesBase.__init__(self, table, values, prefixes)
self._bind = bind
self.select = self.select_names = None
self.include_insert_from_select_defaults = False
self.inline = inline
self._returning = returning
self._validate_dialect_kwargs(dialect_kw)
self._return_defaults = return_defaults
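    # Minimal usage sketch (added for illustration; assumes a `users` Table and a
    # `conn` Connection are defined elsewhere):
    #
    #     ins = users.insert()                      # VALUES rendered at execute time
    #     conn.execute(ins, name="some name")
    #
    #     ins = users.insert().values(name="some name")  # fixed VALUES clause
    #     conn.execute(ins)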
def get_children(self, **kwargs):
if self.select is not None:
return self.select,
else:
return ()
@_generative
def from_select(self, names, select, include_defaults=True):
"""Return a new :class:`.Insert` construct which represents
an ``INSERT...FROM SELECT`` statement.
e.g.::
sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
ins = table2.insert().from_select(['a', 'b'], sel)
:param names: a sequence of string column names or :class:`.Column`
objects representing the target columns.
:param select: a :func:`.select` construct, :class:`.FromClause`
or other construct which resolves into a :class:`.FromClause`,
such as an ORM :class:`.Query` object, etc. The order of
columns returned from this FROM clause should correspond to the
order of columns sent as the ``names`` parameter; while this
is not checked before passing along to the database, the database
would normally raise an exception if these column lists don't
correspond.
:param include_defaults: if True, non-server default values and
SQL expressions as specified on :class:`.Column` objects
(as documented in :ref:`metadata_defaults_toplevel`) not
otherwise specified in the list of names will be rendered
into the INSERT and SELECT statements, so that these values are also
included in the data to be inserted.
.. note:: A Python-side default that uses a Python callable function
will only be invoked **once** for the whole statement, and **not
per row**.
.. versionadded:: 1.0.0 - :meth:`.Insert.from_select` now renders
Python-side and SQL expression column defaults into the
SELECT statement for columns otherwise not included in the
list of column names.
.. versionchanged:: 1.0.0 an INSERT that uses FROM SELECT
implies that the :paramref:`.insert.inline` flag is set to
True, indicating that the statement will not attempt to fetch
the "last inserted primary key" or other defaults. The statement
deals with an arbitrary number of rows, so the
:attr:`.ResultProxy.inserted_primary_key` accessor does not apply.
.. versionadded:: 0.8.3
"""
if self.parameters:
raise exc.InvalidRequestError(
"This construct already inserts value expressions")
self.parameters, self._has_multi_parameters = \
self._process_colparams(
dict((_column_as_key(n), Null()) for n in names))
self.select_names = names
self.inline = True
self.include_insert_from_select_defaults = include_defaults
self.select = _interpret_as_select(select)
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self.parameters = self.parameters.copy()
if self.select is not None:
self.select = _clone(self.select)
class Update(ValuesBase):
"""Represent an Update construct.
The :class:`.Update` object is created using the :func:`update()`
function.
"""
__visit_name__ = 'update'
def __init__(self,
table,
whereclause=None,
values=None,
inline=False,
bind=None,
prefixes=None,
returning=None,
return_defaults=False,
preserve_parameter_order=False,
**dialect_kw):
r"""Construct an :class:`.Update` object.
E.g.::
from sqlalchemy import update
stmt = update(users).where(users.c.id==5).\
values(name='user #5')
Similar functionality is available via the
:meth:`~.TableClause.update` method on
:class:`.Table`::
stmt = users.update().\
where(users.c.id==5).\
values(name='user #5')
:param table: A :class:`.Table` object representing the database
table to be updated.
:param whereclause: Optional SQL expression describing the ``WHERE``
condition of the ``UPDATE`` statement. Modern applications
may prefer to use the generative :meth:`~Update.where()`
method to specify the ``WHERE`` clause.
The WHERE clause can refer to multiple tables.
For databases which support this, an ``UPDATE FROM`` clause will
be generated, or on MySQL, a multi-table update. The statement
will fail on databases that don't have support for multi-table
update statements. A SQL-standard method of referring to
additional tables in the WHERE clause is to use a correlated
subquery::
users.update().values(name='ed').where(
users.c.name==select([addresses.c.email_address]).\
where(addresses.c.user_id==users.c.id).\
as_scalar()
)
.. versionchanged:: 0.7.4
The WHERE clause can refer to multiple tables.
:param values:
Optional dictionary which specifies the ``SET`` conditions of the
``UPDATE``. If left as ``None``, the ``SET``
conditions are determined from those parameters passed to the
statement during the execution and/or compilation of the
statement. When compiled standalone without any parameters,
the ``SET`` clause generates for all columns.
Modern applications may prefer to use the generative
:meth:`.Update.values` method to set the values of the
UPDATE statement.
:param inline:
if True, SQL defaults present on :class:`.Column` objects via
the ``default`` keyword will be compiled 'inline' into the statement
and not pre-executed. This means that their values will not
be available in the dictionary returned from
:meth:`.ResultProxy.last_updated_params`.
:param preserve_parameter_order: if True, the update statement is
expected to receive parameters **only** via the :meth:`.Update.values`
method, and they must be passed as a Python ``list`` of 2-tuples.
The rendered UPDATE statement will emit the SET clause for each
referenced column maintaining this order.
.. versionadded:: 1.0.10
.. seealso::
:ref:`updates_order_parameters` - full example of the
:paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag
If both ``values`` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within ``values`` on a per-key basis.
The keys within ``values`` can be either :class:`.Column`
objects or their string identifiers (specifically the "key" of the
:class:`.Column`, normally but not necessarily equivalent to
its "name"). Normally, the
:class:`.Column` objects used here are expected to be
part of the target :class:`.Table` that is the table
to be updated. However when using MySQL, a multiple-table
UPDATE statement can refer to columns from any of
the tables referred to in the WHERE clause.
The values referred to in ``values`` are typically:
* a literal data value (i.e. string, number, etc.)
* a SQL expression, such as a related :class:`.Column`,
a scalar-returning :func:`.select` construct,
etc.
When combining :func:`.select` constructs within the values
clause of an :func:`.update` construct,
the subquery represented by the :func:`.select` should be
*correlated* to the parent table, that is, providing criterion
which links the table inside the subquery to the outer table
being updated::
users.update().values(
name=select([addresses.c.email_address]).\
where(addresses.c.user_id==users.c.id).\
as_scalar()
)
.. seealso::
:ref:`inserts_and_updates` - SQL Expression
Language Tutorial
"""
self._preserve_parameter_order = preserve_parameter_order
ValuesBase.__init__(self, table, values, prefixes)
self._bind = bind
self._returning = returning
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
self.inline = inline
self._validate_dialect_kwargs(dialect_kw)
self._return_defaults = return_defaults
def get_children(self, **kwargs):
if self._whereclause is not None:
return self._whereclause,
else:
return ()
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self._whereclause = clone(self._whereclause, **kw)
self.parameters = self.parameters.copy()
@_generative
def where(self, whereclause):
"""return a new update() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
if self._whereclause is not None:
self._whereclause = and_(self._whereclause,
_literal_as_text(whereclause))
else:
self._whereclause = _literal_as_text(whereclause)
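    # Illustrative sketch (added): successive where() calls are joined with AND,
    # so, assuming a `users` Table:
    #
    #     stmt = users.update().values(name='ed')
    #     stmt = stmt.where(users.c.id > 5).where(users.c.id < 10)
    #     # ... WHERE users.id > :id_1 AND users.id < :id_2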
@property
def _extra_froms(self):
# TODO: this could be made memoized
# if the memoization is reset on each generative call.
froms = []
seen = set([self.table])
if self._whereclause is not None:
for item in _from_objects(self._whereclause):
if not seen.intersection(item._cloned_set):
froms.append(item)
seen.update(item._cloned_set)
return froms
class Delete(UpdateBase):
"""Represent a DELETE construct.
The :class:`.Delete` object is created using the :func:`delete()`
function.
"""
__visit_name__ = 'delete'
def __init__(self,
table,
whereclause=None,
bind=None,
returning=None,
prefixes=None,
**dialect_kw):
"""Construct :class:`.Delete` object.
Similar functionality is available via the
:meth:`~.TableClause.delete` method on
:class:`~.schema.Table`.
:param table: The table to delete rows from.
:param whereclause: A :class:`.ClauseElement` describing the ``WHERE``
condition of the ``DELETE`` statement. Note that the
:meth:`~Delete.where()` generative method may be used instead.
.. seealso::
:ref:`deletes` - SQL Expression Tutorial
"""
self._bind = bind
self.table = _interpret_as_from(table)
self._returning = returning
if prefixes:
self._setup_prefixes(prefixes)
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
self._validate_dialect_kwargs(dialect_kw)
def get_children(self, **kwargs):
if self._whereclause is not None:
return self._whereclause,
else:
return ()
@_generative
def where(self, whereclause):
"""Add the given WHERE clause to a newly returned delete construct."""
if self._whereclause is not None:
self._whereclause = and_(self._whereclause,
_literal_as_text(whereclause))
else:
self._whereclause = _literal_as_text(whereclause)
def _copy_internals(self, clone=_clone, **kw):
# TODO: coverage
self._whereclause = clone(self._whereclause, **kw)
| mit | 4,854,731,903,291,692,000 | 38.293772 | 87 | 0.60555 | false |
rationalAgent/edx-platform-custom | lms/djangoapps/courseware/tests/test_videoalpha_mongo.py | 2 | 3512 | # -*- coding: utf-8 -*-
"""Video xmodule tests in mongo."""
from . import BaseTestXmodule
from .test_videoalpha_xml import SOURCE_XML
from django.conf import settings
class TestVideo(BaseTestXmodule):
"""Integration tests: web client + mongo."""
CATEGORY = "videoalpha"
DATA = SOURCE_XML
MODEL_DATA = {
'data': DATA
}
def test_handle_ajax_dispatch(self):
responses = {
user.username: self.clients[user.username].post(
self.get_url('whatever'),
{},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
for user in self.users
}
self.assertEqual(
set([
response.status_code
for _, response in responses.items()
]).pop(),
404)
def test_videoalpha_constructor(self):
"""Make sure that all parameters extracted correclty from xml"""
        # `get_html` returns only the context, because we
        # overwrite `system.render_template`
context = self.item_module.get_html()
expected_context = {
'data_dir': getattr(self, 'data_dir', None),
'caption_asset_path': '/c4x/MITx/999/asset/subs_',
'show_captions': self.item_module.show_captions,
'display_name': self.item_module.display_name_with_default,
'end': self.item_module.end_time,
'id': self.item_module.location.html_id(),
'sources': self.item_module.sources,
'start': self.item_module.start_time,
'sub': self.item_module.sub,
'track': self.item_module.track,
'youtube_streams': self.item_module.youtube_streams,
'autoplay': settings.MITX_FEATURES.get('AUTOPLAY_VIDEOS', True)
}
self.assertDictEqual(context, expected_context)
class TestVideoNonYouTube(TestVideo):
"""Integration tests: web client + mongo."""
DATA = """
<videoalpha show_captions="true"
data_dir=""
caption_asset_path=""
autoplay="true"
start_time="01:00:03" end_time="01:00:10"
>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.mp4"/>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.webm"/>
<source src=".../mit-3091x/M-3091X-FA12-L21-3_100.ogv"/>
</videoalpha>
"""
MODEL_DATA = {
'data': DATA
}
def test_videoalpha_constructor(self):
"""Make sure that if the 'youtube' attribute is omitted in XML, then
the template generates an empty string for the YouTube streams.
"""
        # `get_html` returns only the context, because we
        # overwrite `system.render_template`
context = self.item_module.get_html()
expected_context = {
'data_dir': getattr(self, 'data_dir', None),
'caption_asset_path': '/c4x/MITx/999/asset/subs_',
'show_captions': self.item_module.show_captions,
'display_name': self.item_module.display_name_with_default,
'end': self.item_module.end_time,
'id': self.item_module.location.html_id(),
'sources': self.item_module.sources,
'start': self.item_module.start_time,
'sub': self.item_module.sub,
'track': self.item_module.track,
'youtube_streams': '',
'autoplay': settings.MITX_FEATURES.get('AUTOPLAY_VIDEOS', True)
}
self.assertDictEqual(context, expected_context)
| agpl-3.0 | -8,064,464,991,645,807,000 | 34.836735 | 76 | 0.5709 | false |
qma/pants | src/python/pants/build_graph/build_file_address_mapper.py | 4 | 10487 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.base.build_environment import get_buildroot
from pants.base.build_file import BuildFile
from pants.build_graph.address import Address, parse_spec
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.build_file_parser import BuildFileParser
from pants.util.dirutil import fast_relpath
# Note: Significant effort has been made to keep the types BuildFile, BuildGraph, Address, and
# Target separated appropriately. The BuildFileAddressMapper is intended to have knowledge
# of just BuildFile, BuildFileParser and Address.
#
# Here are some guidelines to help maintain this abstraction:
# - Use the terminology 'address' instead of 'target' in symbols and user messages
# - Wrap exceptions from BuildFile and BuildFileParser with a subclass of AddressLookupError
# so that callers do not have to reference those modules
#
# Note: 'spec' should not be a user visible term, substitute 'address' instead.
class BuildFileAddressMapper(object):
"""Maps addresses in the pants virtual address space to corresponding BUILD file declarations.
"""
class AddressNotInBuildFile(AddressLookupError):
"""Indicates an address cannot be found in an existing BUILD file."""
class EmptyBuildFileError(AddressLookupError):
"""Indicates no addresses are defined in a BUILD file."""
class InvalidBuildFileReference(AddressLookupError):
"""Indicates no BUILD file exists at the address referenced."""
class InvalidAddressError(AddressLookupError):
"""Indicates an address cannot be parsed."""
class BuildFileScanError(AddressLookupError):
"""Indicates a problem was encountered scanning a tree of BUILD files."""
class InvalidRootError(BuildFileScanError):
"""Indicates an invalid scan root was supplied."""
def __init__(self, build_file_parser, build_file_type):
"""Create a BuildFileAddressMapper.
:param build_file_parser: An instance of BuildFileParser
:param build_file_type: A subclass of BuildFile used to construct and cache BuildFile objects
"""
self._build_file_parser = build_file_parser
self._spec_path_to_address_map_map = {} # {spec_path: {address: addressable}} mapping
self._build_file_type = build_file_type
@property
def root_dir(self):
return self._build_file_parser.root_dir
def _raise_incorrect_address_error(self, build_file, wrong_target_name, targets):
"""Search through the list of targets and return those which originate from the same folder
which wrong_target_name resides in.
:raises: A helpful error message listing possible correct target addresses.
"""
def path_parts(build): # Gets a tuple of directory, filename.
build = str(build)
slash = build.rfind('/')
if slash < 0:
return '', build
return build[:slash], build[slash + 1:]
def are_siblings(a, b): # Are the targets in the same directory?
return path_parts(a)[0] == path_parts(b)[0]
valid_specs = []
all_same = True
# Iterate through all addresses, saving those which are similar to the wrong address.
for target in targets:
if are_siblings(target.build_file, build_file):
possibility = (path_parts(target.build_file)[1], target.spec[target.spec.rfind(':'):])
# Keep track of whether there are multiple BUILD files or just one.
if all_same and valid_specs and possibility[0] != valid_specs[0][0]:
all_same = False
valid_specs.append(possibility)
# Trim out BUILD extensions if there's only one anyway; no need to be redundant.
if all_same:
valid_specs = [('', tail) for head, tail in valid_specs]
# Might be neat to sort by edit distance or something, but for now alphabetical is fine.
valid_specs = [''.join(pair) for pair in sorted(valid_specs)]
# Give different error messages depending on whether BUILD file was empty.
if valid_specs:
one_of = ' one of' if len(valid_specs) > 1 else '' # Handle plurality, just for UX.
raise self.AddressNotInBuildFile(
'{target_name} was not found in BUILD file {build_file}. Perhaps you '
'meant{one_of}: \n {specs}'.format(target_name=wrong_target_name,
build_file=build_file,
one_of=one_of,
specs='\n '.join(valid_specs)))
# There were no targets in the BUILD file.
raise self.EmptyBuildFileError(
':{target_name} was not found in BUILD file {build_file}, because that '
'BUILD file contains no addressable entities.'.format(target_name=wrong_target_name,
build_file=build_file))
def resolve(self, address):
"""Maps an address in the virtual address space to an object.
:param Address address: the address to lookup in a BUILD file
:raises AddressLookupError: if the path to the address is not found.
:returns: A tuple of the natively mapped BuildFileAddress and the Addressable it points to.
"""
address_map = self._address_map_from_spec_path(address.spec_path)
if address not in address_map:
build_file = self._build_file_type.from_cache(self.root_dir, address.spec_path,
must_exist=False)
self._raise_incorrect_address_error(build_file, address.target_name, address_map)
else:
return address_map[address]
def resolve_spec(self, spec):
"""Converts a spec to an address and maps it using `resolve`"""
try:
address = Address.parse(spec)
except ValueError as e:
raise self.InvalidAddressError(e)
_, addressable = self.resolve(address)
return addressable
def _address_map_from_spec_path(self, spec_path):
"""Returns a resolution map of all addresses in a "directory" in the virtual address space.
:returns {Address: (Address, <resolved Object>)}:
"""
if spec_path not in self._spec_path_to_address_map_map:
try:
try:
build_file = self._build_file_type.from_cache(self.root_dir, spec_path)
except BuildFile.BuildFileError as e:
raise self.BuildFileScanError("{message}\n searching {spec_path}"
.format(message=e,
spec_path=spec_path))
mapping = self._build_file_parser.address_map_from_build_file(build_file)
except BuildFileParser.BuildFileParserError as e:
raise AddressLookupError("{message}\n Loading addresses from '{spec_path}' failed."
.format(message=e, spec_path=spec_path))
address_map = {address: (address, addressed) for address, addressed in mapping.items()}
self._spec_path_to_address_map_map[spec_path] = address_map
return self._spec_path_to_address_map_map[spec_path]
def addresses_in_spec_path(self, spec_path):
"""Returns only the addresses gathered by `address_map_from_spec_path`, with no values."""
return self._address_map_from_spec_path(spec_path).keys()
def from_cache(self, *args, **kwargs):
"""Return a BuildFile instance. Args as per BuildFile.from_cache
:returns: a BuildFile
"""
return self._build_file_type.from_cache(*args, **kwargs)
def spec_to_address(self, spec, relative_to=''):
"""A helper method for mapping a spec to the correct address.
:param string spec: A spec to lookup in the map.
:param string relative_to: Path the spec might be relative to
:raises :class:`pants.build_graph.address_lookup_error.AddressLookupError`
If the BUILD file cannot be found in the path specified by the spec.
:returns: A new Address instance.
:rtype: :class:`pants.build_graph.address.Address`
"""
spec_path, name = parse_spec(spec, relative_to=relative_to)
try:
self.from_cache(self.root_dir, spec_path)
except BuildFile.BuildFileError as e:
raise self.InvalidBuildFileReference('{message}\n when translating spec {spec}'
.format(message=e, spec=spec))
return Address(spec_path, name)
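  # Illustrative examples (added); the address forms follow the docstring above,
  # and the paths/names are only placeholders:
  #
  #   mapper.spec_to_address('src/python/foo:bar')
  #       -> Address('src/python/foo', 'bar')
  #   mapper.spec_to_address(':baz', relative_to='src/python/foo')
  #       -> Address('src/python/foo', 'baz')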
def scan_buildfiles(self, root_dir, *args, **kwargs):
"""Looks for all BUILD files in root_dir or its descendant directories.
:returns: an OrderedSet of BuildFile instances.
"""
return self._build_file_type.scan_buildfiles(root_dir, *args, **kwargs)
def specs_to_addresses(self, specs, relative_to=''):
"""The equivalent of `spec_to_address` for a group of specs all relative to the same path.
:param spec: iterable of Addresses.
:raises AddressLookupError: if the BUILD file cannot be found in the path specified by the spec
"""
for spec in specs:
yield self.spec_to_address(spec, relative_to=relative_to)
def scan_addresses(self, root=None, spec_excludes=None):
"""Recursively gathers all addresses visible under `root` of the virtual address space.
:param string root: The absolute path of the root to scan; defaults to the root directory of the
pants project.
:rtype: set of :class:`pants.build_graph.address.Address`
:raises AddressLookupError: if there is a problem parsing a BUILD file
"""
root_dir = get_buildroot()
base_path = None
if root:
try:
base_path = fast_relpath(root, root_dir)
except ValueError as e:
raise self.InvalidRootError(e)
addresses = set()
try:
for build_file in self._build_file_type.scan_buildfiles(root_dir=root_dir,
base_path=base_path,
spec_excludes=spec_excludes):
for address in self.addresses_in_spec_path(build_file.spec_path):
addresses.add(address)
except BuildFile.BuildFileError as e:
# Handle exception from BuildFile out of paranoia. Currently, there is no way to trigger it.
raise self.BuildFileScanError("{message}\n while scanning BUILD files in '{root}'."
.format(message=e, root=root))
return addresses
| apache-2.0 | 4,005,731,635,918,745,000 | 44.995614 | 100 | 0.66654 | false |
ryanbhayward/games-puzzles-algorithms | old/lib/test/players/mcts/mcts_test.py | 1 | 8759 | from games_puzzles_algorithms.players.mcts.mcts_agent import MctsAgent
from games_puzzles_algorithms.players.mcts.mcts_agent import UctNode
from games_puzzles_algorithms.players.mcts.mcts_agent import BanditNode
from games_puzzles_algorithms.games.fake_game_state import FakeGameState
from games_puzzles_algorithms.debug import log
import random
def test_roll_out():
random.seed(0)
state = FakeGameState()
patient = MctsAgent(random, UctNode(1))
outcome = patient.roll_out(state, 0)
assert outcome['score'] == 2
outcome = patient.roll_out(state, 0)
assert outcome['score'] == -3
outcome = patient.roll_out(state, 0)
assert outcome['score'] == -3
outcome = patient.roll_out(state, 1)
assert outcome['score'] == -2
outcome = patient.roll_out(state, 1)
assert outcome['score'] == 3
outcome = patient.roll_out(state, 1)
assert outcome['score'] == 3
outcome = patient.roll_out(state, 1)
assert outcome['score'] == -2
def test_search_explore3():
random.seed(0)
state = FakeGameState()
patient = MctsAgent(random, UctNode(3))
num_iterations = 10
stats = patient.search(state, num_iterations=num_iterations)
assert stats['num_iterations_completed'] == 10
assert stats['time_used_s'] is not None
assert stats['num_nodes_expanded'] == 11
def test_search_explore2():
random.seed(0)
state = FakeGameState()
patient = MctsAgent(random, UctNode(2))
num_iterations = 10
stats = patient.search(state, num_iterations=num_iterations)
assert stats['num_iterations_completed'] == 10
assert stats['time_used_s'] is not None
assert stats['num_nodes_expanded'] == 9
def test_search_explore1():
random.seed(0)
state = FakeGameState()
patient = MctsAgent(random, UctNode(1))
num_iterations = 10
stats = patient.search(state, num_iterations=num_iterations)
assert stats['num_iterations_completed'] == 10
assert stats['time_used_s'] is not None
assert stats['num_nodes_expanded'] == 9
def test_child_nodes_initially_empty():
root = BanditNode()
assert root.child_nodes() == []
def test_expand():
root = BanditNode()
state = FakeGameState()
root.expand(state)
children = root.child_nodes()
assert len(children) == 2
assert children[0].action == 0
assert children[1].action == 1
assert children[0].parent == root
assert children[1].parent == root
def test_is_leaf():
root = BanditNode()
assert root.is_leaf()
state = FakeGameState()
root.expand(state)
assert not root.is_leaf()
def test_is_root():
root = BanditNode()
state = FakeGameState()
root.expand(state)
assert root.is_root()
for child in root.child_nodes():
assert not child.is_root()
def test_child_nodes():
root = BanditNode()
state = FakeGameState()
root.expand(state)
assert len(root.child_nodes()) == 2
for child in root.child_nodes():
assert len(child.child_nodes()) == 0
def test_num_nodes():
root = BanditNode()
state = FakeGameState()
root.expand(state)
assert root.num_nodes() == 3
for child in root.child_nodes():
assert child.num_nodes() == 1
def test_ucb_initial_explore():
root = BanditNode()
state = FakeGameState()
root.expand(state)
children = root.child_nodes()
for child in children:
assert BanditNode.ucb_value(child, 1) == float('inf')
def test_backup():
root = BanditNode()
state = FakeGameState()
root.expand(state)
children = root.child_nodes()
state.play(children[0].action)
children[0].expand(state)
children[0].child_nodes()[0].backup(-1)
assert children[0].child_nodes()[0].avg_reward() == -1
assert children[0].child_nodes()[1].avg_reward() == 0
assert children[0].avg_reward() == 1
assert len(children[1].child_nodes()) == 0
assert children[1].avg_reward() == 0
children[0].child_nodes()[1].backup(1)
assert children[0].child_nodes()[0].avg_reward() == -1
assert children[0].child_nodes()[1].avg_reward() == 1
assert children[0].avg_reward() == 0
assert len(children[1].child_nodes()) == 0
assert children[1].avg_reward() == 0
def test_backup_with_lcb():
root = BanditNode()
state = FakeGameState()
root.expand(state)
children = root.child_nodes()
state.play(children[0].action)
children[0].expand(state)
children[0].child_nodes()[0].backup(-1)
children[0].child_nodes()[1].backup(1)
assert BanditNode.lcb_value(children[0], 1) == -0.8325546111576977
assert BanditNode.lcb_value(children[0].child_nodes()[0], 1) == (
-2.177410022515475)
assert BanditNode.lcb_value(children[0].child_nodes()[1], 1) == (
-0.17741002251547466)
def test_backup_with_ucb():
root = BanditNode()
state = FakeGameState()
root.expand(state)
children = root.child_nodes()
state.play(children[0].action)
children[0].expand(state)
children[0].child_nodes()[0].backup(-1)
children[0].child_nodes()[1].backup(1)
assert BanditNode.ucb_value(children[0], 1) == 0.8325546111576977
assert BanditNode.ucb_value(children[0].child_nodes()[0], 1) == (
0.17741002251547466)
assert BanditNode.ucb_value(children[0].child_nodes()[1], 1) == (
2.177410022515475)
def test_backup_with_value():
root = BanditNode()
state = FakeGameState()
root.expand(state)
children = root.child_nodes()
state.play(children[0].action)
children[0].expand(state)
children[0].child_nodes()[0].backup(1)
assert children[0].value() == -1
assert children[1].value() == 0
assert children[0].child_nodes()[0].value() == 1
assert children[0].child_nodes()[1].value() == 0
children[0].child_nodes()[1].backup(-1)
assert children[0].value() == 0
assert children[0].child_nodes()[0].value() == 1
assert children[0].child_nodes()[1].value() == -1
def test_backup_with_ucb_explore():
root = UctNode(1)
state = FakeGameState()
root.expand(state)
children = root.child_nodes()
state.play(children[0].action)
children[0].expand(state)
children[0].child_nodes()[0].backup(1)
assert children[0].value() == -1
assert children[1].value() == float("inf")
assert children[0].child_nodes()[0].value() == 1
assert children[0].child_nodes()[1].value() == float("inf")
children[0].child_nodes()[1].backup(-1)
assert children[0].value() > 0
assert children[0].child_nodes()[0].value() > 1
assert children[0].child_nodes()[1].value() > -1
def test_favorite_child():
root = UctNode(1)
state = FakeGameState()
root.expand(state)
children = root.child_nodes()
children[0].backup(1)
children[1].backup(-1)
value_of_favorite = root.favorite_children()[0].value()
for child in children:
assert child.value() <= value_of_favorite
def test_info_strings_to_json():
root = BanditNode()
state = FakeGameState()
root.expand(state)
children = root.child_nodes()
children[0].backup(1)
children[1].backup(-1)
info = root.info_strings_to_dict()
assert info["info"] == "avg_reward: 0.0 num_visits: 2"
assert info["children"][0][
"info"] == "player: 0 action: 0 | avg_reward: 1.0 num_visits: 1"
assert info["children"][1][
"info"] == "player: 0 action: 1 | avg_reward: -1.0 num_visits: 1"
def test_info_strings_to_json_ucb():
root = UctNode(1)
state = FakeGameState()
root.expand(state)
children = root.child_nodes()
children[0].backup(1)
children[1].backup(-1)
info = root.info_strings_to_dict()
assert info[
"info"] == "avg_reward: 0.0 num_visits: 2 ucb_value: 0.8325546111576977"
assert info["children"][0][
"info"] == "player: 0 action: 0 | avg_reward: 1.0 num_visits: 1 ucb_value: 2.177410022515475"
assert info["children"][1][
"info"] == "player: 0 action: 1 | avg_reward: -1.0 num_visits: 1 ucb_value: 0.17741002251547466"
def test_str():
root = BanditNode()
state = FakeGameState()
assert str(root) == '{\n "info": "avg_reward: 0 num_visits: 0"\n}'
def test_verbose_search():
import json
import logging
logging.basicConfig(level=logging.DEBUG)
random.seed(0)
state = FakeGameState()
patient = MctsAgent(random, UctNode(1))
num_iterations = 2
stats = patient.search(state, num_iterations=num_iterations)
assert stats['num_iterations_completed'] == num_iterations
assert stats['time_used_s'] is not None
assert stats['num_nodes_expanded'] == 3
print(json.dumps({'statistics': stats, 'tree': patient.to_dict()},
sort_keys=True,
indent=4))
| mit | 6,192,658,067,794,371,000 | 28.591216 | 104 | 0.638315 | false |
ClearCorp-dev/odoo-clearcorp | TODO-9.0/account_distribution_line/__init__.py | 2 | 1088 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_distribution_line
import account_reconcile | agpl-3.0 | 6,204,988,583,742,978,000 | 44.375 | 78 | 0.618566 | false |
Iwan-Zotow/runEGS | XcData/process_shot.py | 1 | 14376 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import conversion
import subprocess
import symdata
import hashlib
import struct
import numpy as np
import names_helper
import logging
EXT = "xz"
def check_archive_integrity(shot_name):
"""
Given the shot archive, check compression integrity
Parameters
----------
shot_name: string
name of the archive with compressed shot data
returns: int
0 is Ok, non-zero means error
"""
cmd = "xz -t {0}".format(shot_name)
rc = subprocess.call(cmd, shell=True)
return rc
def check_tar_integrity(shot_name):
"""
Given the shot archive, check TAR integrity
Parameters
----------
shot_name: string
name of the archive with compressed shot data
returns: integer
0 is Ok, non-zero means error
"""
cmd = "tar tJf {0}".format(shot_name)
rc = subprocess.call(cmd, shell=True)
return rc
def unpack_archive(shot_name):
"""
Given the shot archive, unpack it
Parameters
----------
shot_name: string
name of the archive with compressed shot data
returns: integer
0 is Ok, non-zero means error
"""
cmd = "tar xJvf {0}".format(shot_name)
rc = subprocess.call(cmd, shell=True)
return rc
def read_sha1(full_prefix):
"""
Read sha1 file, return it as dictionary
Parameters
----------
full_prefix: string
shot full prefix (e.g R8O3IL08C25_Y10Z15)
returns: dictionary or None
Dictionary is Ok, None if error
"""
fname = os.path.join(full_prefix, "sha1")
if not os.access(fname, os.R_OK):
return None
shas = {}
with open(fname, "r") as f:
lines = f.readlines()
for line in lines:
s = line.split()
idx = s[0].find(":")
s[0] = s[0][:idx]
head, name = os.path.split(s[0])
shas[name] = s[1]
return shas
def check_signatures(full_prefix):
"""
Given unpacked direcory with full_prefix, read signatures and check against sha1
Parameters
----------
full_prefix: string
shot full prefix (e.g R8O3IL08C25_Y10Z15)
returns: Tuple of (bool, string, string)
True if ok, False and file name and SHA1 if signature mismatch
"""
shas = read_sha1(full_prefix)
if shas == None:
raise Exception("check_signatures", "SHA1 file is problematic")
algo = "sha1"
if not (algo in hashlib.algorithms):
raise Exception("check_signatures", "No SHA1 hash available")
for k, v in shas.items():
filename, extension = os.path.splitext(k)
if extension == ".log":
continue
if extension == ".egsphsp1":
continue
fname = os.path.join(full_prefix, k)
hasher = hashlib.sha1()
with open(fname, "rb") as afile:
buf = afile.read()
hasher.update(buf)
s = hasher.hexdigest()
if s != v:
return (False, k, v)
return (True, None, None)
def get_dimensions(line):
"""
Parse and extract X, Y and Z dimensions from string
Parameters
----------
    line: string
        line containing the X, Y and Z dimensions
    returns: Tuple of integers
        number of bins along X, Y and Z: (nx, ny, nz)
"""
split = line.split(" ")
split = [x for x in split if x] # remove empty lines
nx = int(split[0])
ny = int(split[1])
nz = int(split[2])
return (nx, ny, nz)
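# Illustrative example (added): for a typical first line of a .3ddose file such
# as "  10  20  30", the call would yield
#
#     get_dimensions("  10  20  30")  ->  (10, 20, 30)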
def get_boundaries(n, line):
"""
Parse and extract X, Y and Z boundaries from string
Parameters
----------
n: integer
number of bins (boundaries are one more)
line: string
line contains boundaries data
returns: array of floats
array of parsed boundaries, in mm
"""
split = line.split(" ")
split = [x for x in split if x] # remove empty lines
boundaries = []
for i in range(0,n+1):
d = conversion.cm2mm( float(split[i]) )
boundaries.append(d)
    if len(boundaries) == 0:
return None
return boundaries
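# Illustrative example (added), assuming conversion.cm2mm() simply multiplies by 10:
#
#     get_boundaries(2, "0.0 0.5 1.0")  ->  [0.0, 5.0, 10.0]   # in mm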
def get_full_prefix_name(shot_name):
"""
Given shot name, get back full name
Parameters
----------
shot_name: string
full shot file name
returns: string
extracted full prefix (e.g R8O3IL08C25_Y10Z15)
"""
head,tail = os.path.split(shot_name)
idx = tail.find(".")
if idx == -1:
raise ValueError("File shot not found: {0}".shot_name)
return tail[:idx]
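# Illustrative example (added):
#
#     get_full_prefix_name("/some/dir/R8O3IL08C25_Y10Z15.tar.xz")  ->  "R8O3IL08C25_Y10Z15"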
def get_3ddata(nx, ny, nz, line, data):
"""
Read a line and convert it to 3D dose representation
Parameters
----------
nx: integer
nof X points
ny: integer
nof Y points
nz: integer
nof Z points
line: string
which contains all 3D dose data points
data: numpy 3D grid of floats
3D dose data as NumPy object
"""
split = line.split(" ")
split = [x for x in split if x] # remove empty lines
k = 0
for iz in range(0, nz):
for iy in range(0, ny):
for ix in range(0, nx):
data[ix,iy,iz] = float(split[k])
k += 1
def read_data(full_prefix):
"""
Read shot data into data array from full prefixed dir
Parameters
----------
full_prefix: string
directory with full prefix name, contains unpacked shot data (e.g R8O3IL08C25_Y10Z15)
returns: symdata object
all .3ddose data read from shot on success, None on failure
"""
fname = os.path.join(full_prefix, full_prefix + ".3ddose")
phd = None
with open(fname, 'r') as f:
#read in the dimensions
line = f.readline()
(nx, ny, nz) = get_dimensions(line)
line = f.readline()
bx = get_boundaries(nx, line)
line = f.readline()
by = get_boundaries(ny, line)
line = f.readline()
bz = get_boundaries(nz, line)
phd = symdata.symdata(bx, by, bz)
data = phd.data()
line = f.readline()
get_3ddata(nx, ny, nz, line, data)
return phd
def writeX_d3d(fname, tddata, zshift):
"""
Write X averaged dose data, assuming data is X averaged
Parameters
----------
fname: string
file name to write
tddata: 3d data object
holds dose and boundaries to write
zshift: float
Z shift, mm
returns: Tuple of floats
dose bounding box (minX, maxX, minY, maxY, minZ, maxZ), in mm, or None in the case of failure
"""
if not tddata.sym_x():
raise Exception("Data are NOT x averaged, bailing out...\n")
folder_name = os.path.dirname(fname)
if folder_name != '':
if not os.path.exists(folder_name):
os.makedirs(folder_name)
nx = tddata.nx()
ny = tddata.ny()
nz = tddata.nz()
bx = tddata.bx()
by = tddata.by()
bz = tddata.bz()
data = tddata.data()
with open(fname, "wb") as f:
# write symmetry flags
f.write(struct.pack("i", 1)) # X sym
f.write(struct.pack("i", 0)) # Y not sym
f.write(struct.pack("i", 0)) # Z not sym
# write dimensions
nx_half = nx//2
f.write(struct.pack("i", nx_half)) # due to symmetry
f.write(struct.pack("i", ny))
f.write(struct.pack("i", nz))
# write X boundaries, symmetric
for ix in range(nx_half, nx+1):
xmm = np.float32( bx[ix] )
f.write(struct.pack("f", xmm))
# write Y boundaries, full
for iy in range(0, ny+1):
ymm = np.float32( by[iy] )
f.write(struct.pack("f", ymm))
# write Z boundaries, full
for iz in range(0, nz+1):
zmm = np.float32( bz[iz] ) - zshift
f.write(struct.pack("f", zmm))
# supposed to be reversed order
for ix in range(nx_half, nx):
for iy in range(0, ny):
for iz in range(0, nz):
d = np.float32(data[ix,iy,iz])
f.write(struct.pack("f", d))
return ( bx[0], bx[-1], by[0], by[-1], bz[0] - zshift, bz[-1] - zshift)
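# Illustrative sketch (added): reading back the header written by writeX_d3d(),
# assuming the same native struct sizes used above:
#
#     with open(fname, "rb") as f:
#         sym_x, sym_y, sym_z = struct.unpack("3i", f.read(12))
#         nx_half, ny, nz = struct.unpack("3i", f.read(12))
#         bx = struct.unpack("{0}f".format(nx_half + 1), f.read(4 * (nx_half + 1)))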
def writeXY_d3d(fname, tddata, zshift):
"""
Write X&Y averaged dose data, assuming data is X&Y averaged
Parameters
----------
fname: string
file name to write
tddata: 3d data object
holds dose and boundaries to write
zshift: float
Z shift, mm
returns: Tuple of floats
dose bounding box (minX, maxX, minY, maxY, minZ, maxZ), in mm, or None in the case of failure
"""
if not tddata.sym_x():
raise Exception("Data are NOT X averaged, bailing out...\n")
if not tddata.sym_y():
raise Exception("Data are NOT Y averaged, bailing out...\n")
folder_name = os.path.dirname(fname)
if folder_name != '':
if not os.path.exists(folder_name):
os.makedirs(folder_name)
nx = tddata.nx()
ny = tddata.ny()
nz = tddata.nz()
bx = tddata.bx()
by = tddata.by()
bz = tddata.bz()
data = tddata.data()
with open(fname, "wb") as f:
# write symmetry flags
f.write(struct.pack("i", 1)) # X sym
f.write(struct.pack("i", 1)) # Y sym
f.write(struct.pack("i", 0)) # Z not sym
# write dimensions
nx_half = nx//2
f.write(struct.pack("i", nx_half)) # due to symmetry
ny_half = ny//2
f.write(struct.pack("i", ny_half))
f.write(struct.pack("i", nz))
# write X boundaries, symmetric
for ix in range(nx_half, nx+1):
xmm = np.float32( bx[ix] )
f.write(struct.pack("f", xmm))
# write Y boundaries, symmetric
for iy in range(ny_half, ny+1):
ymm = np.float32( by[iy] )
f.write(struct.pack("f", ymm))
# write Z boundaries, full
for iz in range(0, nz+1):
zmm = np.float32( bz[iz] ) - zshift
f.write(struct.pack("f", zmm))
# supposed to be reversed order
for ix in range(nx_half, nx):
for iy in range(ny_half, ny):
for iz in range(0, nz):
d = np.float32(data[ix,iy,iz])
f.write(struct.pack("f", d))
return ( bx[0], bx[-1], by[0], by[-1], bz[0] - zshift, bz[-1] - zshift)
def full_prefix_2_d3d_name(full_prefix):
"""
Given full prefix for a shot, make .d3difo compatible file name
full_prefix: string
directory with full prefix name, contains unpacked shot data (e.g R8O3IL08C25_Y10Z15)
returns: string
.d3difo compatible file name (e.g. R8O2IM01_Y000Z000C015)
"""
radUnit, outerCup, innerCupSer, innerCupNum, coll = names_helper.parse_file_prefix( full_prefix )
(shY, shZ) = names_helper.parse_shot(full_prefix)
file_prefix = names_helper.make_cup_prefix(radUnit, outerCup, innerCupSer, innerCupNum)
return file_prefix + "_Y{0:03d}Z{1:03d}C{2:03d}".format(int(shY), int(shZ), int(coll))
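# Indicative example (added): the exact cup prefix comes from
# names_helper.make_cup_prefix(), so the output below is only illustrative:
#
#     full_prefix_2_d3d_name("R8O3IL08C25_Y10Z15")  ->  something like "R8O3IL08_Y010Z015C025"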
def process_shot(shot_name, out_dir, zshift, sym_Y = False):
"""
Process single shot given shot full filename
shot_name: string
full name of the compressed shot data file
out_dir: string
name of the output directory
zshift: float
Z shift, in mm
returns: tuple of data for .d3difo file
collimator, shot position (Y,Z) in mm, dose box bounds (minX, maxX, minY, maxY, minZ, maxZ) in mm, .d3d file name
"""
# first, check archive existance
if not os.access(shot_name, os.R_OK):
raise ValueError("File shot not found: {0}".format(shot_name))
# test with decompression
rc = check_archive_integrity(shot_name)
if rc != 0:
raise ValueError("Archive is bad: {0}".format(shot_name))
rc = check_tar_integrity(shot_name)
if rc != 0:
raise ValueError("TAR is bad: {0}".format(shot_name))
rc = unpack_archive(shot_name)
if rc != 0:
raise ValueError("Upacking failed: {0}".format(shot_name))
full_prefix = get_full_prefix_name(shot_name)
(rc, name, sha1) = check_signatures(full_prefix)
if not rc:
raise ValueError("SHA1 failed: {0}: {1}".format(name, sha1))
tddose = read_data(full_prefix)
can_sym_X = tddose.could_sym_x()
if not can_sym_X:
raise Exception("Cannot X AVERAGE, bad X boundaries\n")
tddose.do_sym_x()
if not tddose.sym_x():
raise Exception("Something went wrong on X symmetrization\n")
can_sym_Y = False
if sym_Y: # someone wants averaged Y
can_sym_Y = tddose.could_sym_y() # check if we can...
if can_sym_Y:
# yes, we can
tddose.do_sym_y()
if not tddose.sym_y():
raise Exception("Something went wrong on Y symmetrization\n")
# writing thing out, getting back boundaries
aname = full_prefix_2_d3d_name(full_prefix)+".d3d"
bounds = None
if can_sym_Y:
bounds = writeXY_d3d(os.path.join(out_dir, aname), tddose, zshift)
else:
bounds = writeX_d3d(os.path.join(out_dir, aname), tddose, zshift)
if bounds == None:
raise Exception("No dose box bounds returned\n")
shot = names_helper.parse_shot(full_prefix)
radUnit, outerCup, innerCupSer, innerCupNum, coll = names_helper.parse_file_prefix( full_prefix )
return (coll, shot, bounds, aname)
if __name__ == "__main__":
process_shot("/home/beamuser/Documents/EGS/R8O3IL09C25_Y0Z0.tar.xz", ".", 140.0)
| apache-2.0 | 4,597,714,065,421,494,300 | 25.281536 | 121 | 0.544866 | false |
sdispater/cleo | cleo/ui/table.py | 1 | 22947 | import itertools
import math
import re
from typing import Dict
from typing import Generator
from typing import List
from typing import Optional
from typing import Union
from cleo.formatters.formatter import Formatter
from cleo.io.io import IO
from cleo.io.outputs.output import Output
from .table_cell import TableCell
from .table_cell_style import TableCellStyle
from .table_separator import TableSeparator
from .table_style import TableStyle
_Row = List[Union[str, TableCell]]
_Rows = List[Union[_Row, TableSeparator]]
class Table:
SEPARATOR_TOP: int = 0
SEPARATOR_TOP_BOTTOM: int = 1
SEPARATOR_MID: int = 2
SEPARATOR_BOTTOM: int = 3
BORDER_OUTSIDE: int = 0
BORDER_INSIDE: int = 1
_styles: Optional[Dict[str, TableStyle]] = None
def __init__(self, io: Union[IO, Output], style: str = None) -> None:
self._io = io
if style is None:
style = "default"
self._header_title = None
self._footer_title = None
self._headers = []
self._rows = []
self._horizontal = False
self._effective_column_widths: Dict[int, int] = {}
self._number_of_columns = None
self._column_styles: Dict[int, TableStyle] = {}
self._column_widths: Dict[int, int] = {}
self._column_max_widths: Dict[int, int] = {}
self._rendered = False
self._style: Optional[TableStyle] = None
self._init_styles()
self.set_style(style)
@property
def style(self) -> TableStyle:
return self._style
def set_style(self, name: str) -> "Table":
self._init_styles()
self._style = self._resolve_style(name)
return self
def column_style(self, column_index: int) -> TableStyle:
if column_index in self._column_styles:
return self._column_styles[column_index]
return self._style
def set_column_style(
self, column_index: int, style: Union[str, TableStyle]
) -> "Table":
self._column_styles[column_index] = self._resolve_style(style)
return self
def set_column_width(self, column_index: int, width: int) -> "Table":
self._column_widths[column_index] = width
return self
def set_column_widths(self, widths: List[int]) -> "Table":
self._column_widths = {}
for i, width in enumerate(widths):
self._column_widths[i] = width
return self
def set_column_max_width(self, column_index: int, width: int) -> "Table":
        self._column_max_widths[column_index] = width
return self
def set_headers(self, headers: List[str]) -> "Table":
if headers and not isinstance(headers[0], list):
headers = [headers]
self._headers = headers
return self
def set_rows(self, rows: _Rows) -> "Table":
self._rows = []
return self.add_rows(rows)
def add_rows(self, rows: _Rows) -> "Table":
for row in rows:
self.add_row(row)
return self
def add_row(self, row: Union[_Row, TableSeparator]) -> "Table":
if isinstance(row, TableSeparator):
self._rows.append(row)
return self
self._rows.append(row)
return self
def set_header_title(self, header_title: str) -> "Table":
self._header_title = header_title
return self
def set_footer_title(self, footer_title: str) -> "Table":
self._footer_title = footer_title
return self
def horizontal(self, horizontal: bool = True) -> "Table":
self._horizontal = horizontal
return self
def render(self) -> None:
divider = TableSeparator()
if self._horizontal:
rows = []
headers = self._headers[0] if self._headers else []
for i, header in enumerate(headers):
rows.append([header])
for row in self._rows:
if isinstance(row, TableSeparator):
continue
if len(row) > i:
rows[i].append(row[i])
elif isinstance(rows[i][0], TableCell) and rows[i][0].colspan >= 2:
# There is a title
pass
else:
rows[i].append(None)
else:
rows = self._headers + [divider] + self._rows
self._calculate_number_of_columns(rows)
rows = list(self._build_table_rows(rows))
self._calculate_column_widths(rows)
is_header = not self._horizontal
is_first_row = self._horizontal
for row in rows:
if row is divider:
is_header = False
is_first_row = True
continue
if isinstance(row, TableSeparator):
self._render_row_separator()
continue
if not row:
continue
if is_header or is_first_row:
if is_first_row:
self._render_row_separator(self.SEPARATOR_TOP_BOTTOM)
is_first_row = False
else:
self._render_row_separator(
self.SEPARATOR_TOP,
self._header_title,
self._style.header_title_format,
)
if self._horizontal:
self._render_row(
row, self._style.cell_row_format, self._style.cell_header_format
)
else:
self._render_row(
row,
self._style.cell_header_format
if is_header
else self._style.cell_row_format,
)
self._render_row_separator(
self.SEPARATOR_BOTTOM,
self._footer_title,
self._style.footer_title_format,
)
self._cleanup()
self._rendered = True
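    # Minimal usage sketch (added for illustration; assumes an Output or IO
    # instance `io` obtained from the running application):
    #
    #     table = Table(io)
    #     table.set_headers(["ISBN", "Title", "Author"])
    #     table.add_rows([
    #         ["99921-58-10-7", "Divine Comedy", "Dante Alighieri"],
    #         TableSeparator(),
    #         ["9971-5-0210-0", "A Tale of Two Cities", "Charles Dickens"],
    #     ])
    #     table.render()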
def _render_row_separator(
self,
type: int = SEPARATOR_MID,
title: Optional[str] = None,
title_format: Optional[str] = None,
) -> None:
"""
Renders horizontal header separator.
Example:
+-----+-----------+-------+
"""
count = self._number_of_columns
if not count:
return
borders = self._style.border_chars
if not borders[0] and not borders[2] and not self._style.crossing_char:
return
crossings = self._style.crossing_chars
if type == self.SEPARATOR_MID:
horizontal, left_char, mid_char, right_char = (
borders[2],
crossings[8],
crossings[0],
crossings[4],
)
elif type == self.SEPARATOR_TOP:
horizontal, left_char, mid_char, right_char = (
borders[0],
crossings[1],
crossings[2],
crossings[3],
)
elif type == self.SEPARATOR_TOP_BOTTOM:
horizontal, left_char, mid_char, right_char = (
borders[0],
crossings[9],
crossings[10],
crossings[11],
)
else:
horizontal, left_char, mid_char, right_char = (
borders[0],
crossings[7],
crossings[6],
crossings[5],
)
markup = left_char
for column in range(count):
markup += horizontal * self._effective_column_widths[column]
markup += right_char if column == count - 1 else mid_char
if title is not None:
formatted_title = title_format.format(title)
title_length = len(self._io.remove_format(formatted_title))
markup_length = len(markup)
limit = markup_length - 4
if title_length > limit:
title_length = limit
format_length = len(self._io.remove_format(title_format.format("")))
formatted_title = title_format.format(
title[: limit - format_length - 3] + "..."
)
title_start = (markup_length - title_length) // 2
markup = (
markup[:title_start]
+ formatted_title
+ markup[title_start + title_length :]
)
self._io.write_line(self._style.border_format.format(markup))
def _render_column_separator(self, type: int = BORDER_OUTSIDE) -> str:
"""
Renders vertical column separator.
"""
borders = self._style.border_chars
return self._style.border_format.format(
borders[1] if type == self.BORDER_OUTSIDE else borders[3]
)
def _render_row(
self, row: List[str], cell_format: str, first_cell_format: Optional[str] = None
) -> None:
"""
Renders table row.
Example:
| 9971-5-0210-0 | A Tale of Two Cities | Charles Dickens |
"""
row_content = self._render_column_separator(self.BORDER_OUTSIDE)
columns = self._get_row_columns(row)
last = len(columns) - 1
for i, column in enumerate(columns):
if first_cell_format and i == 0:
row_content += self._render_cell(row, column, first_cell_format)
else:
row_content += self._render_cell(row, column, cell_format)
row_content += self._render_column_separator(
self.BORDER_OUTSIDE if i == last else self.BORDER_INSIDE
)
self._io.write_line(row_content)
def _render_cell(self, row: _Row, column: int, cell_format: str) -> str:
"""
Renders a table cell with padding.
"""
try:
cell = row[column]
except IndexError:
cell = ""
width = self._effective_column_widths[column]
if isinstance(cell, TableCell) and cell.colspan > 1:
# add the width of the following columns(numbers of colspan).
for next_column in range(column + 1, column + cell.colspan):
width += (
self._get_column_separator_width()
+ self._effective_column_widths[next_column]
)
style = self.column_style(column)
if isinstance(cell, TableSeparator):
return style.border_format.format(style.border_chars[2] * width)
width += len(cell) - len(self._io.remove_format(cell))
content = style.cell_row_content_format.format(cell)
pad = style.pad
if isinstance(cell, TableCell) and isinstance(cell.style, TableCellStyle):
is_not_styled_by_tag = not re.match(
r"^<(\w+|(\w+=[\w,]+;?)*)>.+</(\w+|(\w+=\w+;?)*)?>$", str(cell)
)
if is_not_styled_by_tag:
cell_format = cell.style.cell_format
if cell_format is None:
cell_format = f"<{cell.style.tag}>{{}}</>"
if "</>" in content:
content = content.replace("</>", "")
width -= 3
if "<fg=default;bg=default>" in content:
content = content.replace("<fg=default;bg=default>")
width -= len("<fg=default;bg=default>")
pad = cell.style.pad
return cell_format.format(pad(content, width, style.padding_char))
def _calculate_number_of_columns(self, rows: _Rows) -> None:
columns = [0]
for row in rows:
if isinstance(row, TableSeparator):
continue
columns.append(self._get_number_of_columns(row))
self._number_of_columns = max(columns)
def _build_table_rows(self, rows: _Rows) -> Generator:
unmerged_rows = {}
row_key = 0
while row_key < len(rows):
rows = self._fill_next_rows(rows, row_key)
# Remove any new line breaks and replace it with a new line
for column, cell in enumerate(rows[row_key]):
colspan = cell.colspan if isinstance(cell, TableCell) else 1
if column in self._column_max_widths and self._column_max_widths[
column
] < len(self._io.remove_format(cell)):
cell = self._io.formatter.format_and_wrap(
cell, self._column_max_widths[column] * colspan
)
if "\n" not in cell:
continue
escaped = "\n".join(
Formatter.escape_trailing_backslash(c) for c in cell.split("\n")
)
cell = (
TableCell(escaped, colspan=cell.colspan)
if isinstance(cell, TableCell)
else escaped
)
lines = cell.replace("\n", "<fg=default;bg=default>\n</>").split("\n")
for line_key, line in enumerate(lines):
if colspan > 1:
line = TableCell(line, colspan=colspan)
if line_key == 0:
rows[row_key][column] = line
else:
if row_key not in unmerged_rows:
unmerged_rows[row_key] = {}
if line_key not in unmerged_rows[row_key]:
unmerged_rows[row_key][line_key] = self._copy_row(
rows, row_key
)
unmerged_rows[row_key][line_key][column] = line
row_key += 1
for row_key, row in enumerate(rows):
yield self._fill_cells(row)
if row_key in unmerged_rows:
for unmerged_row in unmerged_rows[row_key].values():
yield self._fill_cells(unmerged_row)
def _calculate_row_count(self) -> int:
number_of_rows = len(
list(
self._build_table_rows(self._headers + [TableSeparator()] + self._rows)
)
)
if self._headers:
number_of_rows += 1
if len(self._rows) > 0:
number_of_rows += 1
return number_of_rows
def _fill_next_rows(self, rows: _Rows, line: int) -> _Rows:
"""
Fill rows that contains rowspan > 1.
"""
unmerged_rows = {}
for column, cell in enumerate(rows[line]):
if isinstance(cell, TableCell) and cell.rowspan > 1:
nb_lines = cell.rowspan - 1
lines = [cell]
if "\n" in cell:
lines = cell.replace("\n", "<fg=default;bg=default>\n</>").split(
"\n"
)
if len(lines) > nb_lines:
nb_lines = cell.count("\n")
rows[line][column] = TableCell(
lines[0], colspan=cell.colspan, style=cell.style
)
# Create a two dimensional dict (rowspan x colspan)
placeholder = dict(
[(k, {}) for k in range(line + 1, line + 1 + nb_lines)]
)
for k, v in unmerged_rows.items():
if k in placeholder:
for l, m in unmerged_rows[k].items():
if l in placeholder[k]:
placeholder[k][l].update(m)
else:
placeholder[k][l] = m
else:
placeholder[k] = v
unmerged_rows = placeholder
for unmerged_row_key, _ in unmerged_rows.items():
value = ""
if unmerged_row_key - line < len(lines):
value = lines[unmerged_row_key - line]
unmerged_rows[unmerged_row_key][column] = TableCell(
value, colspan=cell.colspan, style=cell.style
)
if nb_lines == unmerged_row_key - line:
break
for unmerged_row_key, unmerged_row in unmerged_rows.items():
# we need to know if unmerged_row will be merged or inserted into rows
if (
unmerged_row_key < len(rows)
and isinstance(rows[unmerged_row_key], list)
and (
(
self._get_number_of_columns(rows[unmerged_row_key])
+ self._get_number_of_columns(
list(unmerged_rows[unmerged_row_key].values())
)
)
<= self._number_of_columns
)
):
# insert cell into row at cell_key position
for cell_key, cell in unmerged_row.items():
rows[unmerged_row_key].insert(cell_key, cell)
else:
row = self._copy_row(rows, unmerged_row_key - 1)
for column, cell in unmerged_row.items():
if len(cell):
row[column] = unmerged_row[column]
rows.insert(unmerged_row_key, row)
return rows
def _fill_cells(self, row: _Row) -> List[Union[str, TableCell]]:
"""
Fills cells for a row that contains colspan > 1.
"""
new_row = []
for column, cell in enumerate(row):
new_row.append(cell)
if isinstance(cell, TableCell) and cell.colspan > 1:
for _ in range(column + 1, column + cell.colspan):
# insert empty value at column position
new_row.append("")
if new_row:
return new_row
return row
def _copy_row(self, rows: _Rows, line: int) -> _Row:
"""
Copies a row.
"""
row = [x for x in rows[line]]
for cell_key, cell_value in enumerate(row):
row[cell_key] = ""
if isinstance(cell_value, TableCell):
row[cell_key] = TableCell("", colspan=cell_value.colspan)
return row
def _get_number_of_columns(self, row: _Row):
"""
Gets number of columns by row.
"""
columns = len(row)
for column in row:
if isinstance(column, TableCell):
columns += column.colspan - 1
return columns
def _get_row_columns(self, row: _Row) -> List[int]:
"""
Gets list of columns for the given row.
"""
columns = list(range(0, self._number_of_columns))
for cell_key, cell in enumerate(row):
if isinstance(cell, TableCell) and cell.colspan > 1:
# exclude grouped columns.
columns = [
x
for x in columns
if x not in list(range(cell_key + 1, cell_key + cell.colspan))
]
return columns
def _calculate_column_widths(self, rows: _Rows) -> None:
"""
Calculates column widths.
"""
for column in range(0, self._number_of_columns):
lengths = [0]
for row in rows:
if isinstance(row, TableSeparator):
continue
row_ = row.copy()
for i, cell in enumerate(row_):
if isinstance(cell, TableCell):
text_content = self._io.remove_format(cell)
text_length = len(text_content)
if text_length:
length = math.ceil(text_length / cell.colspan)
content_columns = [
text_content[i : i + length]
for i in range(0, text_length, length)
]
for position, content in enumerate(content_columns):
try:
row_[i + position] = content
except IndexError:
row_.append(content)
lengths.append(self._get_cell_width(row_, column))
self._effective_column_widths[column] = (
max(lengths) + len(self._style.cell_row_content_format) - 2
)
def _get_column_separator_width(self) -> int:
return len(self._style.border_format.format(self._style.border_chars[3]))
def _get_cell_width(self, row: _Row, column: int) -> int:
"""
Gets cell width.
"""
cell_width = 0
try:
cell = row[column]
cell_width = len(self._io.remove_format(cell))
except IndexError:
pass
column_width = (
self._column_widths[column] if column in self._column_widths else 0
)
cell_width = max(cell_width, column_width)
if column in self._column_max_widths:
return min(self._column_max_widths[column], cell_width)
return cell_width
def _cleanup(self):
self._column_widths = {}
self._number_of_columns = None
@classmethod
def _init_styles(cls) -> None:
if cls._styles is not None:
return
borderless = TableStyle()
borderless.set_horizontal_border_chars("=")
borderless.set_vertical_border_chars(" ")
borderless.set_default_crossing_char(" ")
compact = TableStyle()
compact.set_horizontal_border_chars("")
compact.set_vertical_border_chars(" ")
compact.set_default_crossing_char("")
compact.set_cell_row_content_format("{}")
box = TableStyle()
box.set_horizontal_border_chars("─")
box.set_vertical_border_chars("│")
box.set_crossing_chars("┼", "┌", "┬", "┐", "┤", "┘", "┴", "└", "├")
box_double = TableStyle()
box_double.set_horizontal_border_chars("═", "─")
box_double.set_vertical_border_chars("║", "│")
box_double.set_crossing_chars(
"┼", "╔", "╤", "╗", "╢", "╝", "╧", "╚", "╟", "╠", "╪", "╣"
)
cls._styles = {
"default": TableStyle(),
"borderless": borderless,
"compact": compact,
"box": box,
"box-double": box_double,
}
@classmethod
def _resolve_style(cls, name: Union[str, TableStyle]) -> TableStyle:
if isinstance(name, TableStyle):
return name
if name in cls._styles:
return cls._styles[name]
raise ValueError(f'Table style "{name}" is not defined.')
| mit | -3,334,421,641,350,958,000 | 31.15309 | 87 | 0.495566 | false |
google/report2bq | application/classes/sa360_report_validation/keyword.py | 1 | 3039 | """
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = ['[email protected] (David Harcombe)']
from googleapiclient.discovery import Resource
from classes.sa360_report_validation.sa360_field_validator import SA360Validator
class Keyword(SA360Validator):
def __init__(self,
sa360_service: Resource = None,
agency: int = None,
advertiser: int = None) -> None:
super().__init__(sa360_service, agency, advertiser)
self.fields = [
"status",
"engineStatus",
"creationTimestamp",
"lastModifiedTimestamp",
"agency",
"agencyId",
"advertiser",
"advertiserId",
"account",
"accountId",
"accountEngineId",
"accountType",
"campaign",
"campaignId",
"campaignStatus",
"adGroup",
"adGroupId",
"adGroupStatus",
"keywordId",
"keywordMatchType",
"keywordText",
"keywordEngineId",
"keywordMaxCpc",
"effectiveKeywordMaxCpc",
"keywordLandingPage",
"keywordClickserverUrl",
"isDisplayKeyword",
"keywordMaxBid",
"keywordMinBid",
"keywordUrlParams",
"bingKeywordParam2",
"bingKeywordParam3",
"keywordLabels",
"qualityScoreCurrent",
"topOfPageBidCurrent",
"effectiveBidStrategyId",
"effectiveBidStrategy",
"bidStrategyInherited",
"effectiveLabels",
"dfaActions",
"dfaRevenue",
"dfaTransactions",
"dfaWeightedActions",
"dfaActionsCrossEnv",
"dfaRevenueCrossEnv",
"dfaTransactionsCrossEnv",
"dfaWeightedActionsCrossEnv",
"avgCpc",
"avgCpm",
"avgPos",
"clicks",
"cost",
"ctr",
"impr",
"adWordsConversions",
"adWordsConversionValue",
"adWordsViewThroughConversions",
"visits",
"qualityScoreAvg",
"topOfPageBidAvg",
"date",
"monthStart",
"monthEnd",
"quarterStart",
"quarterEnd",
"weekStart",
"weekEnd",
"yearStart",
"yearEnd",
"deviceSegment",
"floodlightGroup",
"floodlightGroupId",
"floodlightGroupTag",
"floodlightActivity",
"floodlightActivityId",
"floodlightActivityTag",
"ad",
"adId",
"isUnattributedAd",
"adHeadline",
"adHeadline2",
"adHeadline3",
"adDescription1",
"adDescription2",
"adDisplayUrl",
"adLandingPage",
"adType",
"adPromotionLine",
] | apache-2.0 | -2,372,571,817,100,767,700 | 24.546218 | 80 | 0.618295 | false |
GehenHe/Recognize-Face-on-Android | tensorflow/contrib/bayesflow/python/ops/variational_inference.py | 16 | 12030 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variational inference.
## Ops
@@elbo
@@elbo_with_log_joint
@@ELBOForms
@@register_prior
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.bayesflow.python.ops import stochastic_graph as sg
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor as st
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
VI_PRIORS = "__vi_priors__"
def register_prior(variational, prior):
"""Associate a variational `StochasticTensor` with a `Distribution` prior.
This is a helper function used in conjunction with `elbo` that allows users
to specify the mapping between variational distributions and their priors
without having to pass in `variational_with_prior` explicitly.
Args:
variational: `StochasticTensor` q(Z). Approximating distribution.
prior: `Distribution` p(Z). Prior distribution.
Returns:
None
Raises:
ValueError: if variational is not a `StochasticTensor` or `prior` is not
a `Distribution`.
"""
if not isinstance(variational, st.StochasticTensor):
raise TypeError("variational must be a StochasticTensor")
if not isinstance(prior, distribution.Distribution):
raise TypeError("prior must be a Distribution")
ops.add_to_collection(VI_PRIORS, (variational, prior))
class _ELBOForm(object):
pass
class ELBOForms(object):
"""Constants to control the `elbo` calculation.
`analytic_kl` uses the analytic KL divergence between the
variational distribution(s) and the prior(s).
`analytic_entropy` uses the analytic entropy of the variational
distribution(s).
`sample` uses the sample KL or the sample entropy is the joint is provided.
See `elbo` for what is used with `default`.
"""
default, analytic_kl, analytic_entropy, sample = (_ELBOForm()
for _ in range(4))
@staticmethod
def check_form(form):
if form not in {
ELBOForms.default, ELBOForms.analytic_kl, ELBOForms.analytic_entropy,
ELBOForms.sample
}:
raise TypeError("form must be an ELBOForms constant")
def elbo(log_likelihood,
variational_with_prior=None,
keep_batch_dim=True,
form=None,
name="ELBO"):
r"""Evidence Lower BOund. `log p(x) >= ELBO`.
Optimization objective for inference of hidden variables by variational
inference.
This function is meant to be used in conjunction with `StochasticTensor`.
The user should build out the inference network, using `StochasticTensor`s
as latent variables, and the generative network. `elbo` at minimum needs
`p(x|Z)` and assumes that all `StochasticTensor`s upstream of `p(x|Z)` are
the variational distributions. Use `register_prior` to register `Distribution`
priors for each `StochasticTensor`. Alternatively, pass in
`variational_with_prior` specifying all variational distributions and their
priors.
Mathematical details:
```
log p(x) = log \int p(x, Z) dZ
= log \int \frac {q(Z)p(x, Z)}{q(Z)} dZ
= log E_q[\frac {p(x, Z)}{q(Z)}]
>= E_q[log \frac {p(x, Z)}{q(Z)}] = L[q; p, x] # ELBO
L[q; p, x] = E_q[log p(x|Z)p(Z)] - E_q[log q(Z)]
= E_q[log p(x|Z)p(Z)] + H[q] (1)
= E_q[log p(x|Z)] - KL(q || p) (2)
H - Entropy
KL - Kullback-Leibler divergence
```
See section 2.2 of Stochastic Variational Inference by Hoffman et al. for
more, including the ELBO's equivalence to minimizing `KL(q(Z)||p(Z|x))`
in the fully Bayesian setting. https://arxiv.org/pdf/1206.7051.pdf.
`form` specifies which form of the ELBO is used. `form=ELBOForms.default`
tries, in order of preference: analytic KL, analytic entropy, sampling.
Multiple entries in the `variational_with_prior` dict implies a factorization.
e.g. `q(Z) = q(z1)q(z2)q(z3)`.
Args:
log_likelihood: `Tensor` log p(x|Z).
variational_with_prior: dict from `StochasticTensor` q(Z) to
`Distribution` p(Z). If `None`, defaults to all `StochasticTensor`
objects upstream of `log_likelihood` with priors registered with
`register_prior`.
keep_batch_dim: bool. Whether to keep the batch dimension when summing
entropy/KL term. When the sample is per data point, this should be True;
otherwise (e.g. in a Bayesian NN), this should be False.
form: ELBOForms constant. Controls how the ELBO is computed. Defaults to
ELBOForms.default.
name: name to prefix ops with.
Returns:
`Tensor` ELBO of the same type and shape as `log_likelihood`.
Raises:
TypeError: if variationals in `variational_with_prior` are not
`StochasticTensor`s or if priors are not `Distribution`s.
TypeError: if form is not a valid ELBOForms constant.
ValueError: if `variational_with_prior` is None and there are no
`StochasticTensor`s upstream of `log_likelihood`.
ValueError: if any variational does not have a prior passed or registered.
"""
if form is None:
form = ELBOForms.default
with ops.name_scope(name):
model = ops.convert_to_tensor(log_likelihood)
variational_with_prior = _find_variational_and_priors(
model, variational_with_prior)
return _elbo(form, log_likelihood, None, variational_with_prior,
keep_batch_dim)
def elbo_with_log_joint(log_joint,
variational=None,
keep_batch_dim=True,
form=None,
name="ELBO"):
"""Evidence Lower BOund. `log p(x) >= ELBO`.
This method is for models that have computed `p(x,Z)` instead of `p(x|Z)`.
See `elbo` for further details.
Because only the joint is specified, analytic KL is not available.
Args:
log_joint: `Tensor` log p(x, Z).
variational: list of `StochasticTensor` q(Z). If `None`, defaults to all
`StochasticTensor` objects upstream of `log_joint`.
keep_batch_dim: bool. Whether to keep the batch dimension when summing
entropy term. When the sample is per data point, this should be True;
otherwise (e.g. in a Bayesian NN), this should be False.
form: ELBOForms constant. Controls how the ELBO is computed. Defaults to
ELBOForms.default.
name: name to prefix ops with.
Returns:
`Tensor` ELBO of the same type and shape as `log_joint`.
Raises:
TypeError: if variationals in `variational` are not `StochasticTensor`s.
TypeError: if form is not a valid ELBOForms constant.
ValueError: if `variational` is None and there are no `StochasticTensor`s
upstream of `log_joint`.
ValueError: if form is ELBOForms.analytic_kl.
"""
if form is None:
form = ELBOForms.default
if form == ELBOForms.analytic_kl:
raise ValueError("ELBOForms.analytic_kl is not available when using "
"elbo_with_log_joint. Use elbo or a different form.")
with ops.name_scope(name):
model = ops.convert_to_tensor(log_joint)
variational_with_prior = None
if variational is not None:
variational_with_prior = dict(zip(variational, [None] * len(variational)))
variational_with_prior = _find_variational_and_priors(
model, variational_with_prior, require_prior=False)
return _elbo(form, None, log_joint, variational_with_prior, keep_batch_dim)
def _elbo(form, log_likelihood, log_joint, variational_with_prior,
keep_batch_dim):
"""Internal implementation of ELBO. Users should use `elbo`.
Args:
form: ELBOForms constant. Controls how the ELBO is computed.
log_likelihood: `Tensor` log p(x|Z).
log_joint: `Tensor` log p(x, Z).
variational_with_prior: `dict<StochasticTensor, Distribution>`, varational
distributions to prior distributions.
keep_batch_dim: bool. Whether to keep the batch dimension when reducing
the entropy/KL.
Returns:
ELBO `Tensor` with same shape and dtype as `log_likelihood`/`log_joint`.
"""
ELBOForms.check_form(form)
# Order of preference
# 1. Analytic KL: log_likelihood - KL(q||p)
# 2. Analytic entropy: log_likelihood + log p(Z) + H[q], or log_joint + H[q]
# 3. Sample: log_likelihood - (log q(Z) - log p(Z)) =
# log_likelihood + log p(Z) - log q(Z), or log_joint - q(Z)
def _reduce(val):
if keep_batch_dim:
return val
else:
return math_ops.reduce_sum(val)
kl_terms = []
entropy_terms = []
prior_terms = []
for q, z, p in [(qz.distribution, qz.value(), pz)
for qz, pz in variational_with_prior.items()]:
# Analytic KL
kl = None
if log_joint is None and form in {ELBOForms.default, ELBOForms.analytic_kl}:
try:
kl = kullback_leibler.kl(q, p)
logging.info("Using analytic KL between q:%s, p:%s", q, p)
except NotImplementedError as e:
if form == ELBOForms.analytic_kl:
raise e
if kl is not None:
kl_terms.append(-1. * _reduce(kl))
continue
# Analytic entropy
entropy = None
if form in {ELBOForms.default, ELBOForms.analytic_entropy}:
try:
entropy = q.entropy()
logging.info("Using analytic entropy for q:%s", q)
except NotImplementedError as e:
if form == ELBOForms.analytic_entropy:
raise e
if entropy is not None:
entropy_terms.append(_reduce(entropy))
if log_likelihood is not None:
prior = p.log_prob(z)
prior_terms.append(_reduce(prior))
continue
# Sample
if form in {ELBOForms.default, ELBOForms.sample}:
entropy = -q.log_prob(z)
entropy_terms.append(_reduce(entropy))
if log_likelihood is not None:
prior = p.log_prob(z)
prior_terms.append(_reduce(prior))
first_term = log_joint if log_joint is not None else log_likelihood
return sum([first_term] + kl_terms + entropy_terms + prior_terms)
def _find_variational_and_priors(model,
variational_with_prior,
require_prior=True):
"""Find upstream StochasticTensors and match with registered priors."""
if variational_with_prior is None:
# pylint: disable=protected-access
upstreams = sg._upstream_stochastic_nodes([model])
# pylint: enable=protected-access
upstreams = list(upstreams[model])
if not upstreams:
raise ValueError("No upstream stochastic nodes found for tensor: %s",
model)
prior_map = dict(ops.get_collection(VI_PRIORS))
variational_with_prior = {}
for q in upstreams:
if require_prior and (q not in prior_map or prior_map[q] is None):
raise ValueError("No prior specified for StochasticTensor: %s", q)
variational_with_prior[q] = prior_map.get(q)
if not all(
[isinstance(q, st.StochasticTensor) for q in variational_with_prior]):
raise TypeError("variationals must be StochasticTensors")
if not all([
p is None or isinstance(p, distribution.Distribution)
for p in variational_with_prior.values()
]):
raise TypeError("priors must be Distribution objects")
return variational_with_prior
| apache-2.0 | -5,625,669,942,508,287,000 | 35.676829 | 80 | 0.670407 | false |
CasataliaLabs/biscuit_drishtiman | Pmw-2.0.0/Pmw/Pmw_1_3_3/lib/PmwTimeFuncs.py | 2 | 4441 | # Functions for dealing with dates and times.
import re
import string
def timestringtoseconds(text, separator = ':'):
inputList = string.split(string.strip(text), separator)
if len(inputList) != 3:
raise ValueError, 'invalid value: ' + text
sign = 1
if len(inputList[0]) > 0 and inputList[0][0] in ('+', '-'):
if inputList[0][0] == '-':
sign = -1
inputList[0] = inputList[0][1:]
if re.search('[^0-9]', string.join(inputList, '')) is not None:
raise ValueError, 'invalid value: ' + text
hour = string.atoi(inputList[0])
minute = string.atoi(inputList[1])
second = string.atoi(inputList[2])
if minute >= 60 or second >= 60:
raise ValueError, 'invalid value: ' + text
return sign * (hour * 60 * 60 + minute * 60 + second)
_year_pivot = 50
_century = 2000
def setyearpivot(pivot, century = None):
global _year_pivot
global _century
oldvalues = (_year_pivot, _century)
_year_pivot = pivot
if century is not None:
_century = century
return oldvalues
def datestringtojdn(text, format = 'ymd', separator = '/'):
inputList = string.split(string.strip(text), separator)
if len(inputList) != 3:
raise ValueError, 'invalid value: ' + text
if re.search('[^0-9]', string.join(inputList, '')) is not None:
raise ValueError, 'invalid value: ' + text
formatList = list(format)
day = string.atoi(inputList[formatList.index('d')])
month = string.atoi(inputList[formatList.index('m')])
year = string.atoi(inputList[formatList.index('y')])
if _year_pivot is not None:
if year >= 0 and year < 100:
if year <= _year_pivot:
year = year + _century
else:
year = year + _century - 100
jdn = ymdtojdn(year, month, day)
if jdntoymd(jdn) != (year, month, day):
raise ValueError, 'invalid value: ' + text
return jdn
def _cdiv(a, b):
# Return a / b as calculated by most C language implementations,
# assuming both a and b are integers.
if a * b > 0:
return a / b
else:
return -(abs(a) / abs(b))
def ymdtojdn(year, month, day, julian = -1, papal = 1):
# set Julian flag if auto set
if julian < 0:
if papal: # Pope Gregory XIII's decree
lastJulianDate = 15821004L # last day to use Julian calendar
else: # British-American usage
lastJulianDate = 17520902L # last day to use Julian calendar
julian = ((year * 100L) + month) * 100 + day <= lastJulianDate
if year < 0:
# Adjust BC year
year = year + 1
if julian:
return 367L * year - _cdiv(7 * (year + 5001L + _cdiv((month - 9), 7)), 4) + \
_cdiv(275 * month, 9) + day + 1729777L
else:
return (day - 32076L) + \
_cdiv(1461L * (year + 4800L + _cdiv((month - 14), 12)), 4) + \
_cdiv(367 * (month - 2 - _cdiv((month - 14), 12) * 12), 12) - \
_cdiv((3 * _cdiv((year + 4900L + _cdiv((month - 14), 12)), 100)), 4) + \
1 # correction by rdg
def jdntoymd(jdn, julian = -1, papal = 1):
# set Julian flag if auto set
if julian < 0:
if papal: # Pope Gregory XIII's decree
lastJulianJdn = 2299160L # last jdn to use Julian calendar
else: # British-American usage
lastJulianJdn = 2361221L # last jdn to use Julian calendar
julian = (jdn <= lastJulianJdn);
x = jdn + 68569L
if julian:
x = x + 38
daysPer400Years = 146100L
fudgedDaysPer4000Years = 1461000L + 1
else:
daysPer400Years = 146097L
fudgedDaysPer4000Years = 1460970L + 31
z = _cdiv(4 * x, daysPer400Years)
x = x - _cdiv((daysPer400Years * z + 3), 4)
y = _cdiv(4000 * (x + 1), fudgedDaysPer4000Years)
x = x - _cdiv(1461 * y, 4) + 31
m = _cdiv(80 * x, 2447)
d = x - _cdiv(2447 * m, 80)
x = _cdiv(m, 11)
m = m + 2 - 12 * x
y = 100 * (z - 49) + y + x
# Convert from longs to integers.
yy = int(y)
mm = int(m)
dd = int(d)
if yy <= 0:
# Adjust BC years.
yy = yy - 1
return (yy, mm, dd)
def stringtoreal(text, separator = '.'):
if separator != '.':
if string.find(text, '.') >= 0:
raise ValueError, 'invalid value: ' + text
index = string.find(text, separator)
if index >= 0:
text = text[:index] + '.' + text[index + 1:]
return string.atof(text)
| gpl-3.0 | -3,340,865,132,212,912,000 | 28.417808 | 78 | 0.56789 | false |
pnorman/mapnik | scons/scons-local-2.4.1/SCons/Tool/mslib.py | 6 | 2210 | """SCons.Tool.mslib
Tool-specific initialization for lib (MicroSoft library archiver).
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mslib.py rel_2.4.1:3453:73fefd3ea0b0 2015/11/09 03:25:05 bdbaddog"
import SCons.Defaults
import SCons.Tool
import SCons.Tool.msvs
import SCons.Tool.msvc
import SCons.Util
from MSCommon import msvc_exists, msvc_setup_env_once
def generate(env):
"""Add Builders and construction variables for lib to an Environment."""
SCons.Tool.createStaticLibBuilder(env)
# Set-up ms tools paths
msvc_setup_env_once(env)
env['AR'] = 'lib'
env['ARFLAGS'] = SCons.Util.CLVar('/nologo')
env['ARCOM'] = "${TEMPFILE('$AR $ARFLAGS /OUT:$TARGET $SOURCES','$ARCOMSTR')}"
env['LIBPREFIX'] = ''
env['LIBSUFFIX'] = '.lib'
def exists(env):
return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 | 3,082,534,838,143,315,500 | 33.53125 | 104 | 0.734389 | false |
epuzanov/ZenPacks.community.PgSQLMon | ZenPacks/community/PgSQLMon/PgSqlSrvInst.py | 1 | 1065 | ################################################################################
#
# This program is part of the PgSQLMon Zenpack for Zenoss.
# Copyright (C) 2009-2012 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""PgSqlSrvInst
PgSqlSrvInst reperesents PostgreSQL server instance
$Id: PgSqlSrvInst.py,v 1.0 2012/04/24 23:19:40 egor Exp $"""
__version__ = "$Revision: 1.0 $"[11:-2]
from Globals import InitializeClass
from ZenPacks.community.RDBMS.DBSrvInst import DBSrvInst
class PgSqlSrvInst(DBSrvInst):
"""
PgSQL SrvInst object
"""
ZENPACKID = 'ZenPacks.community.PgSQLMon'
port = 5432
_properties = DBSrvInst._properties + (
{'id':'port', 'type':'int', 'mode':'w'},
)
def getRRDTemplates(self):
"""
Return the RRD Templates list
"""
return []
InitializeClass(PgSqlSrvInst)
| gpl-2.0 | 508,947,185,697,748,900 | 24.357143 | 80 | 0.562441 | false |
youprofit/zato | code/zato-server/test/zato/server/test_message.py | 6 | 10198 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from logging import getLogger
from unittest import TestCase
from uuid import uuid4
# Bunch
from bunch import Bunch, bunchify
# lxml
from lxml import etree
# Zato
from zato.common.test import rand_string
from zato.server.message import JSONPointerStore, Mapper, XPathStore
logger = getLogger(__name__)
def config_value(value):
return Bunch({'value':value})
# ################################################################################################################################
class TestJSONPointerStore(TestCase):
def test_add(self):
jps = JSONPointerStore()
name1, expr1 = '1', config_value('/{}/{}'.format(*rand_string(2)))
name2, expr2 = '2', config_value('/aaa/{}/{}'.format(*rand_string(2)))
name3, expr3 = '3', config_value('/aaa/{}/{}'.format(*rand_string(2)))
name4, expr4 = '2', config_value('/aaa/{}/{}'.format(*rand_string(2)))
jps.add(name1, expr1.value)
self.assertIn(name1, jps.data)
self.assertEquals(expr1.value, jps.data[name1].path)
jps.add(name2, expr2.value)
self.assertIn(name2, jps.data)
self.assertEquals(expr2.value, jps.data[name2].path)
jps.add(name3, expr3.value)
self.assertIn(name3, jps.data)
self.assertEquals(expr3.value, jps.data[name3].path)
# name4's value is '2' so it overrides 2
jps.add(name4, expr4.value)
self.assertIn(name4, jps.data)
self.assertEquals(expr4.value, jps.data[name2].path)
self.assertEquals(expr4.value, jps.data[name4].path)
def test_get(self):
jps = JSONPointerStore()
c_value, d_value = rand_string(2)
doc = {
'a': {
'b': [
{'c': c_value},
{'d': d_value},
]
},
'e': None,
'f': 0
}
name1, expr1 = '1', config_value('/a')
name2, expr2 = '2', config_value('/a/b')
name3, expr3 = '3', config_value('/a/b/0')
name4, expr4 = '4', config_value('/a/b/1')
name5, expr5 = '5', config_value('/a/b/0/c')
# This will return default because the path points to None
name6, expr6 = '6', config_value('/e')
# This will return default because there is no such path
name7, expr7 = '7', config_value('/e/e2/e3')
# This will not return None because 0 is not None even though it's False in boolean sense
name8, expr8 = '8', config_value('/f')
jps.add(name1, expr1.value)
value = jps.get(name1, doc)
self.assertListEqual(value.keys(), ['b'])
jps.add(name2, expr2.value)
value = jps.get(name2, doc)
self.assertDictEqual(value[0], {'c':c_value})
self.assertDictEqual(value[1], {'d':d_value})
jps.add(name3, expr3.value)
value = jps.get(name3, doc)
self.assertDictEqual(value, {'c':c_value})
jps.add(name4, expr4.value)
value = jps.get(name4, doc)
self.assertDictEqual(value, {'d':d_value})
jps.add(name5, expr5.value)
value = jps.get(name5, doc)
self.assertEquals(value, c_value)
default1 = rand_string()
default2 = rand_string()
jps.add(name6, expr6.value)
value = jps.get(name6, doc, default1)
self.assertEquals(value, default1)
jps.add(name7, expr7.value)
value = jps.get(name7, doc, default2)
self.assertEquals(value, default2)
jps.add(name8, expr8.value)
value = jps.get(name8, doc)
self.assertEquals(value, 0)
def test_set_defaults(self):
jps = JSONPointerStore()
value1 = {'b':{}}
value2 = {'c':{}}
doc = {}
name1, expr1 = '1', config_value('/a')
name2, expr2 = '2', config_value('/a/b')
jps.add(name1, expr1.value)
jps.add(name2, expr2.value)
jps.set(name1, doc, value1)
value = jps.get(name1, doc)
self.assertEquals(value, value1)
jps.set(name2, doc, value2)
value = jps.get(name2, doc)
self.assertDictEqual(value, value2)
def test_set_in_place(self):
jps = JSONPointerStore()
doc = {'a':'b'}
value_random = rand_string()
name1, expr1 = '1', config_value('/a')
jps.add(name1, expr1.value)
# in_place is False so a new doc is created and the previous one should be retained
new_doc = jps.set(name1, doc, value_random, True, in_place=False)
value = jps.get(name1, new_doc)
self.assertEquals(value, value_random)
value = jps.get(name1, doc)
self.assertEquals(value, 'b')
def test_set_skip_missing(self):
jps = JSONPointerStore()
doc = {}
name1, expr1 = '1', config_value('/a')
name2, expr2 = '2', config_value('/b')
value1, value2 = rand_string(2)
default1, default2 = rand_string(2)
jps.add(name1, expr1.value)
jps.add(name2, expr2.value)
# value is equal to default1 because it is never set by jps.set
jps.set(name1, doc, value1, True)
value = jps.get(name1, doc, default1)
self.assertEquals(value, default1)
self.assertDictEqual(doc, {})
jps.set(name2, doc, value2)
value = jps.get(name2, doc, default2)
self.assertEquals(value, value2)
self.assertDictEqual(doc, {'b':value2})
def test_set_create_missing(self):
jps = JSONPointerStore()
doc = {}
name1, expr1, value1 = '1', config_value('/a/b/c/d'), rand_string()
name2, expr2, value2 = '2', config_value('/a/b/c/dd'), rand_string()
name3, expr3, value3 = '3', config_value('/a/b/cc/d'), rand_string()
jps.add(name1, expr1.value)
jps.add(name2, expr2.value)
jps.add(name3, expr3.value)
# Creates all the missing path parts in the empty document
jps.set(name1, doc, value1)
jps.set(name2, doc, value2)
jps.set(name3, doc, value3)
doc = bunchify(doc)
self.assertEquals(doc.a.b.c.d, value1)
self.assertEquals(doc.a.b.c.dd, value2)
self.assertEquals(doc.a.b.cc.d, value3)
# ################################################################################################################################
class TestXPathStore(TestCase):
def test_store_replace(self):
expr1 = '/root/elem1'
expr2 = '//jt:elem2'
expr3 = '//list1/item1'
expr4 = '//item2/key'
ns_map={'jt':'just-testing'}
for idx, expr in enumerate([expr1, expr2, expr3, expr4]):
msg = """
<root>
<elem1>elem1</elem1>
<elem2 xmlns="just-testing">elem2</elem2>
<list1>
<item1>item-a</item1>
<item1>item-b</item1>
<item2>
<key>key</key>
</item2>
</list1>
</root>
""".encode('utf-8')
doc = etree.fromstring(msg)
new_value = uuid4().hex
config = Bunch()
config.name = str(idx)
config.value = expr
xps = XPathStore()
xps.add(config.name, config, ns_map=ns_map)
xps.set(config.name, doc, new_value, ns_map)
result = xps.get(config.name, doc)
self.assertTrue(len(result) > 0)
if isinstance(result, list):
for item in result:
logger.warn('%r %r %r %r %s', idx, expr, item, result, etree.tostring(doc, pretty_print=1))
self.assertEquals(item, new_value)
else:
self.assertEquals(result, new_value)
def test_get(self):
msg = """
<root>
<a>123</a>
<b>456</b>
</root>
""".encode('utf-8')
config1 = Bunch()
config1.name = '1'
config1.value = '//a'
config2 = Bunch()
config2.name = '2'
config2.value = '//zzz'
default = rand_string()
xps = XPathStore()
xps.add(config1.name, config1)
xps.add(config2.name, config2)
doc = etree.fromstring(msg)
value = xps.get('1', doc)
self.assertEquals(value, '123')
value = xps.get('2', doc, default)
self.assertEquals(value, default)
def test_set(self):
msg = """
<root>
<a>123</a>
<b>456</b>
</root>
""".encode('utf-8')
config1 = Bunch()
config1.name = '1'
config1.value = '//a'
new_value = rand_string()
config2 = Bunch()
config2.name = '2'
config2.value = '/zzz'
xps = XPathStore()
xps.add(config1.name, config1)
xps.add(config2.name, config2)
doc = etree.fromstring(msg)
xps.set('1', doc, new_value)
value = xps.get('1', doc)
self.assertEquals(value, new_value)
xps.set('2', doc, new_value)
value = xps.get('2', doc)
self.assertEquals(value, None)
# ################################################################################################################################
class TestMapper(TestCase):
def test_map(self):
source = {
'a': {
'b': [1, 2, '3', 4],
'c': {'d':'123'}
}}
m = Mapper(source)
# 1:1 mappings
m.map('/a/b', '/aa')
m.map('/a/c/d', '/bb')
# Force conversion to int
m.map('int:/a/c/d', '/cc/dd')
m.map('int:/a/c/d', '/cc/ee/ff/19')
target = bunchify(m.target)
self.assertListEqual(target.aa, [1, 2, '3', 4])
self.assertEquals(target.bb, '123')
self.assertEquals(target.cc.dd, 123)
| gpl-3.0 | -4,351,046,926,534,961,700 | 27.971591 | 130 | 0.51265 | false |
hesseltuinhof/mxnet | tools/parse_log.py | 15 | 1545 | #!/usr/bin/env python
"""
parse mxnet output log into a markdown table
"""
import argparse
import sys
import re
parser = argparse.ArgumentParser(description='Parse mxnet output log')
parser.add_argument('logfile', nargs=1, type=str,
help = 'the log file for parsing')
parser.add_argument('--format', type=str, default='markdown',
choices = ['markdown', 'none'],
help = 'the format of the parsed outout')
args = parser.parse_args()
with open(args.logfile[0]) as f:
lines = f.readlines()
res = [re.compile('.*Epoch\[(\d+)\] Train.*=([.\d]+)'),
re.compile('.*Epoch\[(\d+)\] Valid.*=([.\d]+)'),
re.compile('.*Epoch\[(\d+)\] Time.*=([.\d]+)')]
data = {}
for l in lines:
i = 0
for r in res:
m = r.match(l)
if m is not None:
break
i += 1
if m is None:
continue
assert len(m.groups()) == 2
epoch = int(m.groups()[0])
val = float(m.groups()[1])
if epoch not in data:
data[epoch] = [0] * len(res) * 2
data[epoch][i*2] += val
data[epoch][i*2+1] += 1
if args.format == 'markdown':
print "| epoch | train-accuracy | valid-accuracy | time |"
print "| --- | --- | --- | --- |"
for k, v in data.items():
print "| %2d | %f | %f | %.1f |" % (k+1, v[0]/v[1], v[2]/v[3], v[4]/v[5])
elif args.format == 'none':
print "epoch\ttrain-accuracy\tvalid-accuracy\ttime"
for k, v in data.items():
print "%2d\t%f\t%f\t%.1f" % (k+1, v[0]/v[1], v[2]/v[3], v[4]/v[5])
| apache-2.0 | -8,872,981,783,342,119,000 | 28.150943 | 81 | 0.522977 | false |
zepheira/zenpub | thirdparty/geopy_export/geopy/util.py | 3 | 2501 | import re
import logging
import htmlentitydefs
import xml.dom.minidom
from xml.parsers.expat import ExpatError
try:
from decimal import Decimal
except ImportError:
NUMBER_TYPES = (int, long, float)
else:
NUMBER_TYPES = (int, long, float, Decimal)
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('geopy')
logger.addHandler(NullHandler())
def pairwise(seq):
for i in range(0, len(seq) - 1):
yield (seq[i], seq[i + 1])
def join_filter(sep, seq, pred=bool):
return sep.join([unicode(i) for i in seq if pred(i)])
def get_encoding(page, contents=None):
plist = page.headers.getplist()
if plist:
key, value = plist[-1].split('=')
if key.lower() == 'charset':
return value
if contents:
try:
return xml.dom.minidom.parseString(contents).encoding
except ExpatError:
pass
def decode_page(page):
contents = page.read()
encoding = get_encoding(page, contents) or sys.getdefaultencoding()
return unicode(contents, encoding=encoding).encode('utf-8')
def get_first_text(node, tag_names, strip=None):
if isinstance(tag_names, basestring):
tag_names = [tag_names]
if node:
while tag_names:
nodes = node.getElementsByTagName(tag_names.pop(0))
if nodes:
child = nodes[0].firstChild
return child and child.nodeValue.strip(strip)
def join_filter(sep, seq, pred=bool):
return sep.join([unicode(i) for i in seq if pred(i)])
import re, htmlentitydefs
def unescape(text):
"""
Removes HTML or XML character references and entities from a text string.
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
try:
reversed
except NameError:
def reversed(seq):
i = len(seq)
while i > 0:
i -= 1
yield seq[i]
else:
reversed = reversed
| apache-2.0 | 8,204,440,484,568,202,000 | 25.326316 | 77 | 0.572571 | false |
elkingtonmcb/pattern | examples/04-search/01-search.py | 21 | 1721 | import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.search import search
from pattern.en import parsetree
# The pattern.search module contains a number of pattern matching tools
# to search a string syntactically (word function) or semantically (word meaning).
# If you only need to match string characters, regular expressions are faster.
# However, if you are scanning a sentence for concept types (e.g. all flowers)
# or parts-of-speech (e.g. all adjectives), this module provides the functionality.
# In the simplest case, the search() function
# takes a word (or a sequence of words) that you want to retrieve:
print search("rabbit", "big white rabbit")
print
# Search words can contain wildcard characters:
print search("rabbit*", "big white rabbit")
print search("rabbit*", "big white rabbits")
print
# Search words can contain different options:
print search("rabbit|cony|bunny", "big black bunny")
print
# Things become more interesting if we involve the pattern.en.parser module.
# The parser takes a string, identifies words, and assigns a part-of-speech tag
# to each word, for example NN (noun) or JJ (adjective).
# A parsed sentence can be scanned for part-of-speech tags:
s = parsetree("big white rabbit")
print search("JJ", s) # all adjectives
print search("NN", s) # all nouns
print search("NP", s) # all noun phrases
print
# Since the search() is case-insensitive, uppercase search words
# are always considered to be tags (or taxonomy terms - see further examples).
# The return value is a Match object,
# where Match.words is a list of Word objects that matched:
m = search("NP", s)
for word in m[0].words:
print word.string, word.tag
| bsd-3-clause | -3,540,690,211,711,898,600 | 39.023256 | 87 | 0.740267 | false |
mantidproject/mantid | scripts/abins/powderdata.py | 3 | 4181 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import numpy as np
from typing import Dict, Optional
PowderDict = Dict[str, Dict[int, np.ndarray]]
class PowderData:
"""
Data container for tensors used in analytic powder-averaging model
:param a_tensors: dict of total displacement tensors, indexed by integer
k-point identities
:param b_tensors: dict of mode-by-mode tensors, indexed by integer k-point
identities
:param frequencies: frequencies corresponding to data in b_tensors; usually
this has already been pruned to remove imaginary modes.
:param num_atoms: Expected number of atoms in tensor data. If provided,
this value is used for sanity-checking
"""
def __init__(self, *,
a_tensors: Dict[int, np.ndarray],
b_tensors: Dict[int, np.ndarray],
frequencies: Dict[int, np.ndarray],
num_atoms: Optional[int] = None):
self._data = {"a_tensors": a_tensors,
"b_tensors": b_tensors,
"frequencies": frequencies} # type: PowderDict
self._num_atoms = num_atoms
self._check_data()
def get_a_tensors(self) -> Dict[int, np.ndarray]:
return self._data["a_tensors"]
def get_b_tensors(self) -> Dict[int, np.ndarray]:
return self._data["b_tensors"]
def get_frequencies(self) -> np.ndarray:
return self._data["frequencies"]
def extract(self) -> PowderDict:
"""Get tensor data as dict"""
return {key: {str(k): array for k, array in data.items()}
for key, data in self._data.items()}
@classmethod
def from_extracted(cls, dct: PowderDict,
num_atoms: Optional[int] = None):
"""Reconstruct a PowderData object from the extracted dictionary representation"""
a_tensors = {int(k_index): data for k_index, data in dct['a_tensors'].items()}
b_tensors = {int(k_index): data for k_index, data in dct['b_tensors'].items()}
frequencies = {int(k_index): data for k_index, data in dct['frequencies'].items()}
return cls(a_tensors=a_tensors, b_tensors=b_tensors,
frequencies=frequencies, num_atoms=num_atoms)
def _check_data(self) -> None:
for key in "a_tensors", "b_tensors", "frequencies":
if not isinstance(self._data[key], dict):
raise TypeError(f"Value of {key} should be a dictionary.")
for k, data in self._data[key].items():
if not isinstance(data, np.ndarray):
raise TypeError(f"Items in {key} dict should be numpy arrays")
if self._num_atoms is not None:
self._num_atoms = int(self._num_atoms)
if self._num_atoms <= 0:
raise ValueError("Invalid value of num_atoms.")
for _, tensor in self.get_a_tensors().items():
if tensor.shape[0] != self._num_atoms:
raise ValueError("Invalid dimension of a_tensors.")
for _, tensor in self.get_b_tensors().items():
if tensor.shape[0] != self._num_atoms:
raise ValueError("Invalid dimension of b_tensors.")
if self.get_frequencies().keys() != self.get_a_tensors().keys():
raise ValueError("Frequency data does not cover same number of kpts as a_tensors")
if self.get_frequencies().keys() != self.get_b_tensors().keys():
raise ValueError("Frequency data does not cover same number of kpts as b_tensors")
for k, frequency_set in self.get_frequencies().items():
if frequency_set.size != self.get_b_tensors()[k].shape[1]:
raise ValueError(f"Number of frequencies does not match shape of b_tensors at k-point {k}")
def __str__(self) -> str:
return "Tensor data for analytic powder averaging"
| gpl-3.0 | -2,474,078,237,197,263,400 | 40.81 | 107 | 0.608706 | false |
Bysmyyr/chromium-crosswalk | third_party/WebKit/Source/bindings/scripts/aggregate_generated_bindings.py | 22 | 8191 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate aggregate .cpp files that include multiple V8 binding .cpp files.
This can be a single output file, to preserve symbol space; or multiple output
files, to reduce maximum compilation unit size and allow parallel compilation.
Usage:
aggregate_generated_bindings.py COMPONENT_DIR IDL_FILES_LIST -- OUTPUT_FILE1 OUTPUT_FILE2 ...
COMPONENT_DIR is the relative directory of a component, e.g., 'core', 'modules'.
IDL_FILES_LIST is a text file containing the IDL file paths, so the command
line doesn't exceed OS length limits.
OUTPUT_FILE1 etc. are filenames of output files.
Design doc: http://www.chromium.org/developers/design-documents/idl-build
"""
import errno
import os
import re
import sys
from utilities import should_generate_impl_file_from_idl, get_file_contents, idl_filename_to_component, idl_filename_to_interface_name, read_idl_files_list_from_file
# A regexp for finding Conditional attributes in interface definitions.
CONDITIONAL_PATTERN = re.compile(
r'\['
r'[^\]]*'
r'Conditional=([\_0-9a-zA-Z]*)'
r'[^\]]*'
r'\]\s*'
r'((callback|partial)\s+)?'
r'interface\s+'
r'\w+\s*'
r'(:\s*\w+\s*)?'
r'{',
re.MULTILINE)
COPYRIGHT_TEMPLATE = """/*
* THIS FILE WAS AUTOMATICALLY GENERATED, DO NOT EDIT.
*
* This file was generated by the action_derivedsourcesallinone.py script.
*
* Copyright (C) 2009 Google Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
"""
def extract_conditional(idl_contents):
"""Find [Conditional] interface extended attribute."""
match = CONDITIONAL_PATTERN.search(idl_contents)
if not match:
return None
return match.group(1)
def extract_meta_data(file_paths):
"""Extracts conditional and interface name from each IDL file."""
meta_data_list = []
for file_path in file_paths:
if not file_path.endswith('.idl'):
print 'WARNING: non-IDL file passed: "%s"' % file_path
continue
if not os.path.exists(file_path):
print 'WARNING: file not found: "%s"' % file_path
continue
idl_file_contents = get_file_contents(file_path)
if not should_generate_impl_file_from_idl(idl_file_contents):
continue
# Extract interface name from file name
interface_name = idl_filename_to_interface_name(file_path)
meta_data = {
'conditional': extract_conditional(idl_file_contents),
'name': interface_name,
}
meta_data_list.append(meta_data)
return meta_data_list
def generate_content(component_dir, aggregate_partial_interfaces, files_meta_data_this_partition):
# Add fixed content.
output = [COPYRIGHT_TEMPLATE,
'#define NO_IMPLICIT_ATOMICSTRING\n\n']
# List all includes segmented by if and endif.
prev_conditional = None
files_meta_data_this_partition.sort(key=lambda e: e['conditional'])
for meta_data in files_meta_data_this_partition:
conditional = meta_data['conditional']
if prev_conditional != conditional:
if prev_conditional:
output.append('#endif\n')
if conditional:
output.append('\n#if ENABLE(%s)\n' % conditional)
prev_conditional = conditional
if aggregate_partial_interfaces:
cpp_filename = 'V8%sPartial.cpp' % meta_data['name']
else:
cpp_filename = 'V8%s.cpp' % meta_data['name']
output.append('#include "bindings/%s/v8/%s"\n' %
(component_dir, cpp_filename))
if prev_conditional:
output.append('#endif\n')
return ''.join(output)
def write_content(content, output_file_name):
parent_path, file_name = os.path.split(output_file_name)
if not os.path.exists(parent_path):
print 'Creating directory: %s' % parent_path
os.makedirs(parent_path)
with open(output_file_name, 'w') as f:
f.write(content)
def main(args):
if len(args) <= 4:
raise Exception('Expected at least 5 arguments.')
component_dir = args[1]
input_file_name = args[2]
in_out_break_index = args.index('--')
output_file_names = args[in_out_break_index + 1:]
idl_file_names = read_idl_files_list_from_file(input_file_name)
components = set([idl_filename_to_component(filename)
for filename in idl_file_names])
if len(components) != 1:
raise Exception('Cannot aggregate generated codes in different components')
aggregate_partial_interfaces = component_dir not in components
files_meta_data = extract_meta_data(idl_file_names)
total_partitions = len(output_file_names)
for partition, file_name in enumerate(output_file_names):
files_meta_data_this_partition = [
meta_data for meta_data in files_meta_data
if hash(meta_data['name']) % total_partitions == partition]
file_contents = generate_content(component_dir,
aggregate_partial_interfaces,
files_meta_data_this_partition)
write_content(file_contents, file_name)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause | 380,930,723,111,739,300 | 38.379808 | 165 | 0.69027 | false |
python-control/python-control | control/freqplot.py | 1 | 56813 | # freqplot.py - frequency domain plots for control systems
#
# Author: Richard M. Murray
# Date: 24 May 09
#
# This file contains some standard control system plots: Bode plots,
# Nyquist plots and pole-zero diagrams. The code for Nichols charts
# is in nichols.py.
#
# Copyright (c) 2010 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import warnings
from .ctrlutil import unwrap
from .bdalg import feedback
from .margins import stability_margins
from .exception import ControlMIMONotImplemented
from .statesp import StateSpace
from .xferfcn import TransferFunction
from . import config
__all__ = ['bode_plot', 'nyquist_plot', 'gangof4_plot', 'singular_values_plot',
'bode', 'nyquist', 'gangof4']
# Default values for module parameter variables
_freqplot_defaults = {
'freqplot.feature_periphery_decades': 1,
'freqplot.number_of_samples': 1000,
'freqplot.dB': False, # Plot gain in dB
'freqplot.deg': True, # Plot phase in degrees
'freqplot.Hz': False, # Plot frequency in Hertz
'freqplot.grid': True, # Turn on grid for gain and phase
'freqplot.wrap_phase': False, # Wrap the phase plot at a given value
# deprecations
'deprecated.bode.dB': 'freqplot.dB',
'deprecated.bode.deg': 'freqplot.deg',
'deprecated.bode.Hz': 'freqplot.Hz',
'deprecated.bode.grid': 'freqplot.grid',
'deprecated.bode.wrap_phase': 'freqplot.wrap_phase',
}
#
# Main plotting functions
#
# This section of the code contains the functions for generating
# frequency domain plots
#
#
# Bode plot
#
def bode_plot(syslist, omega=None,
plot=True, omega_limits=None, omega_num=None,
margins=None, method='best', *args, **kwargs):
"""Bode plot for a system
    Plots a Bode plot for the system over an (optional) frequency range.
Parameters
----------
syslist : linsys
List of linear input/output systems (single system is OK)
omega : array_like
List of frequencies in rad/sec to be used for frequency response
dB : bool
        If True, plot result in dB. Default value (False) set by
        config.defaults['freqplot.dB'].
Hz : bool
If True, plot frequency in Hz (omega must be provided in rad/sec).
Default value (False) set by config.defaults['freqplot.Hz']
deg : bool
If True, plot phase in degrees (else radians). Default value (True)
        set by config.defaults['freqplot.deg'].
plot : bool
If True (default), plot magnitude and phase
omega_limits : array_like of two values
        Limits of the frequency vector to generate.
If Hz=True the limits are in Hz otherwise in rad/s.
omega_num : int
Number of samples to plot. Defaults to
config.defaults['freqplot.number_of_samples'].
margins : bool
If True, plot gain and phase margin.
    method : str
        Method to use in computing margins (see :func:`stability_margins`).
*args : :func:`matplotlib.pyplot.plot` positional properties, optional
Additional arguments for `matplotlib` plots (color, linestyle, etc)
**kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
Additional keywords (passed to `matplotlib`)
Returns
-------
    mag : ndarray (or list of ndarray if len(syslist) > 1)
        magnitude
    phase : ndarray (or list of ndarray if len(syslist) > 1)
        phase in radians
    omega : ndarray (or list of ndarray if len(syslist) > 1)
frequency in rad/sec
Other Parameters
----------------
grid : bool
If True, plot grid lines on gain and phase plots. Default is set by
`config.defaults['freqplot.grid']`.
initial_phase : float
Set the reference phase to use for the lowest frequency. If set, the
initial phase of the Bode plot will be set to the value closest to the
value specified. Units are in either degrees or radians, depending on
the `deg` parameter. Default is -180 if wrap_phase is False, 0 if
wrap_phase is True.
wrap_phase : bool or float
If wrap_phase is `False`, then the phase will be unwrapped so that it
is continuously increasing or decreasing. If wrap_phase is `True` the
phase will be restricted to the range [-180, 180) (or [:math:`-\\pi`,
:math:`\\pi`) radians). If `wrap_phase` is specified as a float, the
phase will be offset by 360 degrees if it falls below the specified
        value. Defaults to `False`, set by config.defaults['freqplot.wrap_phase'].
The default values for Bode plot configuration parameters can be reset
using the `config.defaults` dictionary, with module name 'bode'.
Notes
-----
1. Alternatively, you may use the lower-level methods
:meth:`LTI.frequency_response` or ``sys(s)`` or ``sys(z)`` or to
generate the frequency response for a single system.
2. If a discrete time model is given, the frequency response is plotted
along the upper branch of the unit circle, using the mapping ``z =
exp(1j * omega * dt)`` where `omega` ranges from 0 to `pi/dt` and `dt`
is the discrete timebase. If timebase not specified (``dt=True``),
`dt` is set to 1.
Examples
--------
>>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
>>> mag, phase, omega = bode(sys)
"""
# Make a copy of the kwargs dictionary since we will modify it
kwargs = dict(kwargs)
# Check to see if legacy 'Plot' keyword was used
if 'Plot' in kwargs:
import warnings
warnings.warn("'Plot' keyword is deprecated in bode_plot; use 'plot'",
FutureWarning)
# Map 'Plot' keyword to 'plot' keyword
plot = kwargs.pop('Plot')
# Get values for params (and pop from list to allow keyword use in plot)
dB = config._get_param(
'freqplot', 'dB', kwargs, _freqplot_defaults, pop=True)
deg = config._get_param(
'freqplot', 'deg', kwargs, _freqplot_defaults, pop=True)
Hz = config._get_param(
'freqplot', 'Hz', kwargs, _freqplot_defaults, pop=True)
grid = config._get_param(
'freqplot', 'grid', kwargs, _freqplot_defaults, pop=True)
plot = config._get_param('freqplot', 'plot', plot, True)
margins = config._get_param(
'freqplot', 'margins', margins, False)
wrap_phase = config._get_param(
'freqplot', 'wrap_phase', kwargs, _freqplot_defaults, pop=True)
initial_phase = config._get_param(
'freqplot', 'initial_phase', kwargs, None, pop=True)
omega_num = config._get_param('freqplot', 'number_of_samples', omega_num)
# If argument was a singleton, turn it into a tuple
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
omega, omega_range_given = _determine_omega_vector(
syslist, omega, omega_limits, omega_num)
if plot:
# Set up the axes with labels so that multiple calls to
# bode_plot will superimpose the data. This was implicit
# before matplotlib 2.1, but changed after that (See
# https://github.com/matplotlib/matplotlib/issues/9024).
# The code below should work on all cases.
# Get the current figure
if 'sisotool' in kwargs:
fig = kwargs['fig']
ax_mag = fig.axes[0]
ax_phase = fig.axes[2]
sisotool = kwargs['sisotool']
del kwargs['fig']
del kwargs['sisotool']
else:
fig = plt.gcf()
ax_mag = None
ax_phase = None
sisotool = False
# Get the current axes if they already exist
for ax in fig.axes:
if ax.get_label() == 'control-bode-magnitude':
ax_mag = ax
elif ax.get_label() == 'control-bode-phase':
ax_phase = ax
# If no axes present, create them from scratch
if ax_mag is None or ax_phase is None:
plt.clf()
ax_mag = plt.subplot(211, label='control-bode-magnitude')
ax_phase = plt.subplot(
212, label='control-bode-phase', sharex=ax_mag)
mags, phases, omegas, nyquistfrqs = [], [], [], []
for sys in syslist:
if not sys.issiso():
# TODO: Add MIMO bode plots.
raise ControlMIMONotImplemented(
"Bode is currently only implemented for SISO systems.")
else:
omega_sys = np.asarray(omega)
if sys.isdtime(strict=True):
nyquistfrq = math.pi / sys.dt
if not omega_range_given:
# limit up to and including nyquist frequency
omega_sys = np.hstack((
omega_sys[omega_sys < nyquistfrq], nyquistfrq))
else:
nyquistfrq = None
mag, phase, omega_sys = sys.frequency_response(omega_sys)
mag = np.atleast_1d(mag)
phase = np.atleast_1d(phase)
#
# Post-process the phase to handle initial value and wrapping
#
if initial_phase is None:
# Start phase in the range 0 to -360 w/ initial phase = -180
# If wrap_phase is true, use 0 instead (phase \in (-pi, pi])
initial_phase = -math.pi if wrap_phase is not True else 0
elif isinstance(initial_phase, (int, float)):
# Allow the user to override the default calculation
if deg:
initial_phase = initial_phase/180. * math.pi
else:
raise ValueError("initial_phase must be a number.")
# Shift the phase if needed
if abs(phase[0] - initial_phase) > math.pi:
phase -= 2*math.pi * \
round((phase[0] - initial_phase) / (2*math.pi))
# Phase wrapping
if wrap_phase is False:
phase = unwrap(phase) # unwrap the phase
elif wrap_phase is True:
pass # default calculation OK
elif isinstance(wrap_phase, (int, float)):
phase = unwrap(phase) # unwrap the phase first
if deg:
wrap_phase *= math.pi/180.
# Shift the phase if it is below the wrap_phase
phase += 2*math.pi * np.maximum(
0, np.ceil((wrap_phase - phase)/(2*math.pi)))
else:
raise ValueError("wrap_phase must be bool or float.")
mags.append(mag)
phases.append(phase)
omegas.append(omega_sys)
nyquistfrqs.append(nyquistfrq)
# Get the dimensions of the current axis, which we will divide up
# TODO: Not current implemented; just use subplot for now
if plot:
nyquistfrq_plot = None
if Hz:
omega_plot = omega_sys / (2. * math.pi)
if nyquistfrq:
nyquistfrq_plot = nyquistfrq / (2. * math.pi)
else:
omega_plot = omega_sys
if nyquistfrq:
nyquistfrq_plot = nyquistfrq
phase_plot = phase * 180. / math.pi if deg else phase
mag_plot = mag
if nyquistfrq_plot:
# append data for vertical nyquist freq indicator line.
                    # if this extra nyquist line is plotted in a single plot
                    # command then line order is preserved when
                    # creating a legend, e.g. legend(('sys1', 'sys2'))
omega_nyq_line = np.array((np.nan, nyquistfrq, nyquistfrq))
omega_plot = np.hstack((omega_plot, omega_nyq_line))
mag_nyq_line = np.array((
np.nan, 0.7*min(mag_plot), 1.3*max(mag_plot)))
mag_plot = np.hstack((mag_plot, mag_nyq_line))
phase_range = max(phase_plot) - min(phase_plot)
phase_nyq_line = np.array(
(np.nan,
min(phase_plot) - 0.2 * phase_range,
max(phase_plot) + 0.2 * phase_range))
phase_plot = np.hstack((phase_plot, phase_nyq_line))
#
# Magnitude plot
#
if dB:
ax_mag.semilogx(omega_plot, 20 * np.log10(mag_plot),
*args, **kwargs)
else:
ax_mag.loglog(omega_plot, mag_plot, *args, **kwargs)
# Add a grid to the plot + labeling
ax_mag.grid(grid and not margins, which='both')
ax_mag.set_ylabel("Magnitude (dB)" if dB else "Magnitude")
#
# Phase plot
#
# Plot the data
ax_phase.semilogx(omega_plot, phase_plot, *args, **kwargs)
# Show the phase and gain margins in the plot
if margins:
# Compute stability margins for the system
margin = stability_margins(sys, method=method)
gm, pm, Wcg, Wcp = (margin[i] for i in (0, 1, 3, 4))
# Figure out sign of the phase at the first gain crossing
# (needed if phase_wrap is True)
phase_at_cp = phases[0][(np.abs(omegas[0] - Wcp)).argmin()]
if phase_at_cp >= 0.:
phase_limit = 180.
else:
phase_limit = -180.
if Hz:
Wcg, Wcp = Wcg/(2*math.pi), Wcp/(2*math.pi)
# Draw lines at gain and phase limits
ax_mag.axhline(y=0 if dB else 1, color='k', linestyle=':',
zorder=-20)
ax_phase.axhline(y=phase_limit if deg else
math.radians(phase_limit),
color='k', linestyle=':', zorder=-20)
mag_ylim = ax_mag.get_ylim()
phase_ylim = ax_phase.get_ylim()
# Annotate the phase margin (if it exists)
                    if pm != float('inf') and not math.isnan(Wcp):
if dB:
ax_mag.semilogx(
[Wcp, Wcp], [0., -1e5],
color='k', linestyle=':', zorder=-20)
else:
ax_mag.loglog(
[Wcp, Wcp], [1., 1e-8],
color='k', linestyle=':', zorder=-20)
if deg:
ax_phase.semilogx(
[Wcp, Wcp], [1e5, phase_limit + pm],
color='k', linestyle=':', zorder=-20)
ax_phase.semilogx(
[Wcp, Wcp], [phase_limit + pm, phase_limit],
color='k', zorder=-20)
else:
ax_phase.semilogx(
[Wcp, Wcp], [1e5, math.radians(phase_limit) +
math.radians(pm)],
color='k', linestyle=':', zorder=-20)
ax_phase.semilogx(
[Wcp, Wcp], [math.radians(phase_limit) +
math.radians(pm),
math.radians(phase_limit)],
color='k', zorder=-20)
# Annotate the gain margin (if it exists)
                    if gm != float('inf') and not math.isnan(Wcg):
if dB:
ax_mag.semilogx(
[Wcg, Wcg], [-20.*np.log10(gm), -1e5],
color='k', linestyle=':', zorder=-20)
ax_mag.semilogx(
[Wcg, Wcg], [0, -20*np.log10(gm)],
color='k', zorder=-20)
else:
ax_mag.loglog(
[Wcg, Wcg], [1./gm, 1e-8], color='k',
linestyle=':', zorder=-20)
ax_mag.loglog(
[Wcg, Wcg], [1., 1./gm], color='k', zorder=-20)
if deg:
ax_phase.semilogx(
[Wcg, Wcg], [0, phase_limit],
color='k', linestyle=':', zorder=-20)
else:
ax_phase.semilogx(
[Wcg, Wcg], [0, math.radians(phase_limit)],
color='k', linestyle=':', zorder=-20)
ax_mag.set_ylim(mag_ylim)
ax_phase.set_ylim(phase_ylim)
if sisotool:
ax_mag.text(
0.04, 0.06,
'G.M.: %.2f %s\nFreq: %.2f %s' %
(20*np.log10(gm) if dB else gm,
'dB ' if dB else '',
Wcg, 'Hz' if Hz else 'rad/s'),
horizontalalignment='left',
verticalalignment='bottom',
transform=ax_mag.transAxes,
fontsize=8 if int(mpl.__version__[0]) == 1 else 6)
ax_phase.text(
0.04, 0.06,
'P.M.: %.2f %s\nFreq: %.2f %s' %
(pm if deg else math.radians(pm),
'deg' if deg else 'rad',
Wcp, 'Hz' if Hz else 'rad/s'),
horizontalalignment='left',
verticalalignment='bottom',
transform=ax_phase.transAxes,
fontsize=8 if int(mpl.__version__[0]) == 1 else 6)
else:
plt.suptitle(
"Gm = %.2f %s(at %.2f %s), "
"Pm = %.2f %s (at %.2f %s)" %
(20*np.log10(gm) if dB else gm,
'dB ' if dB else '',
Wcg, 'Hz' if Hz else 'rad/s',
pm if deg else math.radians(pm),
'deg' if deg else 'rad',
Wcp, 'Hz' if Hz else 'rad/s'))
# Add a grid to the plot + labeling
ax_phase.set_ylabel("Phase (deg)" if deg else "Phase (rad)")
def gen_zero_centered_series(val_min, val_max, period):
v1 = np.ceil(val_min / period - 0.2)
v2 = np.floor(val_max / period + 0.2)
return np.arange(v1, v2 + 1) * period
if deg:
ylim = ax_phase.get_ylim()
ax_phase.set_yticks(gen_zero_centered_series(
ylim[0], ylim[1], 45.))
ax_phase.set_yticks(gen_zero_centered_series(
ylim[0], ylim[1], 15.), minor=True)
else:
ylim = ax_phase.get_ylim()
ax_phase.set_yticks(gen_zero_centered_series(
ylim[0], ylim[1], math.pi / 4.))
ax_phase.set_yticks(gen_zero_centered_series(
ylim[0], ylim[1], math.pi / 12.), minor=True)
ax_phase.grid(grid and not margins, which='both')
# ax_mag.grid(which='minor', alpha=0.3)
# ax_mag.grid(which='major', alpha=0.9)
# ax_phase.grid(which='minor', alpha=0.3)
# ax_phase.grid(which='major', alpha=0.9)
# Label the frequency axis
ax_phase.set_xlabel("Frequency (Hz)" if Hz
else "Frequency (rad/sec)")
if len(syslist) == 1:
return mags[0], phases[0], omegas[0]
else:
return mags, phases, omegas
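# Illustrative usage sketch (not part of the library itself): a typical call
# to bode_plot for a single SISO system, using the module-level
# TransferFunction class imported above. The plant numerator and denominator
# are hypothetical.
def _example_bode_plot_usage():
    """Minimal sketch: Bode plot of a second-order system with margins."""
    sys = TransferFunction([1], [1, 0.5, 1])    # hypothetical SISO plant
    # dB/Hz scaling and margin annotations are selected via keywords.
    mag, phase, omega = bode_plot(sys, dB=True, Hz=True, margins=True)
    return mag, phase, omega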
#
# Nyquist plot
#
# Default values for module parameter variables
_nyquist_defaults = {
'nyquist.mirror_style': '--',
'nyquist.arrows': 2,
'nyquist.arrow_size': 8,
'nyquist.indent_radius': 1e-1,
'nyquist.indent_direction': 'right',
}
def nyquist_plot(syslist, omega=None, plot=True, omega_limits=None,
omega_num=None, label_freq=0, color=None,
return_contour=False, warn_nyquist=True, *args, **kwargs):
"""Nyquist plot for a system
    Plots a Nyquist plot for the system over an (optional) frequency range.
    The curve is computed by evaluating the Nyquist segment along the positive
imaginary axis, with a mirror image generated to reflect the negative
imaginary axis. Poles on or near the imaginary axis are avoided using a
small indentation. The portion of the Nyquist contour at infinity is not
explicitly computed (since it maps to a constant value for any system with
a proper transfer function).
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK). Nyquist
curves for each system are plotted on the same graph.
    plot : bool
        If True (default), plot the Nyquist curve.
omega : array_like
Set of frequencies to be evaluated, in rad/sec.
omega_limits : array_like of two values
Limits to the range of frequencies. Ignored if omega is provided, and
auto-generated if omitted.
omega_num : int
Number of frequency samples to plot. Defaults to
config.defaults['freqplot.number_of_samples'].
color : string
Used to specify the color of the line and arrowhead.
mirror_style : string or False
Linestyle for mirror image of the Nyquist curve. If `False` then
omit completely. Default linestyle ('--') is determined by
config.defaults['nyquist.mirror_style'].
return_contour : bool
If 'True', return the contour used to evaluate the Nyquist plot.
label_freq : int
Label every nth frequency on the plot. If not specified, no labels
are generated.
arrows : int or 1D/2D array of floats
Specify the number of arrows to plot on the Nyquist curve. If an
        integer is passed, that number of equally spaced arrows will be
plotted on each of the primary segment and the mirror image. If a 1D
array is passed, it should consist of a sorted list of floats between
0 and 1, indicating the location along the curve to plot an arrow. If
a 2D array is passed, the first row will be used to specify arrow
locations for the primary curve and the second row will be used for
the mirror image.
arrow_size : float
Arrowhead width and length (in display coordinates). Default value is
8 and can be set using config.defaults['nyquist.arrow_size'].
arrow_style : matplotlib.patches.ArrowStyle
Define style used for Nyquist curve arrows (overrides `arrow_size`).
indent_radius : float
Amount to indent the Nyquist contour around poles that are at or near
the imaginary axis.
indent_direction : str
For poles on the imaginary axis, set the direction of indentation to
be 'right' (default), 'left', or 'none'.
warn_nyquist : bool, optional
If set to 'False', turn off warnings about frequencies above Nyquist.
*args : :func:`matplotlib.pyplot.plot` positional properties, optional
Additional arguments for `matplotlib` plots (color, linestyle, etc)
**kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
Additional keywords (passed to `matplotlib`)
Returns
-------
count : int (or list of int if len(syslist) > 1)
Number of encirclements of the point -1 by the Nyquist curve. If
multiple systems are given, an array of counts is returned.
    contour : ndarray (or list of ndarray if len(syslist) > 1), optional
The contour used to create the primary Nyquist curve segment. To
obtain the Nyquist curve values, evaluate system(s) along contour.
Notes
-----
1. If a discrete time model is given, the frequency response is computed
along the upper branch of the unit circle, using the mapping ``z =
exp(1j * omega * dt)`` where `omega` ranges from 0 to `pi/dt` and `dt`
is the discrete timebase. If timebase not specified (``dt=True``),
`dt` is set to 1.
2. If a continuous-time system contains poles on or near the imaginary
axis, a small indentation will be used to avoid the pole. The radius
of the indentation is given by `indent_radius` and it is taken to the
right of stable poles and the left of unstable poles. If a pole is
exactly on the imaginary axis, the `indent_direction` parameter can be
used to set the direction of indentation. Setting `indent_direction`
to `none` will turn off indentation. If `return_contour` is True, the
exact contour used for evaluation is returned.
Examples
--------
>>> sys = ss([[1, -2], [3, -4]], [[5], [7]], [[6, 8]], [[9]])
>>> count = nyquist_plot(sys)
"""
# Check to see if legacy 'Plot' keyword was used
if 'Plot' in kwargs:
warnings.warn("'Plot' keyword is deprecated in nyquist_plot; "
"use 'plot'", FutureWarning)
# Map 'Plot' keyword to 'plot' keyword
plot = kwargs.pop('Plot')
# Check to see if legacy 'labelFreq' keyword was used
if 'labelFreq' in kwargs:
warnings.warn("'labelFreq' keyword is deprecated in nyquist_plot; "
"use 'label_freq'", FutureWarning)
# Map 'labelFreq' keyword to 'label_freq' keyword
label_freq = kwargs.pop('labelFreq')
# Check to see if legacy 'arrow_width' or 'arrow_length' were used
if 'arrow_width' in kwargs or 'arrow_length' in kwargs:
warnings.warn(
"'arrow_width' and 'arrow_length' keywords are deprecated in "
"nyquist_plot; use `arrow_size` instead", FutureWarning)
kwargs['arrow_size'] = \
(kwargs.get('arrow_width', 0) + kwargs.get('arrow_length', 0)) / 2
kwargs.pop('arrow_width', False)
kwargs.pop('arrow_length', False)
# Get values for params (and pop from list to allow keyword use in plot)
omega_num = config._get_param('freqplot', 'number_of_samples', omega_num)
mirror_style = config._get_param(
'nyquist', 'mirror_style', kwargs, _nyquist_defaults, pop=True)
arrows = config._get_param(
'nyquist', 'arrows', kwargs, _nyquist_defaults, pop=True)
arrow_size = config._get_param(
'nyquist', 'arrow_size', kwargs, _nyquist_defaults, pop=True)
arrow_style = config._get_param('nyquist', 'arrow_style', kwargs, None)
indent_radius = config._get_param(
'nyquist', 'indent_radius', kwargs, _nyquist_defaults, pop=True)
indent_direction = config._get_param(
'nyquist', 'indent_direction', kwargs, _nyquist_defaults, pop=True)
# If argument was a singleton, turn it into a list
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
omega, omega_range_given = _determine_omega_vector(
syslist, omega, omega_limits, omega_num)
if not omega_range_given:
# Start contour at zero frequency
omega[0] = 0.
# Go through each system and keep track of the results
counts, contours = [], []
for sys in syslist:
if not sys.issiso():
# TODO: Add MIMO nyquist plots.
raise ControlMIMONotImplemented(
"Nyquist plot currently only supports SISO systems.")
# Figure out the frequency range
omega_sys = np.asarray(omega)
# Determine the contour used to evaluate the Nyquist curve
if sys.isdtime(strict=True):
# Transform frequencies in for discrete-time systems
nyquistfrq = math.pi / sys.dt
if not omega_range_given:
# limit up to and including nyquist frequency
omega_sys = np.hstack((
omega_sys[omega_sys < nyquistfrq], nyquistfrq))
# Issue a warning if we are sampling above Nyquist
if np.any(omega_sys * sys.dt > np.pi) and warn_nyquist:
warnings.warn("evaluation above Nyquist frequency")
# Transform frequencies to continuous domain
            contour = np.exp(1j * omega_sys * sys.dt)
else:
contour = 1j * omega_sys
# Bend the contour around any poles on/near the imaginary axis
if isinstance(sys, (StateSpace, TransferFunction)) \
and sys.isctime() and indent_direction != 'none':
poles = sys.pole()
if contour[1].imag > indent_radius \
and 0. in poles and not omega_range_given:
# add some points for quarter circle around poles at origin
contour = np.concatenate(
(1j * np.linspace(0., indent_radius, 50),
contour[1:]))
for i, s in enumerate(contour):
# Find the nearest pole
p = poles[(np.abs(poles - s)).argmin()]
# See if we need to indent around it
if abs(s - p) < indent_radius:
if p.real < 0 or \
(p.real == 0 and indent_direction == 'right'):
# Indent to the right
contour[i] += \
np.sqrt(indent_radius ** 2 - (s-p).imag ** 2)
elif p.real > 0 or \
(p.real == 0 and indent_direction == 'left'):
# Indent to the left
contour[i] -= \
np.sqrt(indent_radius ** 2 - (s-p).imag ** 2)
else:
ValueError("unknown value for indent_direction")
# TODO: add code to indent around discrete poles on unit circle
# Compute the primary curve
resp = sys(contour)
# Compute CW encirclements of -1 by integrating the (unwrapped) angle
phase = -unwrap(np.angle(resp + 1))
count = int(np.round(np.sum(np.diff(phase)) / np.pi, 0))
counts.append(count)
contours.append(contour)
if plot:
# Parse the arrows keyword
if isinstance(arrows, int):
N = arrows
# Space arrows out, starting midway along each "region"
arrow_pos = np.linspace(0.5/N, 1 + 0.5/N, N, endpoint=False)
elif isinstance(arrows, (list, np.ndarray)):
arrow_pos = np.sort(np.atleast_1d(arrows))
elif not arrows:
arrow_pos = []
else:
raise ValueError("unknown or unsupported arrow location")
# Set the arrow style
if arrow_style is None:
arrow_style = mpl.patches.ArrowStyle(
'simple', head_width=arrow_size, head_length=arrow_size)
# Save the components of the response
x, y = resp.real, resp.imag
# Plot the primary curve
p = plt.plot(x, y, '-', color=color, *args, **kwargs)
c = p[0].get_color()
ax = plt.gca()
_add_arrows_to_line2D(
ax, p[0], arrow_pos, arrowstyle=arrow_style, dir=1)
# Plot the mirror image
if mirror_style is not False:
p = plt.plot(x, -y, mirror_style, color=c, *args, **kwargs)
_add_arrows_to_line2D(
ax, p[0], arrow_pos, arrowstyle=arrow_style, dir=-1)
# Mark the -1 point
plt.plot([-1], [0], 'r+')
# Label the frequencies of the points
if label_freq:
ind = slice(None, None, label_freq)
for xpt, ypt, omegapt in zip(x[ind], y[ind], omega_sys[ind]):
# Convert to Hz
f = omegapt / (2 * np.pi)
# Factor out multiples of 1000 and limit the
# result to the range [-8, 8].
pow1000 = max(min(get_pow1000(f), 8), -8)
# Get the SI prefix.
prefix = gen_prefix(pow1000)
# Apply the text. (Use a space before the text to
# prevent overlap with the data.)
#
# np.round() is used because 0.99... appears
# instead of 1.0, and this would otherwise be
# truncated to 0.
plt.text(xpt, ypt, ' ' +
str(int(np.round(f / 1000 ** pow1000, 0))) + ' ' +
prefix + 'Hz')
if plot:
ax = plt.gca()
ax.set_xlabel("Real axis")
ax.set_ylabel("Imaginary axis")
ax.grid(color="lightgray")
# "Squeeze" the results
if len(syslist) == 1:
counts, contours = counts[0], contours[0]
# Return counts and (optionally) the contour we used
return (counts, contours) if return_contour else counts
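# Illustrative usage sketch (not part of the library itself): nyquist_plot on
# a plant with a pole at the origin, so the contour indentation logic above is
# exercised. The plant and the indent_radius value are hypothetical.
def _example_nyquist_plot_usage():
    """Minimal sketch: encirclement count plus the evaluation contour."""
    sys = TransferFunction([1], [1, 2, 0])      # hypothetical type-1 plant
    count, contour = nyquist_plot(
        sys, indent_radius=1e-2, return_contour=True)
    return count, contour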
# Internal function to add arrows to a curve
def _add_arrows_to_line2D(
axes, line, arrow_locs=[0.2, 0.4, 0.6, 0.8],
arrowstyle='-|>', arrowsize=1, dir=1, transform=None):
"""
Add arrows to a matplotlib.lines.Line2D at selected locations.
Parameters:
-----------
axes: Axes object as returned by axes command (or gca)
line: Line2D object as returned by plot command
arrow_locs: list of locations where to insert arrows, % of total length
arrowstyle: style of the arrow
arrowsize: size of the arrow
transform: a matplotlib transform instance, default to data coordinates
Returns:
--------
arrows: list of arrows
Based on https://stackoverflow.com/questions/26911898/
"""
if not isinstance(line, mpl.lines.Line2D):
raise ValueError("expected a matplotlib.lines.Line2D object")
x, y = line.get_xdata(), line.get_ydata()
arrow_kw = {
"arrowstyle": arrowstyle,
}
color = line.get_color()
use_multicolor_lines = isinstance(color, np.ndarray)
if use_multicolor_lines:
raise NotImplementedError("multicolor lines not supported")
else:
arrow_kw['color'] = color
linewidth = line.get_linewidth()
if isinstance(linewidth, np.ndarray):
raise NotImplementedError("multiwidth lines not supported")
else:
arrow_kw['linewidth'] = linewidth
if transform is None:
transform = axes.transData
# Compute the arc length along the curve
s = np.cumsum(np.sqrt(np.diff(x) ** 2 + np.diff(y) ** 2))
arrows = []
for loc in arrow_locs:
n = np.searchsorted(s, s[-1] * loc)
# Figure out what direction to paint the arrow
if dir == 1:
arrow_tail = (x[n], y[n])
arrow_head = (np.mean(x[n:n + 2]), np.mean(y[n:n + 2]))
elif dir == -1:
# Orient the arrow in the other direction on the segment
arrow_tail = (x[n + 1], y[n + 1])
arrow_head = (np.mean(x[n:n + 2]), np.mean(y[n:n + 2]))
else:
raise ValueError("unknown value for keyword 'dir'")
p = mpl.patches.FancyArrowPatch(
arrow_tail, arrow_head, transform=transform, lw=0,
**arrow_kw)
axes.add_patch(p)
arrows.append(p)
return arrows
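# Illustrative usage sketch (not part of the library itself): decorating an
# ordinary matplotlib line with direction arrows, the same way nyquist_plot
# decorates the primary and mirror curves. The curve data are hypothetical.
def _example_add_arrows_usage():
    """Minimal sketch: put two arrows on a parametric circle."""
    theta = np.linspace(0, 2 * np.pi, 200)
    line, = plt.plot(np.cos(theta), np.sin(theta))
    style = mpl.patches.ArrowStyle('simple', head_width=8, head_length=8)
    return _add_arrows_to_line2D(
        plt.gca(), line, arrow_locs=[0.25, 0.75], arrowstyle=style)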
#
# Gang of Four plot
#
# TODO: think about how (and whether) to handle lists of systems
def gangof4_plot(P, C, omega=None, **kwargs):
"""Plot the "Gang of 4" transfer functions for a system
Generates a 2x2 plot showing the "Gang of 4" sensitivity functions
[T, PS; CS, S]
Parameters
----------
P, C : LTI
Linear input/output systems (process and control)
omega : array
Range of frequencies (list or bounds) in rad/sec
**kwargs : :func:`matplotlib.pyplot.plot` keyword properties, optional
Additional keywords (passed to `matplotlib`)
Returns
-------
None
"""
if not P.issiso() or not C.issiso():
# TODO: Add MIMO go4 plots.
raise ControlMIMONotImplemented(
"Gang of four is currently only implemented for SISO systems.")
# Get the default parameter values
dB = config._get_param(
'freqplot', 'dB', kwargs, _freqplot_defaults, pop=True)
Hz = config._get_param(
'freqplot', 'Hz', kwargs, _freqplot_defaults, pop=True)
grid = config._get_param(
'freqplot', 'grid', kwargs, _freqplot_defaults, pop=True)
    # Compute the sensitivity functions
L = P * C
S = feedback(1, L)
T = L * S
# Select a default range if none is provided
# TODO: This needs to be made more intelligent
if omega is None:
omega = _default_frequency_range((P, C, S))
# Set up the axes with labels so that multiple calls to
# gangof4_plot will superimpose the data. See details in bode_plot.
plot_axes = {'t': None, 's': None, 'ps': None, 'cs': None}
for ax in plt.gcf().axes:
label = ax.get_label()
if label.startswith('control-gangof4-'):
key = label[len('control-gangof4-'):]
if key not in plot_axes:
raise RuntimeError(
"unknown gangof4 axis type '{}'".format(label))
plot_axes[key] = ax
# if any of the axes are missing, start from scratch
if any((ax is None for ax in plot_axes.values())):
plt.clf()
plot_axes = {'s': plt.subplot(221, label='control-gangof4-s'),
'ps': plt.subplot(222, label='control-gangof4-ps'),
'cs': plt.subplot(223, label='control-gangof4-cs'),
't': plt.subplot(224, label='control-gangof4-t')}
#
# Plot the four sensitivity functions
#
omega_plot = omega / (2. * math.pi) if Hz else omega
# TODO: Need to add in the mag = 1 lines
mag_tmp, phase_tmp, omega = S.frequency_response(omega)
mag = np.squeeze(mag_tmp)
if dB:
plot_axes['s'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
else:
plot_axes['s'].loglog(omega_plot, mag, **kwargs)
    plot_axes['s'].set_ylabel("$|S|$" + (" (dB)" if dB else ""))
plot_axes['s'].tick_params(labelbottom=False)
plot_axes['s'].grid(grid, which='both')
mag_tmp, phase_tmp, omega = (P * S).frequency_response(omega)
mag = np.squeeze(mag_tmp)
if dB:
plot_axes['ps'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
else:
plot_axes['ps'].loglog(omega_plot, mag, **kwargs)
plot_axes['ps'].tick_params(labelbottom=False)
    plot_axes['ps'].set_ylabel("$|PS|$" + (" (dB)" if dB else ""))
plot_axes['ps'].grid(grid, which='both')
mag_tmp, phase_tmp, omega = (C * S).frequency_response(omega)
mag = np.squeeze(mag_tmp)
if dB:
plot_axes['cs'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
else:
plot_axes['cs'].loglog(omega_plot, mag, **kwargs)
plot_axes['cs'].set_xlabel(
"Frequency (Hz)" if Hz else "Frequency (rad/sec)")
    plot_axes['cs'].set_ylabel("$|CS|$" + (" (dB)" if dB else ""))
plot_axes['cs'].grid(grid, which='both')
mag_tmp, phase_tmp, omega = T.frequency_response(omega)
mag = np.squeeze(mag_tmp)
if dB:
plot_axes['t'].semilogx(omega_plot, 20 * np.log10(mag), **kwargs)
else:
plot_axes['t'].loglog(omega_plot, mag, **kwargs)
plot_axes['t'].set_xlabel(
"Frequency (Hz)" if Hz else "Frequency (rad/sec)")
    plot_axes['t'].set_ylabel("$|T|$" + (" (dB)" if dB else ""))
plot_axes['t'].grid(grid, which='both')
plt.tight_layout()
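# Illustrative usage sketch (not part of the library itself): Gang of Four
# plot for a plant/controller pair. Both transfer functions below are
# hypothetical; any SISO process P and controller C will work.
def _example_gangof4_usage():
    """Minimal sketch: sensitivity functions of a PI-controlled lag plant."""
    P = TransferFunction([1], [1, 1])           # hypothetical process
    C = TransferFunction([2, 1], [1, 0])        # hypothetical PI controller
    gangof4_plot(P, C, dB=True)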
#
# Singular values plot
#
def singular_values_plot(syslist, omega=None,
plot=True, omega_limits=None, omega_num=None,
*args, **kwargs):
"""Singular value plot for a system
    Plots a Singular Value plot for the system over an (optional) frequency range.
Parameters
----------
syslist : linsys
List of linear systems (single system is OK).
omega : array_like
List of frequencies in rad/sec to be used for frequency response.
plot : bool
If True (default), generate the singular values plot.
omega_limits : array_like of two values
Limits of the frequency vector to generate.
If Hz=True the limits are in Hz otherwise in rad/s.
omega_num : int
Number of samples to plot.
Default value (1000) set by config.defaults['freqplot.number_of_samples'].
dB : bool
If True, plot result in dB.
Default value (False) set by config.defaults['freqplot.dB'].
Hz : bool
If True, plot frequency in Hz (omega must be provided in rad/sec).
Default value (False) set by config.defaults['freqplot.Hz']
Returns
-------
    sigma : ndarray (or list of ndarray if len(syslist) > 1)
        singular values
    omega : ndarray (or list of ndarray if len(syslist) > 1)
frequency in rad/sec
Other Parameters
----------------
grid : bool
If True, plot grid lines on gain and phase plots. Default is set by
`config.defaults['freqplot.grid']`.
Examples
--------
>>> import numpy as np
>>> den = [75, 1]
>>> sys = TransferFunction([[[87.8], [-86.4]], [[108.2], [-109.6]]], [[den, den], [den, den]])
>>> omega = np.logspace(-4, 1, 1000)
>>> sigma, omega = singular_values_plot(sys, plot=True)
>>> singular_values_plot(sys, 0.0, plot=False)
(array([[197.20868123],
[ 1.39141948]]), array([0.]))
"""
# Make a copy of the kwargs dictionary since we will modify it
kwargs = dict(kwargs)
# Get values for params (and pop from list to allow keyword use in plot)
dB = config._get_param(
'freqplot', 'dB', kwargs, _freqplot_defaults, pop=True)
Hz = config._get_param(
'freqplot', 'Hz', kwargs, _freqplot_defaults, pop=True)
grid = config._get_param(
'freqplot', 'grid', kwargs, _freqplot_defaults, pop=True)
plot = config._get_param(
'freqplot', 'plot', plot, True)
omega_num = config._get_param('freqplot', 'number_of_samples', omega_num)
# If argument was a singleton, turn it into a tuple
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
omega, omega_range_given = _determine_omega_vector(
syslist, omega, omega_limits, omega_num)
omega = np.atleast_1d(omega)
if plot:
fig = plt.gcf()
ax_sigma = None
# Get the current axes if they already exist
for ax in fig.axes:
if ax.get_label() == 'control-sigma':
ax_sigma = ax
# If no axes present, create them from scratch
if ax_sigma is None:
plt.clf()
ax_sigma = plt.subplot(111, label='control-sigma')
# color cycle handled manually as all singular values
# of the same systems are expected to be of the same color
color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
color_offset = 0
if len(ax_sigma.lines) > 0:
last_color = ax_sigma.lines[-1].get_color()
if last_color in color_cycle:
color_offset = color_cycle.index(last_color) + 1
sigmas, omegas, nyquistfrqs = [], [], []
for idx_sys, sys in enumerate(syslist):
omega_sys = np.asarray(omega)
if sys.isdtime(strict=True):
nyquistfrq = math.pi / sys.dt
if not omega_range_given:
# limit up to and including nyquist frequency
omega_sys = np.hstack((
omega_sys[omega_sys < nyquistfrq], nyquistfrq))
omega_complex = np.exp(1j * omega_sys * sys.dt)
else:
nyquistfrq = None
omega_complex = 1j*omega_sys
fresp = sys(omega_complex, squeeze=False)
fresp = fresp.transpose((2, 0, 1))
sigma = np.linalg.svd(fresp, compute_uv=False)
sigmas.append(sigma.transpose()) # return shape is "channel first"
omegas.append(omega_sys)
nyquistfrqs.append(nyquistfrq)
if plot:
color = color_cycle[(idx_sys + color_offset) % len(color_cycle)]
color = kwargs.pop('color', color)
nyquistfrq_plot = None
if Hz:
omega_plot = omega_sys / (2. * math.pi)
if nyquistfrq:
nyquistfrq_plot = nyquistfrq / (2. * math.pi)
else:
omega_plot = omega_sys
if nyquistfrq:
nyquistfrq_plot = nyquistfrq
sigma_plot = sigma
if dB:
ax_sigma.semilogx(omega_plot, 20 * np.log10(sigma_plot),
color=color, *args, **kwargs)
else:
ax_sigma.loglog(omega_plot, sigma_plot,
color=color, *args, **kwargs)
if nyquistfrq_plot is not None:
ax_sigma.axvline(x=nyquistfrq_plot, color=color)
# Add a grid to the plot + labeling
if plot:
ax_sigma.grid(grid, which='both')
ax_sigma.set_ylabel("Singular Values (dB)" if dB else "Singular Values")
ax_sigma.set_xlabel("Frequency (Hz)" if Hz else "Frequency (rad/sec)")
if len(syslist) == 1:
return sigmas[0], omegas[0]
else:
return sigmas, omegas
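# Illustrative usage sketch (not part of the library itself), mirroring the
# docstring example above: singular value plot of a 2x2 MIMO system over a
# user-chosen frequency grid. The dynamics are hypothetical.
def _example_singular_values_usage():
    """Minimal sketch: sigma plot of a 2x2 system in dB."""
    den = [75, 1]
    sys = TransferFunction(
        [[[87.8], [-86.4]], [[108.2], [-109.6]]], [[den, den], [den, den]])
    omega = np.logspace(-4, 1, 1000)
    sigma, omega = singular_values_plot(sys, omega, dB=True)
    return sigma, omega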
#
# Utility functions
#
# This section of the code contains some utility functions for
# generating frequency domain plots
#
# Determine the frequency range to be used
def _determine_omega_vector(syslist, omega_in, omega_limits, omega_num):
"""Determine the frequency range for a frequency-domain plot
according to a standard logic.
If omega_in and omega_limits are both None, then omega_out is computed
on omega_num points according to a default logic defined by
_default_frequency_range and tailored for the list of systems syslist, and
omega_range_given is set to False.
If omega_in is None but omega_limits is an array-like of 2 elements, then
omega_out is computed with the function np.logspace on omega_num points
within the interval [min, max] = [omega_limits[0], omega_limits[1]], and
omega_range_given is set to True.
If omega_in is not None, then omega_out is set to omega_in,
and omega_range_given is set to True
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK)
omega_in : 1D array_like or None
Frequency range specified by the user
omega_limits : 1D array_like or None
Frequency limits specified by the user
omega_num : int
Number of points to be used for the frequency
range (if the frequency range is not user-specified)
Returns
-------
omega_out : 1D array
Frequency range to be used
omega_range_given : bool
True if the frequency range was specified by the user, either through
omega_in or through omega_limits. False if both omega_in
and omega_limits are None.
"""
omega_range_given = True
if omega_in is None:
if omega_limits is None:
omega_range_given = False
# Select a default range if none is provided
omega_out = _default_frequency_range(syslist,
number_of_samples=omega_num)
else:
omega_limits = np.asarray(omega_limits)
if len(omega_limits) != 2:
raise ValueError("len(omega_limits) must be 2")
omega_out = np.logspace(np.log10(omega_limits[0]),
np.log10(omega_limits[1]),
num=omega_num, endpoint=True)
else:
omega_out = np.copy(omega_in)
return omega_out, omega_range_given
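# Illustrative sketch (not part of the library itself): the three branches of
# _determine_omega_vector. The system and the numeric ranges are hypothetical.
def _example_determine_omega_vector():
    """Minimal sketch: default range vs. limits vs. explicit frequencies."""
    sys = TransferFunction([1], [1, 1])
    # 1. Nothing given: default range is computed, flag comes back False.
    w_default, given = _determine_omega_vector([sys], None, None, 100)
    # 2. Only limits given: logspace over [0.01, 100] rad/s, flag is True.
    w_limits, given = _determine_omega_vector([sys], None, [0.01, 100], 100)
    # 3. Explicit omega given: returned as a copy, flag is True.
    w_user, given = _determine_omega_vector([sys], [0.1, 1., 10.], None, 100)
    return w_default, w_limits, w_user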
# Compute reasonable defaults for axes
def _default_frequency_range(syslist, Hz=None, number_of_samples=None,
feature_periphery_decades=None):
"""Compute a default frequency range for frequency domain plots.
This code looks at the poles and zeros of all of the systems that
we are plotting and sets the frequency range to be one decade above
and below the min and max feature frequencies, rounded to the nearest
integer. If no features are found, it returns logspace(-1, 1)
Parameters
----------
syslist : list of LTI
List of linear input/output systems (single system is OK)
Hz : bool
If True, the limits (first and last value) of the frequencies
are set to full decades in Hz so it fits plotting with logarithmic
scale in Hz otherwise in rad/s. Omega is always returned in rad/sec.
number_of_samples : int, optional
Number of samples to generate. The default value is read from
        ``config.defaults['freqplot.number_of_samples']``. If None, then the
default from `numpy.logspace` is used.
feature_periphery_decades : float, optional
Defines how many decades shall be included in the frequency range on
both sides of features (poles, zeros). The default value is read from
``config.defaults['freqplot.feature_periphery_decades']``.
Returns
-------
omega : array
Range of frequencies in rad/sec
Examples
--------
>>> from matlab import ss
>>> sys = ss("1. -2; 3. -4", "5.; 7", "6. 8", "9.")
>>> omega = _default_frequency_range(sys)
"""
# Set default values for options
number_of_samples = config._get_param(
'freqplot', 'number_of_samples', number_of_samples)
feature_periphery_decades = config._get_param(
'freqplot', 'feature_periphery_decades', feature_periphery_decades, 1)
# Find the list of all poles and zeros in the systems
features = np.array(())
freq_interesting = []
# detect if single sys passed by checking if it is sequence-like
if not hasattr(syslist, '__iter__'):
syslist = (syslist,)
for sys in syslist:
try:
# Add new features to the list
if sys.isctime():
features_ = np.concatenate((np.abs(sys.pole()),
np.abs(sys.zero())))
# Get rid of poles and zeros at the origin
toreplace = features_ == 0.0
if np.any(toreplace):
features_ = features_[~toreplace]
elif sys.isdtime(strict=True):
fn = math.pi * 1. / sys.dt
# TODO: What distance to the Nyquist frequency is appropriate?
freq_interesting.append(fn * 0.9)
features_ = np.concatenate((sys.pole(),
sys.zero()))
# Get rid of poles and zeros on the real axis (imag==0)
# * origin and real < 0
                # * at 1.: would result in omega=0. (logarithmic plot!)
toreplace = (features_.imag == 0.0) & (
(features_.real <= 0.) |
(np.abs(features_.real - 1.0) < 1.e-10))
if np.any(toreplace):
features_ = features_[~toreplace]
# TODO: improve
features_ = np.abs(np.log(features_) / (1.j * sys.dt))
else:
# TODO
raise NotImplementedError(
"type of system in not implemented now")
features = np.concatenate((features, features_))
except NotImplementedError:
pass
# Make sure there is at least one point in the range
if features.shape[0] == 0:
features = np.array([1.])
if Hz:
features /= 2. * math.pi
features = np.log10(features)
lsp_min = np.floor(np.min(features) - feature_periphery_decades)
lsp_max = np.ceil(np.max(features) + feature_periphery_decades)
lsp_min += np.log10(2. * math.pi)
lsp_max += np.log10(2. * math.pi)
else:
features = np.log10(features)
lsp_min = np.floor(np.min(features) - feature_periphery_decades)
lsp_max = np.ceil(np.max(features) + feature_periphery_decades)
if freq_interesting:
lsp_min = min(lsp_min, np.log10(min(freq_interesting)))
lsp_max = max(lsp_max, np.log10(max(freq_interesting)))
# TODO: Add a check in discrete case to make sure we don't get aliasing
# (Attention: there is a list of system but only one omega vector)
# Set the range to be an order of magnitude beyond any features
if number_of_samples:
omega = np.logspace(
lsp_min, lsp_max, num=number_of_samples, endpoint=True)
else:
omega = np.logspace(lsp_min, lsp_max, endpoint=True)
return omega
#
# Utility functions to create nice looking labels (KLD 5/23/11)
#
def get_pow1000(num):
"""Determine exponent for which significand of a number is within the
range [1, 1000).
"""
# Based on algorithm from http://www.mail-archive.com/
# [email protected]/msg14433.html, accessed 2010/11/7
# by Jason Heeris 2009/11/18
from decimal import Decimal
from math import floor
dnum = Decimal(str(num))
if dnum == 0:
return 0
elif dnum < 0:
dnum = -dnum
return int(floor(dnum.log10() / 3))
def gen_prefix(pow1000):
"""Return the SI prefix for a power of 1000.
"""
# Prefixes according to Table 5 of [BIPM 2006] (excluding hecto,
# deca, deci, and centi).
if pow1000 < -8 or pow1000 > 8:
raise ValueError(
"Value is out of the range covered by the SI prefixes.")
return ['Y', # yotta (10^24)
'Z', # zetta (10^21)
'E', # exa (10^18)
'P', # peta (10^15)
'T', # tera (10^12)
'G', # giga (10^9)
'M', # mega (10^6)
'k', # kilo (10^3)
'', # (10^0)
'm', # milli (10^-3)
r'$\mu$', # micro (10^-6)
'n', # nano (10^-9)
'p', # pico (10^-12)
'f', # femto (10^-15)
'a', # atto (10^-18)
'z', # zepto (10^-21)
'y'][8 - pow1000] # yocto (10^-24)
def find_nearest_omega(omega_list, omega):
omega_list = np.asarray(omega_list)
return omega_list[(np.abs(omega_list - omega)).argmin()]
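# Illustrative sketch (not part of the library itself): how get_pow1000 and
# gen_prefix combine into the SI frequency labels used by nyquist_plot above.
# The input frequency is hypothetical.
def _example_si_frequency_label(f=24.3e3):
    """Minimal sketch: 24300 Hz is rendered as '24 kHz'."""
    pow1000 = max(min(get_pow1000(f), 8), -8)
    return str(int(np.round(f / 1000 ** pow1000, 0))) + ' ' + \
        gen_prefix(pow1000) + 'Hz'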
# Function aliases
bode = bode_plot
nyquist = nyquist_plot
gangof4 = gangof4_plot
| bsd-3-clause | 5,160,184,504,070,688,000 | 38.701607 | 98 | 0.555929 | false |
daniel-noland/MemoryOracle | memoryoracle/memoryoracle/tracked.py | 1 | 2431 | #!/usr/bin/env python
# -*- encoding UTF-8 -*-
"""
File containing the abstract Tracked class.
"""
# import gdb
import pymongo
import mongoengine
import execution
# NOTE: The read_preference should not be needed. This is a workaround for a
# bug in pymongo. (http://goo.gl/Somoeu)
mongoengine.connect('memoryoracle',
read_preference=\
pymongo.read_preferences.ReadPreference.PRIMARY)
class Tracked(mongoengine.Document):
"""
*Abstract* class to represent a piece of information from the debugee
to track.
"""
meta = {'allow_inheritance': True}
# execution = mongoengine.ReferenceField(execution.Execution)
# def _init(self, description):
# self._description = description
# def __init__(self, *args, **kwargs):
# raise NotImplementedError(
# "Attempted to instantiate abstract class Tracked")
# @property
# def description(self):
# return self._description
# @property
# def name(self):
# return self._name
def track(self):
raise NotImplementedError(
"Attempted to track abstract class")
@property
def description(self):
return self._description
class Owner(Tracked):
"""
*Abstract* class representing an object which owns another object.
    The Owner may both be owned and contain objects which own other objects.
"""
class Reference(Tracked):
"""
*Abstract* class representing an object which is a reference to another
object.
"""
target = mongoengine.ReferenceField(Tracked)
class ProgramFile(Tracked):
"""
*Abstract* class to track a file belonging to the debugee
"""
pass
class ObjectFile(ProgramFile):
"""
*Concrete* class to track a compiled object file in the debugee
"""
source_file = mongoengine.ReferenceField("SourceFile")
pass
class SourceFile(ProgramFile):
"""
*Abstract* class to track a source code file belonging to the debugee.
"""
object_file = mongoengine.ReferenceField(ObjectFile)
pass
class UntrackedDecorator(Tracked):
"""
*Decorator* anti-class to essentially turn off the behavior of the parent.
Use this class when an object would normally be tracked, but you do not
wish it to be.
"""
def __init__(self, *args, **kwargs):
pass
def track(self):
pass
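# Illustrative sketch (not part of this module): one way a concrete Tracked
# subclass could look. The field names and the choice to persist via save()
# inside track() are assumptions, not upstream behavior.
class _ExampleTrackedValue(Tracked):
    """Minimal sketch of a concrete Tracked document."""
    name = mongoengine.StringField()
    value = mongoengine.StringField()
    def track(self):
        # Persist the current snapshot to the connected 'memoryoracle' db.
        self.save()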
| lgpl-3.0 | 6,284,965,725,703,786,000 | 21.509259 | 78 | 0.646236 | false |
ujvl/ray-ng | python/ray/experimental/streaming/communication.py | 2 | 15791 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import logging
import sys
from ray.experimental.streaming.operator import PStrategy
from ray.experimental.streaming.batched_queue import BatchedQueue
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Forward and broadcast stream partitioning strategies
forward_broadcast_strategies = [PStrategy.Forward, PStrategy.Broadcast]
# Used to choose output channel in case of hash-based shuffling
def _hash(value):
if isinstance(value, int):
return value
try:
return int(hashlib.sha1(value.encode("utf-8")).hexdigest(), 16)
except AttributeError:
return int(hashlib.sha1(value).hexdigest(), 16)
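# Illustrative sketch (not part of the module API): how _hash() is used by
# DataOutput below to pick a destination channel. The record value and the
# number of downstream instances are made up.
def _example_hash_routing(record="user-42", num_instances=4):
    """Minimal sketch: the same record always maps to the same slot."""
    return _hash(record) % num_instances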
# A data channel is a batched queue between two
# operator instances in a streaming environment
class DataChannel(object):
"""A data channel for actor-to-actor communication.
Attributes:
env (Environment): The environment the channel belongs to.
src_operator_id (UUID): The id of the source operator of the channel.
dst_operator_id (UUID): The id of the destination operator of the
channel.
src_instance_id (int): The id of the source instance.
dst_instance_id (int): The id of the destination instance.
queue (BatchedQueue): The batched queue used for data movement.
"""
def __init__(self, env, src_operator_id, dst_operator_id, src_instance_id,
dst_instance_id):
self.env = env
self.src_operator_id = src_operator_id
self.dst_operator_id = dst_operator_id
self.src_instance_id = src_instance_id
self.dst_instance_id = dst_instance_id
self.queue = BatchedQueue(
max_size=self.env.config.queue_config.max_size,
max_batch_size=self.env.config.queue_config.max_batch_size,
max_batch_time=self.env.config.queue_config.max_batch_time,
prefetch_depth=self.env.config.queue_config.prefetch_depth,
background_flush=self.env.config.queue_config.background_flush)
def __repr__(self):
return "({},{},{},{})".format(
self.src_operator_id, self.dst_operator_id, self.src_instance_id,
self.dst_instance_id)
# Pulls and merges data from multiple input channels
class DataInput(object):
"""An input gate of an operator instance.
The input gate pulls records from all input channels in a round-robin
fashion.
Attributes:
input_channels (list): The list of input channels.
channel_index (int): The index of the next channel to pull from.
max_index (int): The number of input channels.
closed (list): A list of flags indicating whether an input channel
has been marked as 'closed'.
all_closed (bool): Denotes whether all input channels have been
closed (True) or not (False).
"""
def __init__(self, channels):
self.input_channels = channels
self.channel_index = 0
self.max_index = len(channels)
self.closed = [False] * len(
self.input_channels) # Tracks the channels that have been closed
self.all_closed = False
# Fetches records from input channels in a round-robin fashion
# TODO (john): Make sure the instance is not blocked on any of its input
# channels
# TODO (john): In case of input skew, it might be better to pull from
# the largest queue more often
def _pull(self):
while True:
if self.max_index == 0:
# TODO (john): We should detect this earlier
return None
# Channel to pull from
channel = self.input_channels[self.channel_index]
self.channel_index += 1
if self.channel_index == self.max_index: # Reset channel index
self.channel_index = 0
if self.closed[self.channel_index - 1]:
continue # Channel has been 'closed', check next
record = channel.queue.read_next()
logger.debug("Actor ({},{}) pulled '{}'.".format(
channel.src_operator_id, channel.src_instance_id, record))
if record is None:
# Mark channel as 'closed' and pull from the next open one
self.closed[self.channel_index - 1] = True
self.all_closed = True
for flag in self.closed:
if flag is False:
self.all_closed = False
break
if not self.all_closed:
continue
# Returns 'None' iff all input channels are 'closed'
return record
# Selects output channel(s) and pushes data
class DataOutput(object):
"""An output gate of an operator instance.
The output gate pushes records to output channels according to the
user-defined partitioning scheme.
Attributes:
partitioning_schemes (dict): A mapping from destination operator ids
to partitioning schemes (see: PScheme in operator.py).
forward_channels (list): A list of channels to forward records.
shuffle_channels (list(list)): A list of output channels to shuffle
records grouped by destination operator.
shuffle_key_channels (list(list)): A list of output channels to
shuffle records by a key grouped by destination operator.
shuffle_exists (bool): A flag indicating that there exists at least
one shuffle_channel.
shuffle_key_exists (bool): A flag indicating that there exists at
least one shuffle_key_channel.
"""
def __init__(self, channels, partitioning_schemes):
self.key_selector = None
self.round_robin_indexes = [0]
self.partitioning_schemes = partitioning_schemes
# Prepare output -- collect channels by type
self.forward_channels = [] # Forward and broadcast channels
slots = sum(1 for scheme in self.partitioning_schemes.values()
if scheme.strategy == PStrategy.RoundRobin)
self.round_robin_channels = [[]] * slots # RoundRobin channels
self.round_robin_indexes = [-1] * slots
slots = sum(1 for scheme in self.partitioning_schemes.values()
if scheme.strategy == PStrategy.Shuffle)
# Flag used to avoid hashing when there is no shuffling
self.shuffle_exists = slots > 0
self.shuffle_channels = [[]] * slots # Shuffle channels
slots = sum(1 for scheme in self.partitioning_schemes.values()
if scheme.strategy == PStrategy.ShuffleByKey)
# Flag used to avoid hashing when there is no shuffling by key
self.shuffle_key_exists = slots > 0
self.shuffle_key_channels = [[]] * slots # Shuffle by key channels
# Distinct shuffle destinations
shuffle_destinations = {}
# Distinct shuffle by key destinations
shuffle_by_key_destinations = {}
# Distinct round robin destinations
round_robin_destinations = {}
index_1 = 0
index_2 = 0
index_3 = 0
for channel in channels:
p_scheme = self.partitioning_schemes[channel.dst_operator_id]
strategy = p_scheme.strategy
if strategy in forward_broadcast_strategies:
self.forward_channels.append(channel)
elif strategy == PStrategy.Shuffle:
pos = shuffle_destinations.setdefault(channel.dst_operator_id,
index_1)
self.shuffle_channels[pos].append(channel)
if pos == index_1:
index_1 += 1
elif strategy == PStrategy.ShuffleByKey:
pos = shuffle_by_key_destinations.setdefault(
channel.dst_operator_id, index_2)
self.shuffle_key_channels[pos].append(channel)
if pos == index_2:
index_2 += 1
elif strategy == PStrategy.RoundRobin:
pos = round_robin_destinations.setdefault(
channel.dst_operator_id, index_3)
self.round_robin_channels[pos].append(channel)
if pos == index_3:
index_3 += 1
else: # TODO (john): Add support for other strategies
sys.exit("Unrecognized or unsupported partitioning strategy.")
# A KeyedDataStream can only be shuffled by key
assert not (self.shuffle_exists and self.shuffle_key_exists)
# Flushes any remaining records in the output channels
# 'close' indicates whether we should also 'close' the channel (True)
# by propagating 'None'
# or just flush the remaining records to plasma (False)
def _flush(self, close=False):
"""Flushes remaining output records in the output queues to plasma.
        None is used as a special type of record that is propagated from
        sources to sinks to signal the end of data in a stream.
Attributes:
close (bool): A flag denoting whether the channel should be
also marked as 'closed' (True) or not (False) after flushing.
"""
for channel in self.forward_channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes()
for channels in self.shuffle_channels:
for channel in channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes()
for channels in self.shuffle_key_channels:
for channel in channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes()
for channels in self.round_robin_channels:
for channel in channels:
if close is True:
channel.queue.put_next(None)
channel.queue._flush_writes()
# TODO (john): Add more channel types
# Returns all destination actor ids
def _destination_actor_ids(self):
destinations = []
for channel in self.forward_channels:
destinations.append((channel.dst_operator_id,
channel.dst_instance_id))
for channels in self.shuffle_channels:
for channel in channels:
destinations.append((channel.dst_operator_id,
channel.dst_instance_id))
for channels in self.shuffle_key_channels:
for channel in channels:
destinations.append((channel.dst_operator_id,
channel.dst_instance_id))
for channels in self.round_robin_channels:
for channel in channels:
destinations.append((channel.dst_operator_id,
channel.dst_instance_id))
# TODO (john): Add more channel types
return destinations
# Pushes the record to the output
# Each individual output queue flushes batches to plasma periodically
# based on 'batch_max_size' and 'batch_max_time'
def _push(self, record):
# Forward record
for channel in self.forward_channels:
logger.debug("[writer] Push record '{}' to channel {}".format(
record, channel))
channel.queue.put_next(record)
# Forward record
index = 0
for channels in self.round_robin_channels:
self.round_robin_indexes[index] += 1
if self.round_robin_indexes[index] == len(channels):
self.round_robin_indexes[index] = 0 # Reset index
channel = channels[self.round_robin_indexes[index]]
logger.debug("[writer] Push record '{}' to channel {}".format(
record, channel))
channel.queue.put_next(record)
index += 1
# Hash-based shuffling by key
if self.shuffle_key_exists:
key, _ = record
h = _hash(key)
for channels in self.shuffle_key_channels:
num_instances = len(channels) # Downstream instances
channel = channels[h % num_instances]
logger.debug(
"[key_shuffle] Push record '{}' to channel {}".format(
record, channel))
channel.queue.put_next(record)
elif self.shuffle_exists: # Hash-based shuffling per destination
h = _hash(record)
for channels in self.shuffle_channels:
num_instances = len(channels) # Downstream instances
channel = channels[h % num_instances]
logger.debug("[shuffle] Push record '{}' to channel {}".format(
record, channel))
channel.queue.put_next(record)
else: # TODO (john): Handle rescaling
pass
# Pushes a list of records to the output
# Each individual output queue flushes batches to plasma periodically
# based on 'batch_max_size' and 'batch_max_time'
def _push_all(self, records):
# Forward records
for record in records:
for channel in self.forward_channels:
logger.debug("[writer] Push record '{}' to channel {}".format(
record, channel))
channel.queue.put_next(record)
# Hash-based shuffling by key per destination
if self.shuffle_key_exists:
for record in records:
key, _ = record
h = _hash(key)
for channels in self.shuffle_channels:
num_instances = len(channels) # Downstream instances
channel = channels[h % num_instances]
logger.debug(
"[key_shuffle] Push record '{}' to channel {}".format(
record, channel))
channel.queue.put_next(record)
elif self.shuffle_exists: # Hash-based shuffling per destination
for record in records:
h = _hash(record)
for channels in self.shuffle_channels:
num_instances = len(channels) # Downstream instances
channel = channels[h % num_instances]
logger.debug(
"[shuffle] Push record '{}' to channel {}".format(
record, channel))
channel.queue.put_next(record)
else: # TODO (john): Handle rescaling
pass
# Batched queue configuration
class QueueConfig(object):
"""The configuration of a batched queue.
Attributes:
max_size (int): The maximum size of the queue in number of batches
(if exceeded, backpressure kicks in).
max_batch_size (int): The size of each batch in number of records.
max_batch_time (float): The flush timeout per batch.
prefetch_depth (int): The number of batches to prefetch from plasma.
background_flush (bool): Denotes whether a daemon flush thread should
be used (True) to flush batches to plasma.
"""
def __init__(self,
max_size=999999,
max_batch_size=99999,
max_batch_time=0.01,
prefetch_depth=10,
background_flush=False):
self.max_size = max_size
self.max_batch_size = max_batch_size
self.max_batch_time = max_batch_time
self.prefetch_depth = prefetch_depth
self.background_flush = background_flush
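# Illustrative sketch (not part of the module API): constructing a custom
# queue configuration. The numbers are made up; see QueueConfig above for the
# defaults used by DataChannel.
def _example_queue_config():
    """Minimal sketch: smaller batches flushed more aggressively."""
    return QueueConfig(max_size=10000, max_batch_size=1000,
                       max_batch_time=0.001, prefetch_depth=10,
                       background_flush=False)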
| apache-2.0 | -8,586,290,594,554,785,000 | 42.986072 | 79 | 0.592869 | false |
bitsabhi/vyked | examples/account_service.py | 5 | 1657 | from vyked import Host, TCPService, TCPServiceClient, api, publish, request, subscribe
import asyncio
REGISTRY_HOST = '127.0.0.1'
REGISTRY_PORT = 4500
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
ACCOUNTS_HOST = '127.0.0.1'
ACCOUNTS_PORT = 4503
class AccountService(TCPService):
def __init__(self, host, port):
super(AccountService, self).__init__("AccountService", 1, host, port)
@api
def authenticate(self, user_name, password):
return user_name
@publish
def logged_out(self, user_name):
return locals()
class AccountClient(TCPServiceClient):
@request
def authenticate(self, user_name, password):
return locals()
@subscribe
def logged_out(self, user_name):
pass
class IdentityClient(TCPServiceClient):
def __init__(self):
super(IdentityClient, self).__init__("IdentityService", 1)
@request
def create(self, user_name, password):
return locals()
@subscribe
def password_changed(self, user_name):
print("Password changed event received")
yield from asyncio.sleep(4)
print("Password changed {}".format(user_name))
def repeat_request(self):
yield from asyncio.sleep(5)
yield from self.create('test', 'test@123')
yield from self.repeat_request()
if __name__ == '__main__':
tcp = AccountService(ACCOUNTS_HOST, ACCOUNTS_PORT)
tcp.clients = [IdentityClient()]
Host.registry_host = REGISTRY_HOST
Host.registry_port = REGISTRY_PORT
Host.pubsub_host = REDIS_HOST
Host.pubsub_port = REDIS_PORT
Host.name = 'Identity'
Host.attach_service(tcp)
Host.run()
| mit | 6,056,624,559,673,627,000 | 24.492308 | 86 | 0.652384 | false |
divio/django | django/core/mail/message.py | 28 | 16149 | from __future__ import unicode_literals
import mimetypes
import os
import random
import time
from email import (
charset as Charset, encoders as Encoders, generator, message_from_string,
)
from email.header import Header
from email.message import Message
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr, formatdate, getaddresses, parseaddr
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils import six
from django.utils.encoding import force_text
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None # Python defaults to BASE64
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
pass
# Copied from Python 3.2+ standard library, with the following modifications:
# * Used cached hostname for performance.
# TODO: replace with email.utils.make_msgid(.., domain=DNS_NAME) when dropping
# Python 2 (Python 2's version doesn't have domain parameter) (#23905).
def make_msgid(idstring=None, domain=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<[email protected]>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id. Optional domain if given provides the
portion of the message id after the '@'. It defaults to the locally
defined hostname.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
pid = os.getpid()
randint = random.randrange(100000)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
if domain is None:
# stdlib uses socket.getfqdn() here instead
domain = DNS_NAME
msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, domain)
return msgid
# Header names that contain structured address data (RFC #5322)
ADDRESS_HEADERS = {
'from',
'sender',
'reply-to',
'to',
'cc',
'bcc',
'resent-from',
'resent-sender',
'resent-to',
'resent-cc',
'resent-bcc',
}
def forbid_multi_line_headers(name, val, encoding):
"""Forbids multi-line headers, to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = force_text(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ADDRESS_HEADERS:
val = ', '.join(sanitize_address(addr, encoding)
for addr in getaddresses((val,)))
else:
val = Header(val, encoding).encode()
else:
if name.lower() == 'subject':
val = Header(val).encode()
return str(name), val
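# Hedged illustration (not part of the original module): header injection is
# rejected by forbid_multi_line_headers(), e.g.
#   forbid_multi_line_headers('Subject', 'Hi\nBcc: [email protected]', 'utf-8')
# raises BadHeaderError, while a plain ASCII value passes through unchanged as
# ('Subject', 'Hi').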
def sanitize_address(addr, encoding):
if not isinstance(addr, tuple):
addr = parseaddr(force_text(addr))
nm, addr = addr
nm = Header(nm, encoding).encode()
try:
addr.encode('ascii')
except UnicodeEncodeError: # IDN
if '@' in addr:
localpart, domain = addr.split('@', 1)
localpart = str(Header(localpart, encoding))
domain = domain.encode('idna').decode('ascii')
addr = '@'.join([localpart, domain])
else:
addr = Header(addr, encoding).encode()
return formataddr((nm, addr))
class MIMEMixin():
def as_string(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as a string.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_string() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = six.StringIO()
g = generator.Generator(fp, mangle_from_=False)
if six.PY2:
g.flatten(self, unixfrom=unixfrom)
else:
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
if six.PY2:
as_bytes = as_string
else:
def as_bytes(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as bytes.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_bytes() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = six.BytesIO()
g = generator.BytesGenerator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
def __setitem__(self, name, val):
# message/rfc822 attachments must be ASCII
name, val = forbid_multi_line_headers(name, val, 'ascii')
MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
def __init__(self, _text, _subtype='plain', _charset=None):
self.encoding = _charset
if _charset == 'utf-8':
# Unfortunately, Python < 3.5 doesn't support setting a Charset instance
# as MIMEText init parameter (http://bugs.python.org/issue16324).
# We do it manually and trigger re-encoding of the payload.
MIMEText.__init__(self, _text, _subtype, None)
del self['Content-Transfer-Encoding']
self.set_payload(_text, utf8_charset)
self.replace_header('Content-Type', 'text/%s; charset="%s"' % (_subtype, _charset))
elif _charset is None:
# the default value of '_charset' is 'us-ascii' on Python 2
MIMEText.__init__(self, _text, _subtype)
else:
MIMEText.__init__(self, _text, _subtype, _charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage(object):
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None,
reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
if isinstance(to, six.string_types):
raise TypeError('"to" argument must be a list or tuple')
self.to = list(to)
else:
self.to = []
if cc:
if isinstance(cc, six.string_types):
raise TypeError('"cc" argument must be a list or tuple')
self.cc = list(cc)
else:
self.cc = []
if bcc:
if isinstance(bcc, six.string_types):
raise TypeError('"bcc" argument must be a list or tuple')
self.bcc = list(bcc)
else:
self.bcc = []
if reply_to:
if isinstance(reply_to, six.string_types):
raise TypeError('"reply_to" argument must be a list or tuple')
self.reply_to = list(reply_to)
else:
self.reply_to = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(self.body, self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(map(force_text, self.to)))
if self.cc:
msg['Cc'] = ', '.join(map(force_text, self.cc))
if self.reply_to:
msg['Reply-To'] = self.extra_headers.get('Reply-To', ', '.join(map(force_text, self.reply_to)))
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
msg['Date'] = formatdate()
if 'message-id' not in header_names:
# Use cached DNS_NAME for performance
msg['Message-ID'] = make_msgid(domain=DNS_NAME)
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).
"""
return self.to + self.cc + self.bcc
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
assert content is None
assert mimetype is None
self.attachments.append(filename)
else:
assert content is not None
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
with open(path, 'rb') as f:
content = f.read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Converts the content, mimetype pair into a MIME attachment object.
If the mimetype is message/rfc822, content may be an
email.Message or EmailMessage object, as well as a str.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(content, subtype, encoding)
elif basetype == 'message' and subtype == 'rfc822':
# Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
# must not be base64 encoded.
if isinstance(content, EmailMessage):
# convert content into an email.Message first
content = content.message()
elif not isinstance(content, Message):
# For compatibility with existing code, parse the message
# into an email.Message object if it is not one already.
content = message_from_string(content)
attachment = SafeMIMEMessage(content, subtype)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
if mimetype is None:
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
attachment = self._create_mime_attachment(content, mimetype)
if filename:
try:
filename.encode('ascii')
except UnicodeEncodeError:
if six.PY2:
filename = filename.encode('utf-8')
filename = ('utf-8', '', filename)
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None,
cc=None, reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings (or UTF-8
bytestrings). The SafeMIMEText class will handle any necessary encoding
conversions.
"""
super(EmailMultiAlternatives, self).__init__(
subject, body, from_email, to, bcc, connection, attachments,
headers, cc, reply_to,
)
self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
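if __name__ == '__main__':
    # Hedged usage sketch (not part of this module): build a multipart message
    # with the classes above. The addresses are made-up examples; actually
    # sending requires configured Django settings and an email backend, so this
    # only constructs the message object.
    msg = EmailMultiAlternatives(
        subject='Greetings',
        body='Plain-text body',
        from_email='[email protected]',
        to=['[email protected]'],
    )
    msg.attach_alternative('<p>HTML body</p>', 'text/html')
    print(msg.recipients())  # ['[email protected]']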
| bsd-3-clause | 4,601,302,792,816,258,600 | 36.731308 | 107 | 0.613846 | false |
qsnake/numpy | numpy/f2py/auxfuncs.py | 8 | 19521 | #!/usr/bin/env python
"""
Auxiliary functions for f2py2e.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) LICENSE.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/07/24 19:01:55 $
Pearu Peterson
"""
__version__ = "$Revision: 1.65 $"[10:-1]
import __version__
f2py_version = __version__.version
import pprint
import sys
import types
import cfuncs
errmess=sys.stderr.write
#outmess=sys.stdout.write
show=pprint.pprint
options={}
debugoptions=[]
wrapfuncs = 1
if sys.version_info[0] >= 3:
from functools import reduce
def outmess(t):
if options.get('verbose',1):
sys.stdout.write(t)
def debugcapi(var):
return 'capi' in debugoptions
def _isstring(var):
return 'typespec' in var and var['typespec']=='character' and (not isexternal(var))
def isstring(var):
return _isstring(var) and not isarray(var)
def ischaracter(var):
return isstring(var) and 'charselector' not in var
def isstringarray(var):
return isarray(var) and _isstring(var)
def isarrayofstrings(var):
# leaving out '*' for now so that
# `character*(*) a(m)` and `character a(m,*)`
# are treated differently. Luckily `character**` is illegal.
return isstringarray(var) and var['dimension'][-1]=='(*)'
def isarray(var):
return 'dimension' in var and (not isexternal(var))
def isscalar(var):
return not (isarray(var) or isstring(var) or isexternal(var))
def iscomplex(var):
return isscalar(var) and var.get('typespec') in ['complex','double complex']
def islogical(var):
return isscalar(var) and var.get('typespec')=='logical'
def isinteger(var):
return isscalar(var) and var.get('typespec')=='integer'
def isreal(var):
return isscalar(var) and var.get('typespec')=='real'
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def islong_long(var):
if not isscalar(var):
return 0
if var.get('typespec') not in ['integer','logical']:
return 0
return get_kind(var)=='8'
def isunsigned_char(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-1'
def isunsigned_short(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-2'
def isunsigned(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-4'
def isunsigned_long_long(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-8'
def isdouble(var):
if not isscalar(var):
return 0
if not var.get('typespec')=='real':
return 0
return get_kind(var)=='8'
def islong_double(var):
if not isscalar(var):
return 0
if not var.get('typespec')=='real':
return 0
return get_kind(var)=='16'
def islong_complex(var):
if not iscomplex(var):
return 0
return get_kind(var)=='32'
def iscomplexarray(var):
return isarray(var) and var.get('typespec') in ['complex','double complex']
def isint1array(var):
return isarray(var) and var.get('typespec')=='integer' \
and get_kind(var)=='1'
def isunsigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-1'
def isunsigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-2'
def isunsignedarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-4'
def isunsigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-8'
def issigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='1'
def issigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='2'
def issigned_array(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='4'
def issigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='8'
def isallocatable(var):
return 'attrspec' in var and 'allocatable' in var['attrspec']
def ismutable(var):
return not (not 'dimension' in var or isstring(var))
def ismoduleroutine(rout):
return 'modulename' in rout
def ismodule(rout):
return ('block' in rout and 'module'==rout['block'])
def isfunction(rout):
return ('block' in rout and 'function'==rout['block'])
#def isfunction_wrap(rout):
# return wrapfuncs and (iscomplexfunction(rout) or isstringfunction(rout)) and (not isexternal(rout))
def isfunction_wrap(rout):
if isintent_c(rout):
return 0
return wrapfuncs and isfunction(rout) and (not isexternal(rout))
def issubroutine(rout):
return ('block' in rout and 'subroutine'==rout['block'])
def isroutine(rout):
return isfunction(rout) or issubroutine(rout)
def islogicalfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islogical(rout['vars'][a])
return 0
def islong_longfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islong_long(rout['vars'][a])
return 0
def islong_doublefunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islong_double(rout['vars'][a])
return 0
def iscomplexfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return iscomplex(rout['vars'][a])
return 0
def iscomplexfunction_warn(rout):
if iscomplexfunction(rout):
outmess("""\
**************************************************************
Warning: code with a function returning complex value
may not work correctly with your Fortran compiler.
Run the following test before using it in your applications:
$(f2py install dir)/test-site/{b/runme_scalar,e/runme}
When using GNU gcc/g77 compilers, codes should work correctly.
**************************************************************\n""")
return 1
return 0
def isstringfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return isstring(rout['vars'][a])
return 0
def hasexternals(rout):
return 'externals' in rout and rout['externals']
def isthreadsafe(rout):
return 'f2pyenhancements' in rout and 'threadsafe' in rout['f2pyenhancements']
def hasvariables(rout):
return 'vars' in rout and rout['vars']
def isoptional(var):
return ('attrspec' in var and 'optional' in var['attrspec'] and 'required' not in var['attrspec']) and isintent_nothide(var)
def isexternal(var):
return ('attrspec' in var and 'external' in var['attrspec'])
def isrequired(var):
return not isoptional(var) and isintent_nothide(var)
def isintent_in(var):
if 'intent' not in var:
return 1
if 'hide' in var['intent']:
return 0
if 'inplace' in var['intent']:
return 0
if 'in' in var['intent']:
return 1
if 'out' in var['intent']:
return 0
if 'inout' in var['intent']:
return 0
if 'outin' in var['intent']:
return 0
return 1
def isintent_inout(var):
return 'intent' in var and ('inout' in var['intent'] or 'outin' in var['intent']) and 'in' not in var['intent'] and 'hide' not in var['intent'] and 'inplace' not in var['intent']
def isintent_out(var):
return 'out' in var.get('intent',[])
def isintent_hide(var):
return ('intent' in var and ('hide' in var['intent'] or ('out' in var['intent'] and 'in' not in var['intent'] and (not l_or(isintent_inout,isintent_inplace)(var)))))
def isintent_nothide(var):
return not isintent_hide(var)
def isintent_c(var):
return 'c' in var.get('intent',[])
# def isintent_f(var):
# return not isintent_c(var)
def isintent_cache(var):
return 'cache' in var.get('intent',[])
def isintent_copy(var):
return 'copy' in var.get('intent',[])
def isintent_overwrite(var):
return 'overwrite' in var.get('intent',[])
def isintent_callback(var):
return 'callback' in var.get('intent',[])
def isintent_inplace(var):
return 'inplace' in var.get('intent',[])
def isintent_aux(var):
return 'aux' in var.get('intent',[])
def isintent_aligned4(var):
return 'aligned4' in var.get('intent',[])
def isintent_aligned8(var):
return 'aligned8' in var.get('intent',[])
def isintent_aligned16(var):
return 'aligned16' in var.get('intent',[])
isintent_dict = {isintent_in:'INTENT_IN',isintent_inout:'INTENT_INOUT',
isintent_out:'INTENT_OUT',isintent_hide:'INTENT_HIDE',
isintent_cache:'INTENT_CACHE',
isintent_c:'INTENT_C',isoptional:'OPTIONAL',
isintent_inplace:'INTENT_INPLACE',
isintent_aligned4:'INTENT_ALIGNED4',
isintent_aligned8:'INTENT_ALIGNED8',
isintent_aligned16:'INTENT_ALIGNED16',
}
def isprivate(var):
return 'attrspec' in var and 'private' in var['attrspec']
def hasinitvalue(var):
return '=' in var
def hasinitvalueasstring(var):
if not hasinitvalue(var):
return 0
return var['='][0] in ['"',"'"]
def hasnote(var):
return 'note' in var
def hasresultnote(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return hasnote(rout['vars'][a])
return 0
def hascommon(rout):
return 'common' in rout
def containscommon(rout):
if hascommon(rout):
return 1
if hasbody(rout):
for b in rout['body']:
if containscommon(b):
return 1
return 0
def containsmodule(block):
if ismodule(block):
return 1
if not hasbody(block):
return 0
for b in block['body']:
if containsmodule(b):
return 1
return 0
def hasbody(rout):
return 'body' in rout
def hascallstatement(rout):
return getcallstatement(rout) is not None
def istrue(var):
return 1
def isfalse(var):
return 0
class F2PYError(Exception):
pass
class throw_error:
def __init__(self,mess):
self.mess = mess
def __call__(self,var):
mess = '\n\n var = %s\n Message: %s\n' % (var,self.mess)
raise F2PYError,mess
def l_and(*f):
l,l2='lambda v',[]
for i in range(len(f)):
l='%s,f%d=f[%d]'%(l,i,i)
l2.append('f%d(v)'%(i))
return eval('%s:%s'%(l,' and '.join(l2)))
def l_or(*f):
l,l2='lambda v',[]
for i in range(len(f)):
l='%s,f%d=f[%d]'%(l,i,i)
l2.append('f%d(v)'%(i))
return eval('%s:%s'%(l,' or '.join(l2)))
def l_not(f):
return eval('lambda v,f=f:not f(v)')
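# Hedged illustration (not part of the original file): l_and/l_or/l_not compose
# the predicates above into new predicates over variable dictionaries, e.g.
#   isscalar_in = l_and(isscalar, isintent_in)
#   isscalar_in({'typespec': 'integer'})    # truthy: scalar with default intent(in)
#   l_not(isstring)({'typespec': 'real'})   # truthy: not a character variable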
def isdummyroutine(rout):
try:
return rout['f2pyenhancements']['fortranname']==''
except KeyError:
return 0
def getfortranname(rout):
try:
name = rout['f2pyenhancements']['fortranname']
if name=='':
raise KeyError
if not name:
errmess('Failed to use fortranname from %s\n'%(rout['f2pyenhancements']))
raise KeyError
except KeyError:
name = rout['name']
return name
def getmultilineblock(rout,blockname,comment=1,counter=0):
try:
r = rout['f2pyenhancements'].get(blockname)
except KeyError:
return
if not r: return
if counter>0 and type(r) is type(''):
return
if type(r) is type([]):
if counter>=len(r): return
r = r[counter]
if r[:3]=="'''":
if comment:
r = '\t/* start ' + blockname + ' multiline ('+`counter`+') */\n' + r[3:]
else:
r = r[3:]
if r[-3:]=="'''":
if comment:
r = r[:-3] + '\n\t/* end multiline ('+`counter`+')*/'
else:
r = r[:-3]
else:
errmess("%s multiline block should end with `'''`: %s\n" \
% (blockname,repr(r)))
return r
def getcallstatement(rout):
return getmultilineblock(rout,'callstatement')
def getcallprotoargument(rout,cb_map={}):
r = getmultilineblock(rout,'callprotoargument',comment=0)
if r: return r
if hascallstatement(rout):
outmess('warning: callstatement is defined without callprotoargument\n')
return
from capi_maps import getctype
arg_types,arg_types2 = [],[]
if l_and(isstringfunction,l_not(isfunction_wrap))(rout):
arg_types.extend(['char*','size_t'])
for n in rout['args']:
var = rout['vars'][n]
if isintent_callback(var):
continue
if n in cb_map:
ctype = cb_map[n]+'_typedef'
else:
ctype = getctype(var)
if l_and(isintent_c,l_or(isscalar,iscomplex))(var):
pass
elif isstring(var):
pass
#ctype = 'void*'
else:
ctype = ctype+'*'
if isstring(var) or isarrayofstrings(var):
arg_types2.append('size_t')
arg_types.append(ctype)
proto_args = ','.join(arg_types+arg_types2)
if not proto_args:
proto_args = 'void'
#print proto_args
return proto_args
def getusercode(rout):
return getmultilineblock(rout,'usercode')
def getusercode1(rout):
return getmultilineblock(rout,'usercode',counter=1)
def getpymethoddef(rout):
return getmultilineblock(rout,'pymethoddef')
def getargs(rout):
sortargs,args=[],[]
if 'args' in rout:
args=rout['args']
if 'sortvars' in rout:
for a in rout['sortvars']:
if a in args: sortargs.append(a)
for a in args:
if a not in sortargs:
sortargs.append(a)
else: sortargs=rout['args']
return args,sortargs
def getargs2(rout):
sortargs,args=[],rout.get('args',[])
auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])\
and a not in args]
args = auxvars + args
if 'sortvars' in rout:
for a in rout['sortvars']:
if a in args: sortargs.append(a)
for a in args:
if a not in sortargs:
sortargs.append(a)
else: sortargs=auxvars + rout['args']
return args,sortargs
def getrestdoc(rout):
if 'f2pymultilines' not in rout:
return None
k = None
if rout['block']=='python module':
k = rout['block'],rout['name']
return rout['f2pymultilines'].get(k,None)
def gentitle(name):
l=(80-len(name)-6)//2
return '/*%s %s %s*/'%(l*'*',name,l*'*')
def flatlist(l):
if type(l)==types.ListType:
return reduce(lambda x,y,f=flatlist:x+f(y),l,[])
return [l]
def stripcomma(s):
if s and s[-1]==',': return s[:-1]
return s
def replace(str,d,defaultsep=''):
if type(d)==types.ListType:
return map(lambda d,f=replace,sep=defaultsep,s=str:f(s,d,sep),d)
if type(str)==types.ListType:
return map(lambda s,f=replace,sep=defaultsep,d=d:f(s,d,sep),str)
for k in 2*d.keys():
if k=='separatorsfor':
continue
if 'separatorsfor' in d and k in d['separatorsfor']:
sep=d['separatorsfor'][k]
else:
sep=defaultsep
if type(d[k])==types.ListType:
str=str.replace('#%s#'%(k),sep.join(flatlist(d[k])))
else:
str=str.replace('#%s#'%(k),d[k])
return str
def dictappend(rd,ar):
if type(ar)==types.ListType:
for a in ar:
rd=dictappend(rd,a)
return rd
for k in ar.keys():
if k[0]=='_':
continue
if k in rd:
if type(rd[k])==str:
rd[k]=[rd[k]]
if type(rd[k])==types.ListType:
if type(ar[k])==types.ListType:
rd[k]=rd[k]+ar[k]
else:
rd[k].append(ar[k])
elif type(rd[k])==types.DictType:
if type(ar[k])==types.DictType:
if k=='separatorsfor':
for k1 in ar[k].keys():
if k1 not in rd[k]:
rd[k][k1]=ar[k][k1]
else:
rd[k]=dictappend(rd[k],ar[k])
else:
rd[k]=ar[k]
return rd
def applyrules(rules,d,var={}):
ret={}
if type(rules)==types.ListType:
for r in rules:
rr=applyrules(r,d,var)
ret=dictappend(ret,rr)
if '_break' in rr:
break
return ret
if '_check' in rules and (not rules['_check'](var)):
return ret
if 'need' in rules:
res = applyrules({'needs':rules['need']},d,var)
if 'needs' in res:
cfuncs.append_needs(res['needs'])
for k in rules.keys():
if k=='separatorsfor':
ret[k]=rules[k]; continue
if type(rules[k])==str:
ret[k]=replace(rules[k],d)
elif type(rules[k])==types.ListType:
ret[k]=[]
for i in rules[k]:
ar=applyrules({k:i},d,var)
if k in ar:
ret[k].append(ar[k])
elif k[0]=='_':
continue
elif type(rules[k])==types.DictType:
ret[k]=[]
for k1 in rules[k].keys():
if type(k1)==types.FunctionType and k1(var):
if type(rules[k][k1])==types.ListType:
for i in rules[k][k1]:
if type(i)==types.DictType:
res=applyrules({'supertext':i},d,var)
if 'supertext' in res:
i=res['supertext']
else: i=''
ret[k].append(replace(i,d))
else:
i=rules[k][k1]
if type(i)==types.DictType:
res=applyrules({'supertext':i},d)
if 'supertext' in res:
i=res['supertext']
else: i=''
ret[k].append(replace(i,d))
else:
errmess('applyrules: ignoring rule %s.\n'%`rules[k]`)
if type(ret[k])==types.ListType:
if len(ret[k])==1:
ret[k]=ret[k][0]
if ret[k]==[]:
del ret[k]
return ret
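if __name__ == '__main__':
    # Hedged demo (not part of f2py): exercise a few of the pure string helpers
    # defined above.
    print gentitle('demo')            # banner line padded with '*' to about 80 columns
    print stripcomma('a,b,c,')        # 'a,b,c'
    print flatlist([1, [2, [3, 4]]])  # [1, 2, 3, 4]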
| bsd-3-clause | -8,395,359,681,943,064,000 | 27.08777 | 182 | 0.562369 | false |
bjornlevi/5thpower | nefndaralit/env/lib/python3.6/site-packages/lxml/builder.py | 18 | 7907 | #
# Element generator factory by Fredrik Lundh.
#
# Source:
# http://online.effbot.org/2006_11_01_archive.htm#et-builder
# http://effbot.python-hosting.com/file/stuff/sandbox/elementlib/builder.py
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
The ``E`` Element factory for generating XML documents.
"""
import lxml.etree as ET
from functools import partial
try:
basestring
except NameError:
basestring = str
try:
unicode
except NameError:
unicode = str
class ElementMaker(object):
"""Element generator factory.
Unlike the ordinary Element factory, the E factory allows you to pass in
more than just a tag and some optional attributes; you can also pass in
text and other elements. The text is added as either text or tail
attributes, and elements are inserted at the right spot. Some small
examples::
>>> from lxml import etree as ET
>>> from lxml.builder import E
>>> ET.tostring(E("tag"))
'<tag/>'
>>> ET.tostring(E("tag", "text"))
'<tag>text</tag>'
>>> ET.tostring(E("tag", "text", key="value"))
'<tag key="value">text</tag>'
>>> ET.tostring(E("tag", E("subtag", "text"), "tail"))
'<tag><subtag>text</subtag>tail</tag>'
For simple tags, the factory also allows you to write ``E.tag(...)`` instead
of ``E('tag', ...)``::
>>> ET.tostring(E.tag())
'<tag/>'
>>> ET.tostring(E.tag("text"))
'<tag>text</tag>'
>>> ET.tostring(E.tag(E.subtag("text"), "tail"))
'<tag><subtag>text</subtag>tail</tag>'
Here's a somewhat larger example; this shows how to generate HTML
documents, using a mix of prepared factory functions for inline elements,
nested ``E.tag`` calls, and embedded XHTML fragments::
# some common inline elements
A = E.a
I = E.i
B = E.b
def CLASS(v):
# helper function, 'class' is a reserved word
return {'class': v}
page = (
E.html(
E.head(
E.title("This is a sample document")
),
E.body(
E.h1("Hello!", CLASS("title")),
E.p("This is a paragraph with ", B("bold"), " text in it!"),
E.p("This is another paragraph, with a ",
A("link", href="http://www.python.org"), "."),
E.p("Here are some reserved characters: <spam&egg>."),
ET.XML("<p>And finally, here is an embedded XHTML fragment.</p>"),
)
)
)
print ET.tostring(page)
Here's a prettyprinted version of the output from the above script::
<html>
<head>
<title>This is a sample document</title>
</head>
<body>
<h1 class="title">Hello!</h1>
<p>This is a paragraph with <b>bold</b> text in it!</p>
<p>This is another paragraph, with <a href="http://www.python.org">link</a>.</p>
<p>Here are some reserved characters: <spam&egg>.</p>
<p>And finally, here is an embedded XHTML fragment.</p>
</body>
</html>
For namespace support, you can pass a namespace map (``nsmap``)
and/or a specific target ``namespace`` to the ElementMaker class::
>>> E = ElementMaker(namespace="http://my.ns/")
>>> print(ET.tostring( E.test ))
<test xmlns="http://my.ns/"/>
>>> E = ElementMaker(namespace="http://my.ns/", nsmap={'p':'http://my.ns/'})
>>> print(ET.tostring( E.test ))
<p:test xmlns:p="http://my.ns/"/>
"""
def __init__(self, typemap=None,
namespace=None, nsmap=None, makeelement=None):
if namespace is not None:
self._namespace = '{' + namespace + '}'
else:
self._namespace = None
if nsmap:
self._nsmap = dict(nsmap)
else:
self._nsmap = None
if makeelement is not None:
assert callable(makeelement)
self._makeelement = makeelement
else:
self._makeelement = ET.Element
# initialize type map for this element factory
if typemap:
typemap = dict(typemap)
else:
typemap = {}
def add_text(elem, item):
try:
elem[-1].tail = (elem[-1].tail or "") + item
except IndexError:
elem.text = (elem.text or "") + item
def add_cdata(elem, cdata):
if elem.text:
raise ValueError("Can't add a CDATA section. Element already has some text: %r" % elem.text)
elem.text = cdata
if str not in typemap:
typemap[str] = add_text
if unicode not in typemap:
typemap[unicode] = add_text
if ET.CDATA not in typemap:
typemap[ET.CDATA] = add_cdata
def add_dict(elem, item):
attrib = elem.attrib
for k, v in item.items():
if isinstance(v, basestring):
attrib[k] = v
else:
attrib[k] = typemap[type(v)](None, v)
if dict not in typemap:
typemap[dict] = add_dict
self._typemap = typemap
def __call__(self, tag, *children, **attrib):
typemap = self._typemap
if self._namespace is not None and tag[0] != '{':
tag = self._namespace + tag
elem = self._makeelement(tag, nsmap=self._nsmap)
if attrib:
typemap[dict](elem, attrib)
for item in children:
if callable(item):
item = item()
t = typemap.get(type(item))
if t is None:
if ET.iselement(item):
elem.append(item)
continue
for basetype in type(item).__mro__:
# See if the typemap knows of any of this type's bases.
t = typemap.get(basetype)
if t is not None:
break
else:
raise TypeError("bad argument type: %s(%r)" %
(type(item).__name__, item))
v = t(elem, item)
if v:
typemap.get(type(v))(elem, v)
return elem
def __getattr__(self, tag):
return partial(self, tag)
# create factory object
E = ElementMaker()
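if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): build a small tree
    # with the module-level E factory defined above and serialize it.
    page = E.html(
        E.head(E.title("Sample")),
        E.body(E.p("Hello, ", E.b("world"), "!"), {"class": "demo"}),
    )
    print(ET.tostring(page))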
| mit | -5,038,378,353,767,283,000 | 32.646809 | 108 | 0.549007 | false |
ddebrunner/streamsx.topology | test/python/spl/tk17/opt/.__splpy/packages/streamsx/topology/tester_runtime.py | 3 | 5369 | # coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2017
"""
Contains test related code that is executed at runtime
in the context of the application under test.
"""
import streamsx.ec as ec
import streamsx.topology.context as stc
import os
import unittest
import logging
import collections
import threading
from streamsx.rest import StreamsConnection
from streamsx.rest import StreamingAnalyticsConnection
from streamsx.topology.context import ConfigParams
import time
class Condition(object):
"""A condition for testing.
Args:
name(str): Condition name, must be unique within the tester.
"""
_METRIC_PREFIX = "streamsx.condition:"
@staticmethod
def _mn(mt, name):
return Condition._METRIC_PREFIX + mt + ":" + name
def __init__(self, name=None):
self.name = name
self._starts_valid = False
self._valid = False
self._fail = False
@property
def valid(self):
"""Is the condition valid.
A subclass must set `valid` when the condition becomes valid.
"""
return self._valid
@valid.setter
def valid(self, v):
if self._fail:
return
if self._valid != v:
if v:
self._metric_valid.value = 1
else:
self._metric_valid.value = 0
self._valid = v
self._metric_seq += 1
def fail(self):
"""Fail the condition.
Marks the condition as failed. Once a condition has failed it
        can never become valid, and the test that uses the condition will fail.
"""
self._metric_fail.value = 1
self.valid = False
self._fail = True
if (ec.is_standalone()):
raise AssertionError("Condition failed:" + str(self))
def __getstate__(self):
# Remove metrics from saved state.
state = self.__dict__.copy()
for key in state:
if key.startswith('_metric'):
del state[key]
return state
def __setstate__(self, state):
self.__dict__.update(state)
def __enter__(self):
self._metric_valid = self._create_metric("valid", kind='Gauge')
self._metric_seq = self._create_metric("seq")
self._metric_fail = self._create_metric("fail", kind='Gauge')
if self._starts_valid:
self.valid = True
def __exit__(self, exc_type, exc_value, traceback):
if (ec.is_standalone()):
if not self._fail and not self.valid:
raise AssertionError("Condition failed:" + str(self))
def _create_metric(self, mt, kind=None):
return ec.CustomMetric(self, name=Condition._mn(mt, self.name), kind=kind)
class _TupleExactCount(Condition):
def __init__(self, target, name=None):
super(_TupleExactCount, self).__init__(name)
self.target = target
self.count = 0
self._starts_valid = target == 0
def __call__(self, tuple):
self.count += 1
self.valid = self.target == self.count
if self.count > self.target:
self.fail()
def __str__(self):
return "Exact tuple count: expected:" + str(self.target) + " received:" + str(self.count)
class _TupleAtLeastCount(Condition):
def __init__(self, target, name=None):
super(_TupleAtLeastCount, self).__init__(name)
self.target = target
self.count = 0
self._starts_valid = target == 0
def __call__(self, tuple):
self.count += 1
self.valid = self.count >= self.target
def __str__(self):
return "At least tuple count: expected:" + str(self.target) + " received:" + str(self.count)
class _StreamContents(Condition):
def __init__(self, expected, name=None):
super(_StreamContents, self).__init__(name)
self.expected = expected
self.received = []
def __call__(self, tuple):
self.received.append(tuple)
if len(self.received) > len(self.expected):
self.fail()
return
if self._check_for_failure():
return
self.valid = len(self.received) == len(self.expected)
def _check_for_failure(self):
"""Check for failure.
"""
if self.expected[len(self.received) - 1] != self.received[-1]:
self.fail()
return True
return False
def __str__(self):
return "Stream contents: expected:" + str(self.expected) + " received:" + str(self.received)
class _UnorderedStreamContents(_StreamContents):
def _check_for_failure(self):
"""Unordered check for failure.
Can only check when the expected number of tuples have been received.
"""
if len(self.expected) == len(self.received):
if collections.Counter(self.expected) != collections.Counter(self.received):
self.fail()
return True
return False
class _TupleCheck(Condition):
def __init__(self, checker, name=None):
super(_TupleCheck, self).__init__(name)
self.checker = checker
def __call__(self, tuple):
if not self.checker(tuple):
self.fail()
else:
# Will not override if already failed
self.valid = True
def __str__(self):
return "Tuple checker:" + str(self.checker)
| apache-2.0 | -6,090,504,589,488,551,000 | 28.827778 | 100 | 0.587074 | false |
drakuna/odoo | openerp/addons/base/res/res_lang.py | 3 | 13756 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import locale
from locale import localeconv
import logging
from operator import itemgetter
import re
from openerp import tools, SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
class lang(osv.osv):
_name = "res.lang"
_description = "Languages"
_disallowed_datetime_patterns = tools.DATETIME_FORMATS_MAP.keys()
_disallowed_datetime_patterns.remove('%y') # this one is in fact allowed, just not good practice
def install_lang(self, cr, uid, **args):
"""
This method is called from openerp/addons/base/base_data.xml to load
        some language and set it as the default for all partners. The
language is set via tools.config by the RPC 'create' method on the
'db' object. This is a fragile solution and something else should be
found.
"""
# config['load_language'] is a comma-separated list or None
lang = (tools.config['load_language'] or 'en_US').split(',')[0]
lang_ids = self.search(cr, uid, [('code','=', lang)])
if not lang_ids:
self.load_lang(cr, uid, lang)
ir_values_obj = self.pool.get('ir.values')
default_value = ir_values_obj.get(cr, uid, 'default', False, ['res.partner'])
if not default_value:
ir_values_obj.set(cr, uid, 'default', False, 'lang', ['res.partner'], lang)
return True
def load_lang(self, cr, uid, lang, lang_name=None):
""" Create the given language if necessary, and make it active. """
# if the language exists, simply make it active
lang_ids = self.search(cr, uid, [('code', '=', lang)], context={'active_test': False})
if lang_ids:
self.write(cr, uid, lang_ids, {'active': True})
return lang_ids[0]
# create the language with locale information
fail = True
iso_lang = tools.get_iso_codes(lang)
for ln in tools.get_locales(lang):
try:
locale.setlocale(locale.LC_ALL, str(ln))
fail = False
break
except locale.Error:
continue
if fail:
lc = locale.getdefaultlocale()[0]
            msg = 'Unable to get information for locale %s. Information from the default locale (%s) has been used.'
_logger.warning(msg, lang, lc)
if not lang_name:
lang_name = lang
def fix_xa0(s):
"""Fix badly-encoded non-breaking space Unicode character from locale.localeconv(),
            coercing to utf-8, as some platforms seem to output localeconv() in their system
encoding, e.g. Windows-1252"""
if s == '\xa0':
return '\xc2\xa0'
return s
def fix_datetime_format(format):
"""Python's strftime supports only the format directives
that are available on the platform's libc, so in order to
be 100% cross-platform we map to the directives required by
the C standard (1989 version), always available on platforms
with a C standard implementation."""
# For some locales, nl_langinfo returns a D_FMT/T_FMT that contains
# unsupported '%-' patterns, e.g. for cs_CZ
format = format.replace('%-', '%')
for pattern, replacement in tools.DATETIME_FORMATS_MAP.iteritems():
format = format.replace(pattern, replacement)
return str(format)
lang_info = {
'code': lang,
'iso_code': iso_lang,
'name': lang_name,
'active': True,
'translatable': True,
'date_format' : fix_datetime_format(locale.nl_langinfo(locale.D_FMT)),
'time_format' : fix_datetime_format(locale.nl_langinfo(locale.T_FMT)),
'decimal_point' : fix_xa0(str(locale.localeconv()['decimal_point'])),
'thousands_sep' : fix_xa0(str(locale.localeconv()['thousands_sep'])),
}
lang_id = False
try:
lang_id = self.create(cr, uid, lang_info)
finally:
tools.resetlocale()
return lang_id
def _register_hook(self, cr):
# check that there is at least one active language
if not self.search_count(cr, SUPERUSER_ID, []):
_logger.error("No language is active.")
def _check_active(self, cr, uid, ids, context=None):
# do not check during installation
return not self.pool.ready or bool(self.search_count(cr, uid, []))
def _check_format(self, cr, uid, ids, context=None):
for lang in self.browse(cr, uid, ids, context=context):
for pattern in self._disallowed_datetime_patterns:
if (lang.time_format and pattern in lang.time_format)\
or (lang.date_format and pattern in lang.date_format):
return False
return True
def _check_grouping(self, cr, uid, ids, context=None):
for lang in self.browse(cr, uid, ids, context=context):
try:
if not all(isinstance(x, int) for x in eval(lang.grouping)):
return False
except Exception:
return False
return True
def _get_default_date_format(self, cursor, user, context=None):
return '%m/%d/%Y'
def _get_default_time_format(self, cursor, user, context=None):
return '%H:%M:%S'
_columns = {
'name': fields.char('Name', required=True),
'code': fields.char('Locale Code', size=16, required=True, help='This field is used to set/get locales for user'),
'iso_code': fields.char('ISO code', size=16, required=False, help='This ISO code is the name of po files to use for translations'),
'translatable': fields.boolean('Translatable'),
'active': fields.boolean('Active'),
'direction': fields.selection([('ltr', 'Left-to-Right'), ('rtl', 'Right-to-Left')], 'Direction', required=True),
'date_format':fields.char('Date Format', required=True),
'time_format':fields.char('Time Format', required=True),
'grouping':fields.char('Separator Format', required=True,help="The Separator Format should be like [,n] where 0 < n :starting from Unit digit.-1 will end the separation. e.g. [3,2,-1] will represent 106500 to be 1,06,500;[1,2,-1] will represent it to be 106,50,0;[3] will represent it as 106,500. Provided ',' as the thousand separator in each case."),
'decimal_point':fields.char('Decimal Separator', required=True),
'thousands_sep':fields.char('Thousands Separator'),
}
_defaults = {
'active': False,
'translatable': False,
'direction': 'ltr',
'date_format':_get_default_date_format,
'time_format':_get_default_time_format,
'grouping': '[]',
'decimal_point': '.',
'thousands_sep': ',',
}
_sql_constraints = [
('name_uniq', 'unique (name)', 'The name of the language must be unique !'),
('code_uniq', 'unique (code)', 'The code of the language must be unique !'),
]
_constraints = [
(_check_active, "At least one language must be active.", ['active']),
(_check_format, 'Invalid date/time format directive specified. Please refer to the list of allowed directives, displayed when you edit a language.', ['time_format', 'date_format']),
(_check_grouping, "The Separator Format should be like [,n] where 0 < n :starting from Unit digit.-1 will end the separation. e.g. [3,2,-1] will represent 106500 to be 1,06,500;[1,2,-1] will represent it to be 106,50,0;[3] will represent it as 106,500. Provided ',' as the thousand separator in each case.", ['grouping'])
]
@tools.ormcache('lang')
def _lang_get(self, cr, uid, lang):
lang_ids = self.search(cr, uid, [('code', '=', lang)]) or \
self.search(cr, uid, [('code', '=', 'en_US')]) or \
self.search(cr, uid, [])
return lang_ids[0]
@tools.ormcache('lang', 'monetary')
def _lang_data_get(self, cr, uid, lang, monetary=False):
if type(lang) in (str, unicode):
lang = self._lang_get(cr, uid, lang)
conv = localeconv()
lang_obj = self.browse(cr, uid, lang)
thousands_sep = lang_obj.thousands_sep or conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
decimal_point = lang_obj.decimal_point
grouping = lang_obj.grouping
return grouping, thousands_sep, decimal_point
@tools.ormcache()
def get_available(self, cr, uid, context=None):
""" Return the available languages as a list of (code, name) sorted by name. """
langs = self.browse(cr, uid, self.search(cr, uid, [], context={'active_test': False}))
return sorted([(lang.code, lang.name) for lang in langs], key=itemgetter(1))
@tools.ormcache()
def get_installed(self, cr, uid, context=None):
""" Return the installed languages as a list of (code, name) sorted by name. """
langs = self.browse(cr, uid, self.search(cr, uid, []))
return sorted([(lang.code, lang.name) for lang in langs], key=itemgetter(1))
def create(self, cr, uid, vals, context=None):
self.clear_caches()
return super(lang, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if vals.get('active') == False:
users = self.pool.get('res.users')
for current_id in ids:
current_language = self.browse(cr, uid, current_id, context=context)
if users.search(cr, uid, [('lang', '=', current_language.code)], context=context):
raise UserError(_("Cannot unactivate a language that is currently used by users."))
self.clear_caches()
return super(lang, self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
languages = self.read(cr, uid, ids, ['code','active'], context=context)
for language in languages:
ctx_lang = context.get('lang')
if language['code']=='en_US':
raise UserError(_("Base Language 'en_US' can not be deleted!"))
if ctx_lang and (language['code']==ctx_lang):
raise UserError(_("You cannot delete the language which is User's Preferred Language!"))
if language['active']:
raise UserError(_("You cannot delete the language which is Active!\nPlease de-activate the language first."))
trans_obj = self.pool.get('ir.translation')
trans_ids = trans_obj.search(cr, uid, [('lang','=',language['code'])], context=context)
trans_obj.unlink(cr, uid, trans_ids, context=context)
self.clear_caches()
return super(lang, self).unlink(cr, uid, ids, context=context)
#
# IDS: can be a list of IDS or a list of XML_IDS
#
def format(self, cr, uid, ids, percent, value, grouping=False, monetary=False, context=None):
""" Format() will return the language-specific output for float values"""
if percent[0] != '%':
raise ValueError("format() must be given exactly one %char format specifier")
formatted = percent % value
# floats and decimal ints need special action!
if grouping:
lang_grouping, thousands_sep, decimal_point = \
self._lang_data_get(cr, uid, ids[0], monetary)
eval_lang_grouping = eval(lang_grouping)
if percent[-1] in 'eEfFgG':
parts = formatted.split('.')
parts[0], _ = intersperse(parts[0], eval_lang_grouping, thousands_sep)
formatted = decimal_point.join(parts)
elif percent[-1] in 'diu':
formatted = intersperse(formatted, eval_lang_grouping, thousands_sep)[0]
return formatted
# import re, operator
# _percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
# r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
lang()
def split(l, counts):
"""
>>> split("hello world", [])
['hello world']
>>> split("hello world", [1])
['h', 'ello world']
>>> split("hello world", [2])
['he', 'llo world']
>>> split("hello world", [2,3])
['he', 'llo', ' world']
>>> split("hello world", [2,3,0])
['he', 'llo', ' wo', 'rld']
>>> split("hello world", [2,-1,3])
['he', 'llo world']
"""
res = []
saved_count = len(l) # count to use when encoutering a zero
for count in counts:
if not l:
break
if count == -1:
break
if count == 0:
while l:
res.append(l[:saved_count])
l = l[saved_count:]
break
res.append(l[:count])
l = l[count:]
saved_count = count
if l:
res.append(l)
return res
intersperse_pat = re.compile('([^0-9]*)([^ ]*)(.*)')
def intersperse(string, counts, separator=''):
"""
See the asserts below for examples.
"""
left, rest, right = intersperse_pat.match(string).groups()
def reverse(s): return s[::-1]
splits = split(reverse(rest), counts)
res = separator.join(map(reverse, reverse(splits)))
return left + res + right, len(splits) > 0 and len(splits) -1 or 0
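if __name__ == '__main__':
    # Hedged examples (assumed, not part of the original source) for the helpers
    # above; the grouping lists follow the same convention as lang.grouping.
    assert intersperse('-12345678', [3], '.') == ('-12345.678', 1)
    assert intersperse('-12345678', [3, 0], '.') == ('-12.345.678', 2)
    assert intersperse('12345678', [], '.') == ('12345678', 0)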
| gpl-3.0 | 1,400,965,177,192,862,000 | 41.45679 | 360 | 0.585272 | false |
ee08b397/panda3d | direct/src/distributed/DistributedObjectBase.py | 6 | 3341 |
from direct.showbase.DirectObject import DirectObject
#from direct.directnotify.DirectNotifyGlobal import directNotify
class DistributedObjectBase(DirectObject):
"""
The Distributed Object class is the base class for all network based
(i.e. distributed) objects. These will usually (always?) have a
dclass entry in a *.dc file.
"""
notify = directNotify.newCategory("DistributedObjectBase")
def __init__(self, cr):
assert self.notify.debugStateCall(self)
self.cr = cr
self.children = {}
self.parentId = None
self.zoneId = None
if __debug__:
def status(self, indent=0):
"""
print out "doId(parentId, zoneId) className"
"""
spaces=' '*(indent+2)
try:
print "%s%s:"%(
' '*indent, self.__class__.__name__)
print "%sfrom DistributedObject doId:%s, parent:%s, zone:%s"%(
spaces,
self.doId, self.parentId, self.zoneId),
except Exception, e: print "%serror printing status"%(spaces,), e
def getLocation(self):
try:
if self.parentId == 0 and self.zoneId == 0:
return None
# This is a -1 stuffed into a uint32
if self.parentId == 0xffffffff and self.zoneId == 0xffffffff:
return None
return (self.parentId, self.zoneId)
except AttributeError:
return None
def handleChildArrive(self, childObj, zoneId):
"""
A new child has just setLocation beneath us. Give us a
chance to run code when a new child sets location to us. For
example, we may want to scene graph reparent the child to
some subnode we own.
"""
assert self.notify.debugCall()
# Inheritors should override
pass
def handleChildArriveZone(self, childObj, zoneId):
"""
A child has just changed zones beneath us with setLocation.
Give us a chance to run code when an existing child sets
location to us. For example, we may want to scene graph
reparent the child to some subnode we own.
"""
assert self.notify.debugCall()
# Inheritors should override
pass
def handleChildLeave(self, childObj, zoneId):
"""
A child is about to setLocation away from us. Give us a
chance to run code just before a child sets location away from us.
"""
assert self.notify.debugCall()
# Inheritors should override
pass
def handleChildLeaveZone(self, childObj, zoneId):
"""
A child is about to setLocation to another zone beneath us.
Give us a chance to run code just before a child sets
location to that zone.
"""
assert self.notify.debugCall()
# Inheritors should override
pass
def handleQueryObjectChildrenLocalDone(self, context):
assert self.notify.debugCall()
# Inheritors should override
pass
def getParentObj(self):
if self.parentId is None:
return None
return self.cr.doId2do.get(self.parentId)
def hasParentingRules(self):
return self.dclass.getFieldByName('setParentingRules') != None
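# Hedged sketch (assumed, not part of the module): subclasses typically override
# the handleChild* hooks above, for example to track or reparent arriving
# children; 'doId' is the distributed object id referenced in status() above.
#
#   class DemoParent(DistributedObjectBase):
#       def handleChildArrive(self, childObj, zoneId):
#           self.children[childObj.doId] = childObj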
| bsd-3-clause | -2,426,981,163,203,136,500 | 33.443299 | 78 | 0.59982 | false |
tousix/tousix-manager | tousix_manager/Log_Statistics/views.py | 1 | 2114 | # Copyright 2015 Rémy Lapeyrade <remy at lapeyrade dot net>
# Copyright 2015 LAAS-CNRS
#
#
# This file is part of TouSIX-Manager.
#
# TouSIX-Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TouSIX-Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TouSIX-Manager. If not, see <http://www.gnu.org/licenses/>.
import json
from django.shortcuts import HttpResponse, Http404
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import View
from tousix_manager.Authentication.AddressMixin import AddressLimitationMixin
from tousix_manager.Log_Statistics.flows import FlowProcess
class RecieveStatsForm(AddressLimitationMixin, View):
"""
View for statistics reception, coming from the controller.
"""
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
"""
        Enter the flow process class if all the requirements are available.
:param request:
:param args:
:param kwargs:
:return:
"""
if self.verify_address() is not None:
raise Http404
if request.method == "POST":
data = json.loads(request.body.decode(encoding='utf-8'))
if request.GET.__contains__("time"):
process = FlowProcess()
process.decode_request(data, request.GET.__getitem__("time"))
return HttpResponse(status=200)
else:
return HttpResponse(status=400)
elif request.method == "GET":
return HttpResponse(status=200)
| gpl-3.0 | 1,480,449,321,250,376,400 | 37.418182 | 78 | 0.679129 | false |
eyolfson/django-ssh | django_ssh/views.py | 1 | 2607 | # Copyright 2014 Jon Eyolfson
#
# This file is part of Django SSH.
#
# Django SSH is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# Django SSH is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Django SSH. If not, see <http://www.gnu.org/licenses/>.
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ValidationError
from django.shortcuts import redirect, render
from django_ssh.forms import KeyFileForm, KeyTextForm
from django_ssh.models import Key
@login_required
def index(request):
return render(request, 'ssh/index.html',
{'keys': Key.objects.filter(user=request.user)})
@login_required
def add_file(request):
if request.method == 'POST':
form = KeyFileForm(request.POST, request.FILES)
if form.is_valid():
file = form.cleaned_data['file']
key = Key(user=request.user, body=file.read().decode())
try:
key.full_clean()
key.save()
return redirect('ssh:index')
except ValidationError as e:
for field, error_list in e.error_dict.items():
for error in error_list:
form.add_error(None, error)
else:
form = KeyFileForm()
return render(request, 'ssh/add_file.html', {'form': form})
@login_required
def add_text(request):
if request.method == 'POST':
form = KeyTextForm(request.POST)
if form.is_valid():
body = form.cleaned_data['body']
comment = form.cleaned_data['comment']
key = Key(user=request.user, body=body, comment=comment)
try:
key.full_clean()
key.save()
return redirect('ssh:index')
except ValidationError as e:
form.add_error(None, e)
else:
form = KeyTextForm()
return render(request, 'ssh/add_text.html', {'form': form})
@login_required
def remove(request, key_id):
try:
key = Key.objects.get(pk=key_id, user=request.user)
key.delete()
except Key.DoesNotExist:
pass
return redirect('ssh:index')
| gpl-3.0 | -4,214,255,497,998,668,000 | 34.712329 | 79 | 0.634446 | false |
corvorepack/REPOIVAN | plugin.video.movie.ultra.7k/resources/tools/scrape.py | 2 | 29534 | # -*- coding: utf-8 -*-
#--------------------------------------------------------
# Movie Ultra 7K
# (http://forum.rojadirecta.es/
# (http://xbmcspain.com/foro/
# Version 0.0.5 (04.11.2014)
#--------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
#--------------------------------------------------------
import os,sys,urlparse,urllib,urllib2,re,shutil,zipfile,inspect,types
import xbmc,xbmcgui,xbmcaddon,xbmcplugin,plugintools
'''
#from inspect import getmembers, isfunction
print "WISE\n"
functions_list = [o for o in getmembers(unwise) if isfunction(o[1])]
print str(functions_list)
print getmembers(unwise)
for i in dir(unwise): print i
for i in getmembers(unwise): print i
print [key for key in locals().keys()
if isinstance(locals()[key], type(sys)) and not key.startswith('__')]
'''
#print unwise
def shsp(params):
url = params.get("url")
thumb = params.get("thumbnail")
plugintools.add_item( action="shsp3" , title="[COLOR=orange]Shedule[/COLOR]" , url=url ,thumbnail=thumb ,fanart=thumb , isPlayable=False, folder=True )
plugintools.add_item( action="shsp5" , title="[COLOR=orange]List[/COLOR]" , url="http://showsport-tv.com/",thumbnail=thumb ,fanart=thumb , isPlayable=False, folder=True )
plugintools.add_item( action="shsp4" , title="[COLOR=orange]Embed[/COLOR]" , url="http://showsport-tv.com/update/embed.html" ,thumbnail=thumb ,fanart=thumb , isPlayable=False, folder=True )
def shsp3(params):
url = params.get("url")
thumb = params.get("thumbnail")
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
#os.environ["HTTP_PROXY"]=Proxy
data=body
#print "START="+params.get("url")
import re
p = re.compile(ur'(<a\sclass="mac".*?<\/div>)', re.DOTALL)
matches = re.findall(p, data)
#del matches[0]
for match in matches:
#url = scrapedurl.strip()
#print match
p = re.compile(ur'<img\ssrc=\'?"?([^\'"]+).*?<span\sclass="mactext">([^<]+).*?\s(<div.*?<\/div>)', re.DOTALL)
links=re.findall(p, match)
for imgs,titles,divs in links:
title=titles.replace(" ","")
title=title.replace(" ","|")
#print divs
plugintools.add_item( action="shsp2" , title=title , url=divs ,thumbnail=thumb ,fanart=thumb , isPlayable=False, folder=True )
def shsp5(params):
url = params.get("url")
thumb = params.get("thumbnail")
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
#os.environ["HTTP_PROXY"]=Proxy
data=body;
#print "START="+params.get("url")
import re
p = re.compile(ur'<a\sclass="menuitem\ssubmenuheader".*?>([^<]+)(.*?)<\/div>', re.DOTALL)
matches = re.findall(p, data)
#del matches[0]
for match,links in matches:
url="http://showsport-tv.com/"+links
plugintools.add_item( action="shsp6" , title=match , url=url ,thumbnail=thumb ,fanart=thumb , isPlayable=False, folder=True )
def shsp6(params):
url = params.get("url")
thumb = params.get("thumbnail")
import re
p = re.compile(ur'href="([^"]+).*?>([^<]+)', re.DOTALL)
a=re.findall(p,url);
for links,channels in a:
url="http://showsport-tv.com/"+links
plugintools.add_item( action="shsp7" , title=channels , url=url ,thumbnail=thumb ,fanart=thumb , isPlayable=True, folder=False )
def shsp7(params):
url = params.get("url")
url=url.replace("/ch/","/update/").replace("php","html");
ref="http://showsport-tv.com/"
thumb = params.get("thumbnail")
title = params.get("title")
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
request_headers.append(["Referer",ref])
bodyy,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
if bodyy.find("googlecode"):
print "GOING XUSCACAMUSCA"
p = re.compile(ur'id="([^"]+).*?src="([^"]+)', re.DOTALL)
elif bodyy.find("iguide"):
p = re.compile(ur'var\s?id\s?=\s?([^;]+).*?src="?\'?([^\'"]+)', re.DOTALL)
print "GOING IGUIDE"
else:
print "UNKNOWN"
pars=re.findall(p,bodyy);ref=url;res='';
for id,script in pars:
if script.find("xuscacamusca"):
ref=url
url='http://xuscacamusca.se/gen_h.php?id='+id+'&width=100%&height=100%'
peak2(params)
elif script.find("iguide"):
url=script+"1009&height=460&channel="+id+"&autoplay=true"
from nstream import iguide2
iguide2(url,ref,res)
else:
print "NO SCRIPT"
def shsp4(params):
url = params.get("url")
thumb = params.get("thumbnail")
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
#print body
import re
p = re.compile(ur'<div class="match1"(.*?\s+.*?)\$\(\s"\#m\d"\s\)', re.DOTALL)
a=re.findall(p,body);
for match in a:
#print "\nFFFFFF";print match;
p = re.compile(ur'img\ssrc="([^"]+).*?div\sclass="name">([^<]+)', re.DOTALL)
foldr=re.findall(p,match)
for img,catg in foldr:
#print "\n"+img;print "\n"+catg;
thumb="http://showsport-tv.com/"+img
title=catg
plugintools.add_item( action="shsp1" , title=title , url=match ,thumbnail=thumb ,fanart=thumb , isPlayable=False, folder=True )
#plugintools.add_item( action="" , title=title , url=str(match) ,thumbnail=thumb ,fanart=thumb , isPlayable=False, folder=True )
def shsp1(params):
url = params.get("url")
thumb = params.get("thumbnail")
import re
p = re.compile(ur'<div\sclass="name">([^<]+).*?fid=\"([^\&]+).*?v_width=([^;]+).*?v_height=([^;]+).*?src=\"([^\&]+)', re.DOTALL)
foldr=re.findall(p,url)
for name,fid,w,h,jsrc in foldr:
thumb=thumb
title=name
url='http://showsport-tv.com/update/'+ fid +".html"
plugintools.add_item( action="peaktv2" , title=title , url=url ,thumbnail=thumb ,fanart=thumb , isPlayable=True, folder=False )
def shsp2(params):
divs = params.get("url")
thumb = params.get("thumbnail")
import re
p = re.compile(ur'href=\'?"?([^\'"]+).*?>([^<]+)')
link=re.findall(p, divs)
#print link
for lin in link:
url="http://showsport-tv.com"+lin[0].replace("/ch/","/update/").replace("php","html");
title=lin[1];print url+"\n"+title
plugintools.add_item( action="peaktv2" , title=title , url=url , isPlayable=True, folder=False )
def peaktv(params):
#plugintools.get_localized_string(21)
url = params.get("url")
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
#os.environ["HTTP_PROXY"]=Proxy
data=body
#print "START="+params.get("url")
p = 'href="([^<]*)'
matches = plugintools.find_multiple_matches_multi(data,p)
del matches[0]
for scrapedurl in matches:
url = scrapedurl.strip()
#print url
title = plugintools.find_single_match(url,'>(.*?:[^:]+)')
#title = title.replace("\xe2","a".encode('iso8859-16'));
title = title.replace("\xe2","a");
title = title.replace("\xc3","t");
title = title.replace("\xe0","f");
title = title.replace("\xfc","u");
title = title.replace("\xdb","s");
title = title.replace("\x15f","s");
'''
#print title.decode("utf-8")
print unicode(title,"iso8859-16")
'''
canal = plugintools.find_single_match(url,'php\?([^"]+)')
url = 'http://peaktv.me/Live.php/?'+canal.strip()
if 'DigiSport1' in str(url):
thumb='http://www.digisport.ro/img/sigla_digisport1.png'
elif 'DigiSport2' in str(url):
thumb='http://itutorial.ro/wp-content/uploads/digi_sport2.png'
elif 'DigiSport3' in str(url):
thumb='http://www.sport4u.tv/web/logo/sport/digi_sport3_ro.png'
elif 'DolceSportHD' in str(url):
thumb='http://static.dolcetv.ro/img/tv_sigle/sigle_black/116.png'
elif 'DolceSport1' in str(url):
thumb='http://static.dolcetv.ro/img/tv_sigle/sigle_black/101.png'
elif 'DolceSport2' in str(url):
thumb='http://static.dolcetv.ro/img/tv_sigle/sigle_black/107.png'
elif 'DolceSport3' in str(url):
thumb='http://static.dolcetv.ro/img/tv_sigle/sigle_black/134.png'
elif 'DolceSport4' in str(url):
thumb='http://static.dolcetv.ro/img/tv_sigle/sigle_black/247.png'
elif 'EuroSport2HD' in str(url):
thumb='http://www.sport4u.tv/web/logo/sport/eurosport-2.png'
elif 'EuroSport1HD' in str(url):
thumb='http://4.bp.blogspot.com/-k50Qb45ZHGY/UrMCA2zRoGI/AAAAAAAAStA/Dj6sF0dHcs8/s1600/790px-Eurosport_logo.svg.png'
elif 'LookPlusHD' in str(url):
thumb='http://upload.wikimedia.org/wikipedia/commons/thumb/a/ac/Look_Plus_HD.png/100px-Look_Plus_HD.png'
elif 'LookTVHD' in str(url):
thumb='http://upload.wikimedia.org/wikipedia/commons/thumb/8/89/Look_TV_HD_logo.png/100px-Look_TV_HD_logo.png'
else:
thumb='http://frocus.net/images/logotv/Sport-ro_HD.jpg'
print thumb
fanart = thumb
plugintools.add_item( action="peaktv2" , title=title , url=url ,thumbnail=thumb ,fanart=fanart , isPlayable=True, folder=False )
def peaktv2(params):
msg = "Buscando enlace\nespere,porfavor... "
#plugintools.message("CipQ-TV",msg)
url = params.get("url")
print "START="+url
title = params.get("title")
thumb = params.get("thumbnail")
ref=url
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers,timeout=30)
#os.environ["HTTP_PROXY"]=Proxy
data=body
#print "START="+data
p = '<script type="text\/javascript">id="([^"]+).*?width="([^"]+).*?height="([^"]+).*?src="([^"]+)'
matches = plugintools.find_multiple_matches_multi(data,p)
#print "START=";print matches
for id,width,height,cast in matches:
#url = 'http://xuscacamusca.se/?id='+id+'&width='+width+'&height='+height.strip()
url = 'http://fa16bb1eb942c5c48ac3cd66aff4c32f2a015b1af198c14b88.com/gen_s.php?id='+id+'&width='+width+'&height='+height.strip()
#print "START="+url
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
request_headers.append(["Referer",ref])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers,timeout=10)
data=body
#print "START="+data
p='src=\'?"?([^\/]+)\/jwplayer\.js\.pagespeed'
swf = plugintools.find_single_match(data,p)
#print "SWF";print swf
swf='http://xuscacamusca.se/'+swf+'/jwplayer.flash.swf'
print "SWF = "+swf
p = ';eval(.*?)<\/script>'
mat = plugintools.find_multiple_matches_multi(data,p)
print "wisenx="+str(mat)
'''
try:
print "wisenx="+str(mat)
swfobj=str(mat)
#print "swfobj="+swfobj
import unwise
decr = unwise.unwise_process(data)
except:
print "Link outdated"
msg = "Enlace caducado,solo estara activo durante el partido ... "
plugintools.message("CipQ-TV",msg)
'''
if mat:
swfobj=mat[1]
#print "swfobj="+swfobj
import unwise
decr = unwise.unwise_process(data)
else:
print "Link outdated"
msg = "Enlace caducado,solo estara activo durante el partido ... "
plugintools.message("CipQ-TV",msg)
return
#print "DECR="+decr
p = ",file:'(.*?)'"
rtmp = plugintools.find_single_match(decr,p)
print "PLPATH="+rtmp
media_url = rtmp+' swfUrl='+swf+' live=1 timeout=15 swfVfy=1 pageUrl='+url
#plugintools.add_item( action="play_resolved_url" , title=title , url=media_url ,thumbnail=thumb , isPlayable=True, folder=False )
plugintools.play_resolved_url(media_url)
print media_url
def pltptc(params):
url = params.get("url")
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
data=body
print "START="+params.get("url")
if params.get("title")=="PonTuCanal" :
pattern1 = 'popUp\(\'([^\']+).*src="([^"]+)'
pattern2 = "http://canalesgratis.me/canales/"
pattern3 = ".php"
else :
pattern1 = 'popUp\(\'([^\']+).*src="([^"]+)'
pattern2 = "http://verdirectotv.com/canales/"
pattern3 = ".html"
matches = plugintools.find_multiple_matches_multi(data,pattern1)
for scrapedurl, scrapedthumbnail in matches:
#thumbnail = urlparse.urljoin( params.get("url") , scrapedthumbnail )
thumbnail = scrapedthumbnail
url = urlparse.urljoin( params.get("url") , scrapedurl.strip() )
rep = str.replace(url,pattern2,"")
title = str.replace(rep,pattern3,"").capitalize()
plot = ""
msg = "Resolviendo enlace ... "
uri=url
rref = 'http://verdirectotv.com/carrusel/tv.html'
uri = uri+'@'+title+'@'+rref
#plugintools.log("URI= "+uri)
pattern = "\s+"
import re
uri = re.sub(pattern,'',uri)
uri = uri.encode('base64')
url = 'http://localhost/000/ptc2xbmc.php?page='+uri
url = re.sub(pattern,'',url)
plugintools.log("LSP URL= "+url)
url = 'plugin://plugin.video.live.streamspro/?url='+plugintools.urllib.quote_plus(url)+'&mode=1&name='+plugintools.urllib.quote_plus(title)
#plugintools.log("LINK= "+url)
plugintools.add_item( action="runPlugin" , title=title , plot=plot , url=url ,thumbnail=thumbnail , isPlayable=False, folder=True )
def vipracing0(params):
#plugintools.log("cipq.webpage "+repr(params))#print list of pages (PLT,PTC)
# Fetch video list from website feed
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.104 Safari/537.36"])
url = params.get("url")
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
#plugintools.log("data= "+body)
thumb="http://cs301412.vk.me/v301412640/5ab5/fJUqz4EDdTM.jpg"
pattern1 = '"shortcut":"([^"]*)'
match = plugintools.find_multiple_matches_multi(body,pattern1)
match = sorted(list(set(match)))
#match = sorted(match.items(), key=lambda x: x[1])
for opcions in match:
title = "Vip Racing "+str(opcions.replace("opcion-",""))
title = title.capitalize()
url = "http://vipracing.tv/channel/"+opcions
#url = str(url.split())
url = ", ".join(url.split())
#plugintools.log("TITLE:"+url)
plugintools.add_item(action="vipracing2" ,title=title ,url=url ,thumbnail=thumb ,fanart=thumb ,isPlayable=True, folder=False )
def vipracing2(params):
msg = "Resolviendo enlace ... "
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/38.0.2125.104 Safari/537.36"])
request_headers.append(["Referer","http://vipracing.tv"])
ref = 'http://vipracing.tv'
url = params.get("url");ref=url
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
data = body.replace('window\.location\.replace\(ncr\)','')
'''
array('freelivetv','byetv','9stream','castalba','castamp','direct2watch','kbps','flashstreaming','cast247','ilive','freebroadcast','flexstream','mips','veemi','yocast','yukons','ilive','iguide','ucaster','ezcast','maxstream','dinozap','janjua','tutelehd')
'''
pattern = '<script type="text\/javascript" src="(.*direct2watch[^"]+)'
uri = plugintools.find_single_match(body,pattern)
pattern = 'embed\/([^\&]+).*?width=([^\&]+).*?height=([^\&]+)'
match = plugintools.find_multiple_matches_multi(uri,pattern)
for id,width,height in match:
plugintools.log("ID= "+id)
plugintools.log("WIDTH= "+width)
plugintools.log("HEIGHT= "+height)
data = plugintools.read(uri)
p = 'src=\'?"?([^\'"]+)'
uri = plugintools.find_single_match(data,p)
plugintools.log("URI= "+uri)
url=uri
#print "URL = "+url;print "REF = "+ref;
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
request_headers.append(["Referer",ref])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
#print "List : ", request_headers;
bodi = body
import ioncube
vals=ioncube.ioncube1(bodi)
#print tkserv+"\n"+strmr+"\n"+plpath+"\n"+swf#print valz;#
print "URL = "+url;print "REF = "+ref;
tkserv=vals[0][1];strmr=vals[1][1].replace("\/","/");plpath=vals[2][1].replace(".flv","");swf=vals[3][1];
ref=url;url=tkserv;bodyi=[];bodyy='';urli=[];
from plt import curl_frame
bodi=curl_frame(url,ref,body,bodyi,bodyy,urli);
p='token":"([^"]+)';token=plugintools.find_single_match(bodi,p);#print token
media_url = strmr+'/'+plpath+' swfUrl='+swf+' token='+token+' live=1 timeout=15 swfVfy=1 pageUrl='+ref
#media_url ='http://cpliga.nmp.hls.emision.dof6.com/hls/live/201767/channelpc2/index.m3u8'
#media_url ='http://cpliga.nmp.hls.emision.dof6.com/hls/live/201767/channelpc2/20141028T074633-05-15185.ts'
plugintools.play_resolved_url(media_url)
print media_url
'''
p = '(\$\.getJSON\(|streamer\'?"?:?\s?|file\'?"?:?\s?|flash\'?"?,\s?src:?\s?)\'?"?([^\'"]+)'
match = plugintools.find_multiple_matches_multi(body,p)
print str(match);
tokserv = match[0][1]
strmr = match[1][1].replace("\\","")
plpath = match[2][1].replace(".flv","")
swf = match[3][1]
#print strmr
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
request_headers.append(["Referer",uri])
body,response_headers = plugintools.read_body_and_headers(tokserv, headers=request_headers)
p=':\'?"?([^\'"]+)'
tok=plugintools.find_single_match(body,p)
media_url=strmr+"/"+plpath+" swfUrl="+swf+" live=1 token="+tok+" timeout=15 swfVfy=1 pageUrl="+uri
plugintools.play_resolved_url(media_url)
print media_url
'''
def dolce(params):
plugintools.get_localized_string(21)
#plugintools.log("cipq.webpage "+repr(params))#print list of pages (PLT,PTC)
# Fetch video list from website feed
#data = plugintools.read( params.get("url") )
url = 'http://www.dolcetv.ro/tv-live'
data = plugintools.read(url)
#plugintools.log("DATA:"+data)
#returnheaders = plugintools.read_body_and_headers( params.get("url") )
#data = plugintools.read_body_and_headers( params.get("url") )
#for tup, tur, other in returnheaders:
#plugintools.log("TUPLE:"+tup+"STR:"+tur+"OTHER:"+other)
#plugintools.log("bug tuple " + data(tuple(tmp.split(', '))))
pattern1 = '<img\s+class="thumb".*?alt="([^"]+).*?\s+<span class="thumbtvlive_over"><\/span>\s+<img\s+class="thumbtvlive_logo"\s+src="([^"]+)'
pattern2 = "\/([0-9]{1,3})\.png"
pattern3 = "thumb-([^\/]+)"#wigs,title
match = plugintools.find_multiple_matches_multi(data,pattern1)
match = sorted(list(set(match)))
for wigs,sigle in match:
#plugintools.log("WIGS:"+wigs)
#plugintools.log("SIGLE:"+sigle)#print list of url
title = plugintools.find_single_match(wigs,pattern3)
title = title.capitalize()
thumb = sigle
id = plugintools.find_single_match(sigle,pattern2)
pattern4 = 'href=.*?'+id+'.*?class="acum">([^<]+)'
pattern5 = 'href=.*?'+id+'.*?class="next">([^<]+)'
#acum = plugintools.find_single_match(data,pattern4)
#next = plugintools.find_single_match(data,pattern5)
#title=acum
url = 'http://www.dolcetv.ro/service/play/index/id/'+id+'/category/0/type/live-tv/editionId/0/module_name/androidtablet'
url=url.strip()
show="ACUM1"
episode="ACUM2"
extra="ACUM3"
page="ACUM4"
info_labels="ACUM5"
plugintools.add_item( action="dolce2" ,title=title ,url=url ,thumbnail=wigs ,fanart=thumb , page=page,isPlayable=False, folder=True )
def dolce2(params):
msg = "Resolviendo enlace ... "
url = params.get("url")
data = plugintools.read(params.get("url"))
data = data.replace('&','&')
data = data.replace('"',"'")
#plugintools.log("LSS URL= "+url)
thumbnail=params.get("thumbnail")
title = params.get("title")
plot=params.get("plot")
pattern = '"high quality stream name":"([^"]+).*?token-low":"([^"]+).*?token-high":"([^"]+)'
match = plugintools.find_multiple_matches_multi_multi(data,pattern)
for name,low,high in match:
plugintools.log("NAME= "+name)
#plugintools.log("HIGH= "+high)
plugintools.add_item( action="" , title=title , plot=plot , url=url ,thumbnail=thumbnail , isPlayable=False, folder=False )
def lsstv(params):
thumbnail=params.get("thumbnail");
fanart=params.get("fanart");
data = plugintools.read("http://www.livesportstreams.tv/sidebar.php?top=1&type=1&l=es");
grups='<span\sid="span_link_sidebar.*?(\(.*?\)).*?<\/span>';grups=plugintools.find_multiple_matches(data,grups);grups=list(set(grups));grup=[];
for i in range(1,len(grups)):
a=grups[i].replace("1, ","").split("'");grup+=([a[1],a[7],a[9]]);
j=0
for j in range(len(grup)):
if j%3==0:
sport=grup[j];
elif j%3==1:
link="http://www.livesportstreams.tv/events.php?top=1&type=1&l=es&"+grup[j];
plugintools.add_item( action="lsstv1" , title=sport , url=link ,thumbnail=thumbnail , isPlayable=False, folder=True )
#print "sport="+grup[j];j+=1;print "link="+grup[j];j+=1;print "nrevnt="+grup[j];j+=1;
else:
sport=sport+' ('+grup[j]+'partidos)';
def lsstv1(params):
data=plugintools.read(params.get("url"));
pattern1 = 'onClick=\'showLinks\("event_", (.*?<img alt=".*?style="width: 40px;">.*?letter-spacing: 0px;">.*?<td rowspan=2 style="font-size:11px; font-style: italic; text-align: right;" title=\'[^\']+.)'
pattern2 = '"([^"]+).*<img alt="([^"]+).*style="width: 40px;">([^<]+).*?letter-spacing: 0px;">([^<]+).*<td rowspan=2 style="font-size:11px; font-style: italic; text-align: right;" title=\'([^\']+)'
pattern3 = ""
match = plugintools.find_multiple_matches_multi_multi(data,pattern1)
#for (i,id) in enumerate(match):
match = sorted(list(set(match)))#array_unique !!!
for ids in match:
'''
thumbnail = "http://cdn-a.streamshell.net/images/icons/48x48px.png"
#plugintools.log("TITLE"+ids)#print list of channels
url = "http://www.livesportstreams.tv/es/player.php?e=" + ids + "&s=13&c=4"
url = url.strip()
plot = ""
title = ids.capitalize()
'''
matches = plugintools.find_multiple_matches_multi(ids,pattern2)
for id, champ, ora, meci, lang in matches:
thumbnail = "http://cdn-a.streamshell.net/images/icons/48x48px.png"
url = "http://www.livesportstreams.tv/es/links.php?links=1&id=" + id
url = url.strip()
plugintools.log("URL:"+url)#print list of url
#champ = champ.replace('futbol','')
mec = "[COLOR=green]"+ ora + "[COLOR=yellow] : " + meci.upper() + " ([COLOR=red]" + lang.lower() + "[/COLOR][/COLOR][/COLOR]) :" + champ
title = mec
plot = ""
#plugintools.log("cipq.webpage_play "+title)#print list of channels
#uri = plugintools.find_single_match(data,rep)
# Appends a new item to the xbmc item list
plugintools.add_item( action="lsstv2" , title=title , plot=plot , url=url ,thumbnail=thumbnail , isPlayable=True, folder=True )
def lsstv2(params):
msg = "Resolviendo enlace ... "
ref = params.get("url")
data = plugintools.read( params.get("url") )
data = data.replace('&','&')
data = data.replace('"',"'")
#plugintools.log("LSS URL= "+data)
thumbnail=params.get("thumbnail")
title = params.get("meci")
plot=params.get("plot")
pattern = '\?(e=[^\'"]+)'
match = plugintools.find_multiple_matches_multi_multi(data,pattern)
match = sorted(list(set(match)))
i=1
for id in match:
url = "http://www.livesportstreams.tv/es/player.php?" + id + "@" + ref
url=url.strip()
plugintools.log("LSS URL= "+url)
title = "Link " + str(i)
i+=1
#xbmc.executebuiltin('XBMC.RunPlugin('+url+')')
#params['action'] = 'runPlugin'
#plugintools.play_resolved_url(url)
#xbmc.executebuiltin('XBMC.RunPlugin(' + url +')')
#xbmc.Player(xbmc.PLAYER_CORE_MPLAYER).play(item=url)
plugintools.add_item( action="lsstv3" , title=title , plot=plot , url=url ,thumbnail=thumbnail , isPlayable=True, folder=False )
def lsstv3(params):
splitted = params.get("url").split('@')
page=splitted[0]
ref =splitted[1]
plugintools.log("FIRST URL= "+page)
plugintools.log("REFERER= "+ref)
title=params.get("title")
plot=params.get("plot")
thumbnail=params.get("thumbnail")
'''
msg = "Pasando enlace a SpDevil... "
xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('CipQ TV', msg, 1 , art+'icon.png'))#quitar art+...
url = 'plugin://plugin.video.SportsDevil/?item=catcher=streams&title='+title+'&url='+page+'&videoTitle='+title+'&director=movie.ultra.7k&genre=Live TV&referer='+ref+'&definedIn=&type=rss&icon='+thumbnail+'&mode=1'
url=url.strip()
#url=urllib.quote_plus("url")
plugintools.log("LINK= "+str(url))
xbmc.executebuiltin('XBMC.RunPlugin('+url+')')
ref = 'http://www.livesportstreams.tv/es/main.php'
http://www.castasap.pw/public/embed.php?id=b2bbaf4db04a87f50bf659b5df1939f9a805698fa2d5a0ce0ff8c45807033ee4&cid=1413903241&eid=5446206223e03717132813&rid=54467440a19a7&hon=1&w=768&h=432
Referer: http://www.livesportstreams.tv/es/player.php?e=5446206223e03717132813&s=13&c=51
http://37.48.85.217:43911/ls/51/index.m3u8?c=1eca49fd84273d860fa4783f036c2f280df754b3ac57c7d1a5206e5b95bc52b7&cid=1413903241&eid=5446206223e03717132813
Referer:http://www.castasap.pw/public/embed.php?id=b2bbaf4db04a87f50bf659b5df1939f9a805698fa2d5a0ce0ff8c45807033ee4&cid=1413903241&eid=5446206223e03717132813&rid=54467440a19a7&hon=1&w=768&h=432
'''
pattern = 'document.write\(\'<iframe\s+frameborder\=0 marginheight\=0\s+marginwidth\=0\s+scrolling\=no\s+src=\'?"?([^\'"]+)';
data = plugintools.read(page)
url = plugintools.find_single_match(data,pattern)
url = unescape(url).encode("utf-8")
ref=page
print "CASTURL:"+url
print "CASTREF:"+ref
txt='\.([^\/]+)';txt=plugintools.find_single_match(url,txt);
#plugintools.log("LINK EMBED= "+url)
request_headers=[]
request_headers.append(["User-Agent","Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)"])
request_headers.append(["Referer",ref])#"http://www.livesportstreams.tv/es/main.php"
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
#print "<br>"+body
p='(swfobject\.embedSWF\("|st":?\s?|file\'?"?:?\s?|flash\'?"?,\s?src:?\s?)\'?"?([^\'"]+)'
match = plugintools.find_multiple_matches_multi(body,p)
#print match;
strmr = match[0][1].replace("\\","")
plpath = match[1][1].replace(".flv","")
swf = match[2][1]
print swf;
print "STRM="+strmr
print "PATH="+plpath
a='http://cdn-b.streamshell.net/swf/uppod-hls.swf';b='http://www.'+txt+'/st/'+txt+'.txt';txt=a+'?st='+b+'&file='+plpath;print txt
#plugintools.play_resolved_url(txt);sys.exit()
#data = plugintools.read(txt);print data;
#'(http.*?index\.m3u8.*)'
splitted = url.split('?')
splitte = splitted[1].split('=')
id=splitte[1].split('&cid')
id = id[0]
#id = unescape(id[0]).encode("utf-8")
#id = xpodd(id)
print "XPODD="+id
cid=splitte[2].split('&')
eid=splitte[3].split('&')
rid=splitte[4].split('&')
url = 'http://37.48.85.217:43911/ls/58/index.m3u8?c='+id+'&cid='+cid[0]+'&eid='+eid[0]
url='http://37.48.82.65:43911/ls/95/index.m3u8?c=500c4c9a8b7c345a15fe37e17bda7f2a0c673b920dd3ac41d54e6c23c642d241'+'&cid='+cid[0]+'&eid='+eid[0]
plugintools.play_resolved_url(url);sys.exit()
url = url.strip()
#plugintools.play_resolved_url(url);sys.exit();
print "URL="+url
print "REF="+ref
rtmplink="rtmp://37.48.85.217:43911/ls/58/"+strmr+" playpath="+plpath+" swfUrl="+swf+" live=true timeout=30 swfVfy=1 pageUrl="+unescape(ref).encode("utf-8")+" Conn=S:OK --live"
plugintools.play_resolved_url(rtmplink)
'''
request_headers=[]
request_headers.append(["Referer",ref])
print request_headers
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
print body
'''
def unescape(text):
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
def xpodd(param):
#-- define variables
loc_3 = [0,0,0,0]
loc_4 = [0,0,0]
loc_2 = ''
#-- define hash parameters for decoding
dec = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
hash1 = ["Z", "v", "6", "W", "m", "y", "g", "X", "b", "o", "V", "d", "k", "t", "M", "Q", "u", "5", "D", "e", "J", "s", "z", "f", "L", "="]
hash2 = ["a", "G", "9", "w", "1", "N", "l", "T", "I", "R", "7", "2", "n", "B", "4", "H", "3", "U", "0", "p", "Y", "c", "i", "x", "8", "q"]
#-- decode
for i in range(0, len(hash1)):
re1 = hash1[i]
re2 = hash2[i]
param = param.replace(re1, '___')
param = param.replace(re2, re1)
param = param.replace('___', re2)
i = 0
while i < len(param):
j = 0
while j < 4 and i+j < len(param):
loc_3[j] = dec.find(param[i+j])
j = j + 1
loc_4[0] = (loc_3[0] << 2) + ((loc_3[1] & 48) >> 4);
loc_4[1] = ((loc_3[1] & 15) << 4) + ((loc_3[2] & 60) >> 2);
loc_4[2] = ((loc_3[2] & 3) << 6) + loc_3[3];
j = 0
while j < 3:
if loc_3[j + 1] == 64:
break
try:
loc_2 += unichr(loc_4[j])
except:
pass
j = j + 1
i = i + 4;
return loc_2
def find_multiple_matches_multi(text,pattern):
matches = re.findall(pattern,text, re.MULTILINE)
return matches
| gpl-2.0 | 7,560,572,108,504,564,000 | 41.251788 | 256 | 0.652468 | false |
jamslevy/gsoc | app/django/db/backends/sqlite3/base.py | 1 | 7964 | """
SQLite3 backend for django.
Python 2.3 and 2.4 require pysqlite2 (http://pysqlite.org/).
Python 2.5 and later use the sqlite3 module in the standard library.
"""
from django.db.backends import *
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
try:
try:
from sqlite3 import dbapi2 as Database
except ImportError, e1:
from pysqlite2 import dbapi2 as Database
except ImportError, exc:
import sys
from django.core.exceptions import ImproperlyConfigured
if sys.version_info < (2, 5, 0):
module = 'pysqlite2'
else:
module = 'sqlite3'
exc = e1
raise ImproperlyConfigured, "Error loading %s module: %s" % (module, exc)
try:
import decimal
except ImportError:
from django.utils import _decimal as decimal # for Python 2.3
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
Database.register_converter("bool", lambda s: str(s) == '1')
Database.register_converter("time", util.typecast_time)
Database.register_converter("date", util.typecast_date)
Database.register_converter("datetime", util.typecast_timestamp)
Database.register_converter("timestamp", util.typecast_timestamp)
Database.register_converter("TIMESTAMP", util.typecast_timestamp)
Database.register_converter("decimal", util.typecast_decimal)
Database.register_adapter(decimal.Decimal, util.rev_typecast_decimal)
if Database.version_info >= (2,4,1):
# Starting in 2.4.1, the str type is not accepted anymore, therefore,
# we convert all str objects to Unicode
# As registering a adapter for a primitive type causes a small
# slow-down, this adapter is only registered for sqlite3 versions
# needing it.
Database.register_adapter(str, lambda s:s.decode('utf-8'))
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
class DatabaseOperations(BaseDatabaseOperations):
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_extract that's registered in connect().
return 'django_extract("%s", %s)' % (lookup_type.lower(), field_name)
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect().
return 'django_date_trunc("%s", %s)' % (lookup_type.lower(), field_name)
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return 'NULL'
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % \
(style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def year_lookup_bounds(self, value):
first = '%s-01-01'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
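        # Illustrative example:
        #   year_lookup_bounds('2008') -> ['2008-01-01', '2008-12-31 23:59:59.999999']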
class DatabaseWrapper(BaseDatabaseWrapper):
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures()
self.ops = DatabaseOperations()
self.client = DatabaseClient()
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation()
def _cursor(self, settings):
if self.connection is None:
if not settings.DATABASE_NAME:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured, "Please fill out DATABASE_NAME in the settings module before using the database."
kwargs = {
'database': settings.DATABASE_NAME,
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(self.options)
self.connection = Database.connect(**kwargs)
# Register extract, date_trunc, and regexp functions.
self.connection.create_function("django_extract", 2, _sqlite_extract)
self.connection.create_function("django_date_trunc", 2, _sqlite_date_trunc)
self.connection.create_function("regexp", 2, _sqlite_regexp)
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
from django.conf import settings
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if settings.DATABASE_NAME != ":memory:":
BaseDatabaseWrapper.close(self)
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=()):
query = self.convert_query(query, len(params))
return Database.Cursor.execute(self, query, params)
def executemany(self, query, param_list):
try:
query = self.convert_query(query, len(param_list[0]))
return Database.Cursor.executemany(self, query, param_list)
except (IndexError,TypeError):
# No parameter list provided
return None
def convert_query(self, query, num_params):
return query % tuple("?" * num_params)
def _sqlite_extract(lookup_type, dt):
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
return unicode(getattr(dt, lookup_type))
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
def _sqlite_regexp(re_pattern, re_string):
import re
try:
return bool(re.search(re_pattern, re_string))
except:
return False
| apache-2.0 | -8,789,656,336,015,074,000 | 37.84878 | 125 | 0.643395 | false |
aasiutin/electrum | gui/kivy/uix/qrcodewidget.py | 18 | 3411 | ''' Kivy Widget that accepts data and displays qrcode
'''
from threading import Thread
from functools import partial
import qrcode
from kivy.uix.floatlayout import FloatLayout
from kivy.graphics.texture import Texture
from kivy.properties import StringProperty
from kivy.properties import ObjectProperty, StringProperty, ListProperty,\
BooleanProperty
from kivy.lang import Builder
from kivy.clock import Clock
Builder.load_string('''
<QRCodeWidget>
canvas.before:
# Draw white Rectangle
Color:
rgba: root.background_color
Rectangle:
size: self.size
pos: self.pos
canvas.after:
Color:
rgba: root.foreground_color
Rectangle:
size: self.size
pos: self.pos
Image
id: qrimage
pos_hint: {'center_x': .5, 'center_y': .5}
allow_stretch: True
size_hint: None, None
size: root.width * .9, root.height * .9
''')
class QRCodeWidget(FloatLayout):
data = StringProperty(None, allow_none=True)
background_color = ListProperty((1, 1, 1, 1))
foreground_color = ListProperty((0, 0, 0, 0))
def __init__(self, **kwargs):
super(QRCodeWidget, self).__init__(**kwargs)
self.data = None
self.qr = None
self._qrtexture = None
def on_data(self, instance, value):
if not (self.canvas or value):
return
self.update_qr()
def set_data(self, data):
if self.data == data:
return
MinSize = 210 if len(data) < 128 else 500
self.setMinimumSize((MinSize, MinSize))
self.data = data
self.qr = None
def update_qr(self):
if not self.data and self.qr:
return
L = qrcode.constants.ERROR_CORRECT_L
data = self.data
self.qr = qr = qrcode.QRCode(
version=None,
error_correction=L,
box_size=10,
border=0,
)
qr.add_data(data)
qr.make(fit=True)
self.update_texture()
def setMinimumSize(self, size):
# currently unused, do we need this?
self._texture_size = size
def _create_texture(self, k):
self._qrtexture = texture = Texture.create(size=(k,k), colorfmt='rgb')
# don't interpolate texture
texture.min_filter = 'nearest'
texture.mag_filter = 'nearest'
def update_texture(self):
if not self.qr:
return
matrix = self.qr.get_matrix()
k = len(matrix)
# create the texture
self._create_texture(k)
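        # One RGB pixel is emitted per QR module: dark modules stay black ([0, 0, 0]),
        # light modules take background_color; rows are read as matrix[k-1-r], presumably
        # to match the texture's bottom-up row order.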
buff = []
bext = buff.extend
cr, cg, cb, ca = self.background_color[:]
cr, cg, cb = cr*255, cg*255, cb*255
for r in range(k):
for c in range(k):
bext([0, 0, 0] if matrix[k-1-r][c] else [cr, cg, cb])
# then blit the buffer
buff = ''.join(map(chr, buff))
# update texture
self._upd_texture(buff)
def _upd_texture(self, buff):
texture = self._qrtexture
texture.blit_buffer(buff, colorfmt='rgb', bufferfmt='ubyte')
img = self.ids.qrimage
img.anim_delay = -1
img.texture = texture
img.canvas.ask_update()
if __name__ == '__main__':
from kivy.app import runTouchApp
import sys
data = str(sys.argv[1:])
runTouchApp(QRCodeWidget(data=data))
| mit | 8,204,450,151,380,347,000 | 26.288 | 78 | 0.574318 | false |
jd28/pynwn-tools | tlkie/tlkie.py | 1 | 1935 | #!/usr/bin/env python
import argparse, os, sys
from pynwn.file.tlk import Tlk
from pynwn.file.tls import TLS
from pynwn.util.helper import get_encoding
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--version', action='version', version='0.1')
parser.add_argument('-o', '--output', help='Output TLK or TLS file.')
parser.add_argument('-l', '--language', help='TLK language.', default=0)
parser.add_argument('file', help='TLK or TLS file.', nargs='+')
args = parser.parse_args()
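# Illustrative invocations (file names here are hypothetical):
#   python tlkie.py -o merged.tlk dialog.tlk extra.tls   # merge TLK/TLS sources into a TLK
#   python tlkie.py -o dialog.tls dialog.tlk             # convert a TLK to TLS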
def load_by_ext(f, ext):
if '.tlk' == ext:
return Tlk(open(f, 'rb'))
elif '.tls' == ext:
return TLS(f)
else:
raise ValueError("Tlkie can only process a TLK or TLS file.")
def save_by_ext(main, ext):
if '.tlk' == ext:
if isinstance(main, Tlk):
with open(args.output, 'wb') as f:
main.write(f)
elif isinstance(main, TLS):
with open(args.output, 'wb') as f:
main.write_tlk(f, args.language)
elif '.tls' == ext:
if isinstance(main, Tlk):
with open(args.output, 'w', encoding=get_encoding()) as f:
main.write_tls(f)
elif isinstance(main, TLS):
with open(args.output, 'w', encoding=get_encoding()) as f:
main.write(f)
if __name__ == "__main__":
basef = os.path.basename(args.output)
outext = os.path.splitext(basef)[1].lower()
if outext == '.tlk':
main = Tlk()
elif outext == '.tls':
main = TLS()
else:
raise ValueError("Tlkie can only output a TLK or TLS file.")
for f in args.file:
basef = os.path.basename(f)
ext = os.path.splitext(basef)[1]
tl = load_by_ext(f, ext.lower())
print("Adding: %s" % f)
if main is None:
main = tl
continue
main.inject(tl)
print("Saving output: %s" % args.output)
save_by_ext(main, outext)
| mit | -8,679,597,406,136,864,000 | 29.234375 | 72 | 0.567442 | false |
stevekuznetsov/Klampt | Python/utils/make_planar_rob.py | 4 | 1110 | import sys
n = int(sys.argv[1])
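# Illustrative usage (output file name is hypothetical); the script writes the
# generated robot description to stdout:
#   python make_planar_rob.py 5 > planar5R.rob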
#scale = 1.0/n
scale = 1.0;
intro = """### A %dR planar robot ###
TParent 1 0 0 0 1 0 0 0 1 0 0 0 """ %(n,)
trans = """ \\\n1 0 0 0 1 0 0 0 1 %g 0 0"""%(scale,)
axis = "0 1 0"
jointtype = "r"
qMin = "0"
qMax = "6.28319"
q ="0"
geometry = ' "objects/thincube.tri"'
geomscale = "%g"%(max(scale,0.05))
mass = "1"
torqueMax = "1"
accMax = "1"
velMax = "1"
joint = "joint spin"
servoP = "50"
servoI = "100"
servoD = "3"
print intro,
for i in xrange(n-1):
print trans,
print
print "axis\t"+"\t".join([axis]*n)
print "jointtype\t"+" ".join([jointtype]*n)
print "qMin\t"+" ".join([qMin]*n)
print "qMax\t"+" ".join([qMax]*n)
print "q\t"+" ".join([q]*n)
print "geometry\t"+" ".join([geometry]*n)
print "geomscale\t"+" ".join([geomscale]*n)
print "mass\t"+" ".join([mass]*n)
print "automass"
print "torqueMax\t"+" ".join([torqueMax]*n)
print "accMax\t"+" ".join([accMax]*n)
print "velMax\t"+" ".join([velMax]*n)
for i in xrange(n):
print joint,i
print "servoP\t"+" ".join([servoP]*n)
print "servoI\t"+" ".join([servoI]*n)
print "servoD\t"+" ".join([servoD]*n)
| bsd-3-clause | -7,736,613,923,280,864,000 | 21.653061 | 60 | 0.577477 | false |
tacaswell/scikit-xray | skbeam/core/stats.py | 12 | 3985 | #! encoding: utf-8
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module is for statistics.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.stats
from skbeam.core.utils import _defaults # Dan is dubious about this.
import logging
logger = logging.getLogger(__name__)
def statistics_1D(x, y, stat='mean', nx=None, min_x=None, max_x=None):
"""
Bin the values in y based on their x-coordinates
Parameters
----------
x : array
position
y : array
intensity
stat: str or func, optional
statistic to be used on the binned values defaults to mean
see scipy.stats.binned_statistic
nx : integer, optional
number of bins to use defaults to default bin value
min_x : float, optional
Left edge of first bin defaults to minimum value of x
max_x : float, optional
Right edge of last bin defaults to maximum value of x
Returns
-------
edges : array
edges of bins, length nx + 1
val : array
statistics of values in each bin, length nx
"""
# handle default values
if min_x is None:
min_x = np.min(x)
if max_x is None:
max_x = np.max(x)
if nx is None:
nx = _defaults["bins"]
    # bin the data on x and apply the requested statistic to the y values in each bin
bins = np.linspace(start=min_x, stop=max_x, num=nx+1, endpoint=True)
val, _, _ = scipy.stats.binned_statistic(x, y, statistic=stat, bins=bins)
# return the two arrays
return bins, val
| bsd-3-clause | 361,784,405,801,642,000 | 43.277778 | 77 | 0.561355 | false |
webuildcity/wbc | testing/settings.py | 1 | 1376 | from .local import *
INSTALLED_APPS = [
# django
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
# other dependencies
'rest_framework',
'rest_framework_gis',
'widget_tweaks',
'markdown',
# we build city apps
'wbc.core',
'wbc.region',
'wbc.process',
'wbc.news',
'wbc.comments',
]
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'testing.urls'
WSGI_APPLICATION = 'wbc.wsgi.application'
LANGUAGE_CODE = 'de-de'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_FROM = '[email protected]'
FEED_TITLE = "Test Feed"
FEED_DESCRIPTION = "Test Feed Description"
| lgpl-3.0 | -3,804,258,089,250,883,000 | 24.962264 | 69 | 0.707849 | false |
davduran/thumbor | tests/handlers/test_base_handler.py | 1 | 42935 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from urllib import quote
import tempfile
import shutil
from os.path import abspath, join, dirname
import os
from datetime import datetime, timedelta
import pytz
import subprocess
from json import loads
import tornado.web
from preggy import expect
from mock import Mock, patch
import unittest
from thumbor.config import Config
from thumbor.importer import Importer
from thumbor.context import Context, ServerParameters, RequestParameters
from thumbor.handlers import FetchResult, BaseHandler
from thumbor.storages.file_storage import Storage as FileStorage
from thumbor.storages.no_storage import Storage as NoStorage
from thumbor.utils import which
from tests.base import TestCase, PythonTestCase, normalize_unicode_path
from thumbor.engines.pil import Engine
from libthumbor import CryptoURL
from tests.fixtures.images import (
default_image,
alabama1,
space_image,
invalid_quantization,
animated_image,
not_so_animated_image,
)
class FetchResultTestCase(PythonTestCase):
def test_can_create_default_fetch_result(self):
result = FetchResult()
expect(result.normalized).to_be_false()
expect(result.buffer).to_be_null()
expect(result.engine).to_be_null()
expect(result.successful).to_be_false()
expect(result.loader_error).to_be_null()
def test_can_create_fetch_result(self):
buffer_mock = Mock()
engine_mock = Mock()
error_mock = Mock()
result = FetchResult(
normalized=True,
buffer=buffer_mock,
engine=engine_mock,
successful=True,
loader_error=error_mock,
)
expect(result.normalized).to_be_true()
expect(result.buffer).to_equal(buffer_mock)
expect(result.engine).to_equal(engine_mock)
expect(result.successful).to_be_true()
expect(result.loader_error).to_equal(error_mock)
class ErrorHandler(BaseHandler):
def get(self):
self._error(403)
class BaseHandlerTestApp(tornado.web.Application):
def __init__(self, context):
self.context = context
super(BaseHandlerTestApp, self).__init__([
(r'/error', ErrorHandler),
])
class BaseImagingTestCase(TestCase):
@classmethod
def setUpClass(cls, *args, **kw):
cls.root_path = tempfile.mkdtemp()
cls.loader_path = abspath(join(dirname(__file__), '../fixtures/images/'))
cls.base_uri = "/image"
@classmethod
def tearDownClass(cls, *args, **kw):
shutil.rmtree(cls.root_path)
class ImagingOperationsWithHttpLoaderTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.http_loader"
cfg.STORAGE = "thumbor.storages.file_storage"
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
def test_image_already_generated_by_thumbor(self):
with open('./tests/fixtures/images/image.jpg', 'r') as f:
self.context.modules.storage.put(
quote("http://test.com/smart/image.jpg"),
f.read()
)
crypto = CryptoURL('ACME-SEC')
image_url = self.get_url(
crypto.generate(
image_url=quote("http://test.com/smart/image.jpg")
)
)
url = crypto.generate(
image_url=quote(image_url)
)
response = self.fetch(url)
expect(response.code).to_equal(200)
def test_image_already_generated_by_thumbor_2_times(self):
with open(
normalize_unicode_path(u'./tests/fixtures/images/alabama1_ap620é.jpg'), 'r'
) as f:
self.context.modules.storage.put(
quote("http://test.com/smart/alabama1_ap620é"),
f.read()
)
crypto = CryptoURL('ACME-SEC')
image_url = self.get_url(
crypto.generate(
image_url=quote(self.get_url(
crypto.generate(
image_url=quote("http://test.com/smart/alabama1_ap620é")
)
))
)
)
url = crypto.generate(
image_url=quote(image_url)
)
response = self.fetch(url)
expect(response.code).to_equal(200)
def test_image_with_utf8_url(self):
with open('./tests/fixtures/images/maracujá.jpg', 'r') as f:
self.context.modules.storage.put(
quote(u"http://test.com/maracujá.jpg".encode('utf-8')),
f.read()
)
crypto = CryptoURL('ACME-SEC')
image_url = self.get_url(
quote(u"/unsafe/http://test.com/maracujá.jpg".encode('utf-8'))
)
url = crypto.generate(
image_url=quote(image_url)
)
response = self.fetch(url)
expect(response.code).to_equal(200)
def test_image_with_http_utf8_url(self):
with open('./tests/fixtures/images/maracujá.jpg', 'r') as f:
self.context.modules.storage.put(
quote(u"http://test.com/maracujá.jpg".encode('utf-8')),
f.read()
)
url = quote(u"/unsafe/http://test.com/maracujá.jpg".encode('utf-8'))
response = self.fetch(url)
expect(response.code).to_equal(200)
class ImagingOperationsTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.file_storage"
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
cfg.QUALITY = 'keep'
cfg.SVG_DPI = 200
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
def test_can_get_image(self):
response = self.fetch('/unsafe/smart/image.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
def test_can_get_image_without_extension(self):
response = self.fetch('/unsafe/smart/image')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
def test_get_unknown_image_returns_not_found(self):
response = self.fetch('/unsafe/smart/imag')
expect(response.code).to_equal(404)
def test_can_get_unicode_image(self):
response = self.fetch(u'/unsafe/%s' % quote(u'15967251_212831_19242645_АгатавЗоопарке.jpg'.encode('utf-8')))
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
def test_can_get_signed_regular_image(self):
response = self.fetch('/_wIUeSaeHw8dricKG2MGhqu5thk=/smart/image.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
def test_url_without_unsafe_or_hash_fails(self):
response = self.fetch('/alabama1_ap620%C3%A9.jpg')
expect(response.code).to_equal(400)
def test_url_without_image(self):
response = self.fetch('/unsafe/')
expect(response.code).to_equal(400)
def test_utf8_encoded_image_name_with_encoded_url(self):
url = '/lc6e3kkm_2Ww7NWho8HPOe-sqLU=/smart/alabama1_ap620%C3%A9.jpg'
response = self.fetch(url)
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(alabama1())
def test_url_with_encoded_hash(self):
url = '/%D1%80=/alabama1_ap620%C3%A9.jpg'
response = self.fetch(url)
expect(response.code).to_equal(400)
def test_image_with_spaces_on_url(self):
response = self.fetch(u'/unsafe/image%20space.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(space_image())
def test_can_get_image_with_filter(self):
response = self.fetch('/5YRxzS2yxZxj9SZ50SoZ11eIdDI=/filters:fill(blue)/image.jpg')
expect(response.code).to_equal(200)
def test_can_get_image_with_invalid_quantization_table(self):
response = self.fetch('/unsafe/invalid_quantization.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(invalid_quantization())
def test_getting_invalid_image_returns_bad_request(self):
response = self.fetch('/unsafe/image_invalid.jpg')
expect(response.code).to_equal(400)
def test_can_read_monochromatic_jpeg(self):
response = self.fetch('/unsafe/grayscale.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_jpeg()
def test_can_read_image_with_small_width_and_no_height(self):
response = self.fetch('/unsafe/0x0:1681x596/1x/image.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_jpeg()
def test_can_read_cmyk_jpeg(self):
response = self.fetch('/unsafe/cmyk.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_jpeg()
def test_can_read_cmyk_jpeg_as_png(self):
response = self.fetch('/unsafe/filters:format(png)/cmyk.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_png()
def test_can_read_image_svg_with_px_units_and_convert_png(self):
response = self.fetch('/unsafe/Commons-logo.svg')
expect(response.code).to_equal(200)
expect(response.body).to_be_png()
engine = Engine(self.context)
engine.load(response.body, '.png')
expect(engine.size).to_equal((1024, 1376))
def test_can_read_image_svg_with_inch_units_and_convert_png(self):
response = self.fetch('/unsafe/Commons-logo-inches.svg')
expect(response.code).to_equal(200)
expect(response.body).to_be_png()
engine = Engine(self.context)
engine.load(response.body, '.png')
expect(engine.size).to_equal((2000, 2600))
def test_can_read_8bit_tiff_as_png(self):
response = self.fetch('/unsafe/gradient_8bit.tif')
expect(response.code).to_equal(200)
expect(response.body).to_be_png()
def test_can_read_16bit_lsb_tiff_as_png(self):
response = self.fetch('/unsafe/gradient_lsb_16bperchannel.tif')
expect(response.code).to_equal(200)
expect(response.body).to_be_png()
def test_can_read_16bit_msb_tiff_as_png(self):
response = self.fetch('/unsafe/gradient_msb_16bperchannel.tif')
expect(response.code).to_equal(200)
expect(response.body).to_be_png()
class ImageOperationsWithoutUnsafeTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.ALLOW_UNSAFE_URL = False
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8890, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
def test_can_get_image_with_signed_url(self):
response = self.fetch('/_wIUeSaeHw8dricKG2MGhqu5thk=/smart/image.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
def test_getting_unsafe_image_fails(self):
response = self.fetch('/unsafe/smart/image.jpg')
expect(response.code).to_equal(400)
class ImageOperationsWithStoredKeysTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='MYKEY')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.ALLOW_UNSAFE_URL = False
cfg.ALLOW_OLD_URLS = True
cfg.STORES_CRYPTO_KEY_FOR_EACH_IMAGE = True
cfg.STORAGE = 'thumbor.storages.file_storage'
cfg.STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8891, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'MYKEY'
return Context(server, cfg, importer)
def test_stored_security_key_with_regular_image(self):
storage = self.context.modules.storage
self.context.server.security_key = 'MYKEY'
storage.put_crypto('image.jpg') # Write a file on the file storage containing the security key
self.context.server.security_key = 'MYKEY2'
try:
response = self.fetch('/nty7gpBIRJ3GWtYDLLw6q1PgqTo=/smart/image.jpg')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
finally:
self.context.server.security_key = 'MYKEY'
def test_stored_security_key_with_regular_image_with_querystring(self):
storage = self.context.modules.storage
self.context.server.security_key = 'MYKEY'
storage.put_crypto('image.jpg%3Fts%3D1') # Write a file on the file storage containing the security key
self.context.server.security_key = 'MYKEY2'
response = self.fetch('/Iw7LZGdr-hHj2gQ4ZzksP3llQHY=/smart/image.jpg%3Fts%3D1')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
def test_stored_security_key_with_regular_image_with_hash(self):
storage = self.context.modules.storage
self.context.server.security_key = 'MYKEY'
storage.put_crypto('image.jpg%23something') # Write a file on the file storage containing the security key
self.context.server.security_key = 'MYKEY2'
response = self.fetch('/fxOHtHcTZMyuAQ1YPKh9KWg7nO8=/smart/image.jpg%23something')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(default_image())
class ImageOperationsWithAutoWebPTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.no_storage"
cfg.AUTO_WEBP = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
def get_as_webp(self, url):
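        # Fetch the URL as a client that advertises WebP support in its Accept header.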
return self.fetch(url, headers={
"Accept": 'image/webp,*/*;q=0.8'
})
def test_can_auto_convert_jpeg(self):
response = self.get_as_webp('/unsafe/image.jpg')
expect(response.code).to_equal(200)
expect(response.headers).to_include('Vary')
expect(response.headers['Vary']).to_include('Accept')
expect(response.body).to_be_webp()
def test_should_bad_request_if_bigger_than_75_megapixels(self):
response = self.get_as_webp('/unsafe/16384x16384.png')
expect(response.code).to_equal(400)
def test_should_bad_request_if_bigger_than_75_megapixels_jpeg(self):
response = self.get_as_webp('/unsafe/9643x10328.jpg')
expect(response.code).to_equal(400)
def test_should_not_convert_animated_gifs_to_webp(self):
response = self.get_as_webp('/unsafe/animated.gif')
expect(response.code).to_equal(200)
expect(response.headers).not_to_include('Vary')
expect(response.body).to_be_gif()
def test_should_convert_image_with_small_width_and_no_height(self):
response = self.get_as_webp('/unsafe/0x0:1681x596/1x/image.jpg')
expect(response.code).to_equal(200)
expect(response.headers).to_include('Vary')
expect(response.headers['Vary']).to_include('Accept')
expect(response.body).to_be_webp()
def test_should_convert_monochromatic_jpeg(self):
response = self.get_as_webp('/unsafe/grayscale.jpg')
expect(response.code).to_equal(200)
expect(response.headers).to_include('Vary')
expect(response.headers['Vary']).to_include('Accept')
expect(response.body).to_be_webp()
def test_should_convert_cmyk_jpeg(self):
response = self.get_as_webp('/unsafe/cmyk.jpg')
expect(response.code).to_equal(200)
expect(response.headers).to_include('Vary')
expect(response.headers['Vary']).to_include('Accept')
expect(response.body).to_be_webp()
def test_shouldnt_convert_cmyk_jpeg_if_format_specified(self):
response = self.get_as_webp('/unsafe/filters:format(png)/cmyk.jpg')
expect(response.code).to_equal(200)
expect(response.headers).not_to_include('Vary')
expect(response.body).to_be_png()
def test_shouldnt_convert_cmyk_jpeg_if_gif(self):
response = self.get_as_webp('/unsafe/filters:format(gif)/cmyk.jpg')
expect(response.code).to_equal(200)
expect(response.headers).not_to_include('Vary')
expect(response.body).to_be_gif()
def test_shouldnt_convert_if_format_specified(self):
response = self.get_as_webp('/unsafe/filters:format(gif)/image.jpg')
expect(response.code).to_equal(200)
expect(response.headers).not_to_include('Vary')
expect(response.body).to_be_gif()
def test_shouldnt_add_vary_if_format_specified(self):
response = self.get_as_webp('/unsafe/filters:format(webp)/image.jpg')
expect(response.code).to_equal(200)
expect(response.headers).not_to_include('Vary')
expect(response.body).to_be_webp()
def test_should_add_vary_if_format_invalid(self):
response = self.get_as_webp('/unsafe/filters:format(asdf)/image.jpg')
expect(response.code).to_equal(200)
expect(response.headers).to_include('Vary')
expect(response.headers['Vary']).to_include('Accept')
expect(response.body).to_be_webp()
def test_converting_return_etags(self):
response = self.get_as_webp('/unsafe/image.jpg')
expect(response.headers).to_include('Etag')
class ImageOperationsWithAutoWebPWithResultStorageTestCase(BaseImagingTestCase):
def get_request(self, *args, **kwargs):
return RequestParameters(*args, **kwargs)
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.AUTO_WEBP = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.request = self.get_request()
ctx.server.gifsicle_path = which('gifsicle')
return ctx
@property
def result_storage(self):
return self.context.modules.result_storage
def get_as_webp(self, url):
return self.fetch(url, headers={
"Accept": 'image/webp,*/*;q=0.8'
})
@patch('thumbor.handlers.Context')
def test_can_auto_convert_jpeg_from_result_storage(self, context_mock):
context_mock.return_value = self.context
crypto = CryptoURL('ACME-SEC')
url = crypto.generate(image_url=quote("http://test.com/smart/image.jpg"))
self.context.request = self.get_request(url=url, accepts_webp=True)
with open('./tests/fixtures/images/image.webp', 'r') as f:
self.context.modules.result_storage.put(f.read())
response = self.get_as_webp(url)
expect(response.code).to_equal(200)
expect(response.headers).to_include('Vary')
expect(response.headers['Vary']).to_include('Accept')
expect(response.body).to_be_webp()
@patch('thumbor.handlers.Context')
def test_can_auto_convert_unsafe_jpeg_from_result_storage(self, context_mock):
context_mock.return_value = self.context
self.context.request = self.get_request(accepts_webp=True)
response = self.get_as_webp('/unsafe/image.jpg')
expect(response.code).to_equal(200)
expect(response.headers).to_include('Vary')
expect(response.headers['Vary']).to_include('Accept')
expect(response.body).to_be_webp()
expect(self.context.request.engine.extension).to_equal('.webp')
class ImageOperationsWithoutEtagsTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.ENABLE_ETAGS = False
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
def test_can_get_image_without_etags(self):
response = self.fetch('/unsafe/image.jpg', headers={
"Accept": 'image/webp,*/*;q=0.8'
})
expect(response.code).to_equal(200)
expect(response.headers).not_to_include('Etag')
class ImageOperationsWithLastModifiedTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.SEND_IF_MODIFIED_LAST_MODIFIED_HEADERS = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
@property
def result_storage(self):
return self.context.modules.result_storage
def write_image(self):
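        # Pre-populate the result storage with the expected image so the handler
        # can serve it when evaluating If-Modified-Since headers.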
expected_path = self.result_storage.normalize_path('_wIUeSaeHw8dricKG2MGhqu5thk=/smart/image.jpg')
if not os.path.exists(dirname(expected_path)):
os.makedirs(dirname(expected_path))
if not os.path.exists(expected_path):
with open(expected_path, 'w') as img:
img.write(default_image())
def test_can_get_304_with_last_modified(self):
self.write_image()
response = self.fetch(
'/_wIUeSaeHw8dricKG2MGhqu5thk=/smart/image.jpg',
headers={
"Accept": 'image/webp,*/*;q=0.8',
"If-Modified-Since":
(datetime.utcnow() + timedelta(seconds=1))
.replace(tzinfo=pytz.utc).strftime("%a, %d %b %Y %H:%M:%S GMT"), # NOW +1 sec UTC
})
expect(response.code).to_equal(304)
def test_can_get_image_with_last_modified(self):
self.write_image()
response = self.fetch(
'/_wIUeSaeHw8dricKG2MGhqu5thk=/smart/image.jpg',
headers={
"Accept": 'image/webp,*/*;q=0.8',
"If-Modified-Since": (datetime.utcnow() - timedelta(days=365))
.replace(tzinfo=pytz.utc).strftime("%a, %d %b %Y %H:%M:%S GMT"), # Last Year
}
)
expect(response.code).to_equal(200)
expect(response.headers).to_include('Last-Modified')
class ImageOperationsWithGifVTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.FFMPEG_PATH = which('ffmpeg')
cfg.OPTIMIZERS = [
'thumbor.optimizers.gifv',
]
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
def test_should_convert_animated_gif_to_mp4_when_filter_without_params(self):
response = self.fetch('/unsafe/filters:gifv()/animated.gif')
expect(response.code).to_equal(200)
expect(response.headers['Content-Type']).to_equal('video/mp4')
def test_should_convert_animated_gif_to_webm_when_filter_with_gifv_webm_param(self):
response = self.fetch('/unsafe/filters:gifv(webm)/animated.gif')
expect(response.code).to_equal(200)
expect(response.headers['Content-Type']).to_equal('video/webm')
class ImageOperationsImageCoverTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.AUTO_WEBP = True
cfg.USE_GIFSICLE_ENGINE = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
def test_can_get_image_cover(self):
response = self.fetch('/unsafe/filters:cover()/animated.gif')
expect(response.code).to_equal(200)
expect(response.headers['Content-Type']).to_equal('image/gif')
class ImageOperationsWithResultStorageTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.USE_GIFSICLE_ENGINE = True
cfg.FFMPEG_PATH = which('ffmpeg')
cfg.AUTO_WEBP = True
cfg.OPTIMIZERS = [
'thumbor.optimizers.gifv',
]
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
@property
def result_storage(self):
return self.context.modules.result_storage
@patch('tornado.ioloop.IOLoop.instance')
def test_saves_image_to_result_storage(self, instance_mock):
instance_mock.return_value = self.io_loop
response = self.fetch('/gTr2Xr9lbzIa2CT_dL_O0GByeR0=/animated.gif')
expect(response.code).to_equal(200)
self.context.request = Mock(
accepts_webp=False,
)
expected_path = self.result_storage.normalize_path('gTr2Xr9lbzIa2CT_dL_O0GByeR0=/animated.gif')
expect(expected_path).to_exist()
expect(response.body).to_be_similar_to(animated_image())
class ImageOperationsResultStorageOnlyTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = '/tmp/path/that/does/not/exist'
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.FFMPEG_PATH = which('ffmpeg')
cfg.USE_GIFSICLE_ENGINE = True
cfg.AUTO_WEBP = True
cfg.OPTIMIZERS = [
'thumbor.optimizers.gifv',
]
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
@property
def result_storage(self):
return self.context.modules.result_storage
def test_loads_image_from_result_storage(self):
self.context.request = Mock(
accepts_webp=False,
)
expected_path = self.result_storage.normalize_path('gTr2Xr9lbzIa2CT_dL_O0GByeR0=/animated.gif')
os.makedirs(dirname(expected_path))
with open(expected_path, 'w') as img:
img.write(animated_image())
response = self.fetch('/gTr2Xr9lbzIa2CT_dL_O0GByeR0=/animated.gif')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(animated_image())
class ImageOperationsWithGifWithoutGifsicle(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.AUTO_WEBP = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
return ctx
def test_should_be_ok_with_single_frame_gif(self):
response = self.fetch('/5Xr8gyuWE7jL_VB72K0wvzTMm2U=/animated-one-frame.gif')
expect(response.code).to_equal(200)
expect(response.headers['Content-Type']).to_equal('image/gif')
expect(response.body).to_be_similar_to(not_so_animated_image())
class ImageOperationsWithGifWithoutGifsicleOnResultStorage(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = '/tmp/path/that/does/not/exist'
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.AUTO_WEBP = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
return ctx
@property
def result_storage(self):
return self.context.modules.result_storage
def test_loads_image_from_result_storage(self):
self.context.request = Mock(
accepts_webp=False,
)
expected_path = self.result_storage.normalize_path('5Xr8gyuWE7jL_VB72K0wvzTMm2U=/animated-one-frame.gif')
os.makedirs(dirname(expected_path))
with open(expected_path, 'w') as img:
img.write(not_so_animated_image())
response = self.fetch('/5Xr8gyuWE7jL_VB72K0wvzTMm2U=/animated-one-frame.gif')
expect(response.code).to_equal(200)
expect(response.body).to_be_similar_to(not_so_animated_image())
class ImageOperationsWithMaxWidthAndMaxHeight(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.RESULT_STORAGE = 'thumbor.result_storages.file_storage'
cfg.RESULT_STORAGE_EXPIRATION_SECONDS = 60
cfg.RESULT_STORAGE_FILE_STORAGE_ROOT_PATH = self.root_path
cfg.AUTO_WEBP = True
cfg.MAX_WIDTH = 150
cfg.MAX_HEIGHT = 150
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
return ctx
def test_should_be_ok_but_150x150(self):
response = self.fetch('/unsafe/200x200/grayscale.jpg')
engine = Engine(self.context)
engine.load(response.body, '.jpg')
expect(response.code).to_equal(200)
expect(response.headers['Content-Type']).to_equal('image/jpeg')
expect(engine.size).to_equal((150, 150))
class ImageOperationsWithMaxPixels(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.no_storage"
cfg.MAX_PIXELS = 1000
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
def test_should_error(self):
response = self.fetch('/unsafe/200x200/grayscale.jpg')
expect(response.code).to_equal(400)
class ImageOperationsWithRespectOrientation(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.no_storage"
cfg.RESPECT_ORIENTATION = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
self.context = Context(server, cfg, importer)
self.context.server.gifsicle_path = which('gifsicle')
return self.context
def test_should_be_ok_when_orientation_exif(self):
response = self.fetch('/unsafe/10_years_of_Wikipedia_by_Guillaume_Paumier.jpg')
expect(response.code).to_equal(200)
engine = Engine(self.context)
engine.load(response.body, '.jpg')
expect(engine.size).to_equal((4052, 3456))
def test_should_be_ok_without_orientation_exif(self):
response = self.fetch('/unsafe/20x20.jpg')
expect(response.code).to_equal(200)
engine = Engine(self.context)
engine.load(response.body, '.jpg')
expect(engine.size).to_equal((20, 20))
class EngineLoadException(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.FILTERS = []
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
@unittest.skip("For some strange reason, this test breaks on Travis.")
def test_should_error_on_engine_load_exception(self):
with patch.object(Engine, 'load', side_effect=ValueError):
response = self.fetch('/unsafe/image.jpg')
expect(response.code).to_equal(504)
def test_should_release_ioloop_on_error_on_engine_exception(self):
response = self.fetch('/unsafe/fit-in/134x134/940x2.png')
expect(response.code).to_equal(200)
def test_should_exec_other_operations_on_error_on_engine_exception(self):
response = self.fetch('/unsafe/fit-in/134x134/filters:equalize()/940x2.png')
expect(response.code).to_equal(200)
class StorageOverride(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.file_storage"
cfg.FILE_STORAGE_ROOT_PATH = self.root_path
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
return Context(server, cfg, importer)
def test_shouldnt_call_put_when_storage_overridden_to_nostorage(self):
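        # Swap the storage for NoStorage while the engine loads and assert that
        # FileStorage.put is never called.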
old_load = Engine.load
old_put = FileStorage.put
def load_override(self, foo, bar):
self.context.modules.storage = NoStorage(None)
return old_load(self, foo, bar)
def put_override(self, path, contents):
expect.not_to_be_here()
Engine.load = load_override
FileStorage.put = put_override
response = self.fetch('/unsafe/image.jpg')
Engine.load = old_load
FileStorage.put = old_put
expect(response.code).to_equal(200)
class ImageOperationsWithJpegtranTestCase(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.JPEGTRAN_PATH = which('jpegtran')
        cfg.PROGRESSIVE_JPEG = True
        cfg.RESULT_STORAGE_STORES_UNSAFE = True
cfg.OPTIMIZERS = [
'thumbor.optimizers.jpegtran',
]
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
return ctx
def test_should_optimize_jpeg(self):
response = self.fetch('/unsafe/200x200/image.jpg')
tmp_fd, tmp_file_path = tempfile.mkstemp(suffix='.jpg')
f = os.fdopen(tmp_fd, 'w')
f.write(response.body)
f.close()
exiftool = which('exiftool')
if not exiftool:
raise AssertionError('exiftool was not found. Please install it to run thumbor\'s tests.')
command = [
exiftool,
tmp_file_path,
'-DeviceModel',
'-EncodingProcess'
]
try:
with open(os.devnull) as null:
output = subprocess.check_output(command, stdin=null)
expect(response.code).to_equal(200)
expect(output).to_equal('Encoding Process : Progressive DCT, Huffman coding\n')
finally:
os.remove(tmp_file_path)
def test_with_meta(self):
response = self.fetch('/unsafe/meta/800x400/image.jpg')
expect(response.code).to_equal(200)
def test_with_meta_cached(self):
self.fetch('/unsafe/meta/800x400/image.jpg')
response = self.fetch('/unsafe/meta/800x400/image.jpg')
expect(response.code).to_equal(200)
class ImageOperationsWithoutStorage(BaseImagingTestCase):
def get_context(self):
cfg = Config(SECURITY_KEY='ACME-SEC')
cfg.LOADER = "thumbor.loaders.file_loader"
cfg.FILE_LOADER_ROOT_PATH = self.loader_path
cfg.STORAGE = "thumbor.storages.no_storage"
cfg.AUTO_WEBP = True
cfg.USE_GIFSICLE_ENGINE = True
importer = Importer(cfg)
importer.import_modules()
server = ServerParameters(8889, 'localhost', 'thumbor.conf', None, 'info', None)
server.security_key = 'ACME-SEC'
ctx = Context(server, cfg, importer)
ctx.server.gifsicle_path = which('gifsicle')
return ctx
def test_meta(self):
response = self.fetch('/unsafe/meta/800x400/image.jpg')
expect(response.code).to_equal(200)
def test_meta_with_unicode(self):
response = self.fetch('/unsafe/meta/200x300/alabama1_ap620%C3%A9.jpg')
expect(response.code).to_equal(200)
obj = loads(response.body)
expect(obj['thumbor']['target']['width']).to_equal(200)
expect(obj['thumbor']['target']['height']).to_equal(300)
def test_meta_frame_count(self):
response = self.fetch('/unsafe/meta/800x400/image.jpg')
expect(response.code).to_equal(200)
obj = loads(response.body)
expect(obj['thumbor']['source']['frameCount']).to_equal(1)
def test_meta_frame_count_with_gif(self):
response = self.fetch('/unsafe/meta/animated.gif')
expect(response.code).to_equal(200)
obj = loads(response.body)
expect(obj['thumbor']['source']['frameCount']).to_equal(2)
def test_max_bytes(self):
response = self.fetch('/unsafe/filters:max_bytes(35000)/Giunchedi%2C_Filippo_January_2015_01.jpg')
expect(response.code).to_equal(200)
expect(len(response.body)).to_be_lesser_or_equal_to(35000)
def test_max_bytes_impossible(self):
response = self.fetch('/unsafe/filters:max_bytes(1000)/Giunchedi%2C_Filippo_January_2015_01.jpg')
expect(response.code).to_equal(200)
expect(len(response.body)).to_be_greater_than(1000)
class TranslateCoordinatesTestCase(TestCase):
def setUp(self, *args, **kwargs):
super(TranslateCoordinatesTestCase, self).setUp(*args, **kwargs)
coords = self.get_coords()
self.translate_crop_coordinates = BaseHandler.translate_crop_coordinates(
original_width=coords['original_width'],
original_height=coords['original_height'],
width=coords['width'],
height=coords['height'],
crop_left=coords['crop_left'],
crop_top=coords['crop_top'],
crop_right=coords['crop_right'],
crop_bottom=coords['crop_bottom']
)
def get_coords(self):
return {
'original_width': 3000,
'original_height': 2000,
'width': 1200,
'height': 800,
'crop_left': 100,
'crop_top': 100,
'crop_right': 200,
'crop_bottom': 200,
'expected_crop': (40, 40, 80, 80)
}
def test_should_be_a_list_of_coords(self):
expect(self.translate_crop_coordinates).to_be_instance_of(tuple)
def test_should_translate_from_original_to_resized(self):
expect(self.translate_crop_coordinates).to_equal(self.get_coords()['expected_crop'])
| mit | -8,804,031,900,127,129,000 | 37.076309 | 116 | 0.638609 | false |
karthikvadla16/spark-tk | python/sparktk/frame/ops/tally_percent.py | 14 | 2413 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def tally_percent(self, sample_col, count_val):
"""
Compute a cumulative percent count.
Parameters
----------
    :param sample_col: (str) The name of the column whose values are scanned when computing the cumulative percent count.
:param count_val: (str) The column value to be used for the counts.
    A cumulative percent count is computed by stepping through the rows in
    order, observing the column values and keeping track of the percentage of
    the total number of times the specified *count_val* has been seen up to
    and including the current row.
Examples
--------
Consider Frame *my_frame*, which accesses a frame that contains a single
column named *obs*:
<hide>
>>> my_frame = tc.frame.create([[0],[1],[2],[0],[1],[2]], [("obs", int)])
-etc-
</hide>
>>> my_frame.inspect()
[#] obs
========
[0] 0
[1] 1
[2] 2
[3] 0
[4] 1
[5] 2
The cumulative percent count for column *obs* is obtained by:
>>> my_frame.tally_percent("obs", "1")
<progress>
    The Frame *my_frame* accesses the original frame that now contains two
    columns: *obs*, which contains the original column values, and
    *obs_tally_percent*, which contains the cumulative percent count:
>>> my_frame.inspect()
[#] obs obs_tally_percent
===========================
[0] 0 0.0
[1] 1 0.5
[2] 2 0.5
[3] 0 0.5
[4] 1 1.0
[5] 2 1.0
"""
self._scala.tallyPercent(sample_col, count_val) | apache-2.0 | -2,300,180,724,566,109,000 | 31 | 93 | 0.578158 | false |
michael-lazar/praw3 | tests/test_decorators.py | 1 | 5974 | from __future__ import print_function, unicode_literals
from .helper import OAuthPRAWTest, betamax
from .mock_response import MockResponse
from praw import errors
from praw.decorator_helpers import _make_func_args
from praw.decorators import restrict_access
from praw.internal import _modify_relationship
from six import text_type
class DecoratorTest(OAuthPRAWTest):
def test_require_access_failure(self):
self.assertRaises(TypeError, restrict_access, scope=None,
oauth_only=True)
def test_make_func_args(self):
def foo(arg1, arg2, arg3):
pass
def bar(arg1, arg2, arg3, *args, **kwargs):
pass
arglist = ['arg1', 'arg2', 'arg3']
self.assertEqual(_make_func_args(foo), arglist)
self.assertEqual(_make_func_args(bar), arglist)
def test_restrict_access_permission_errors(self):
# PRAW doesn't currently use _modify_relationship for friending but
        # this same check might be needed in the future, so let's use this to
# our advantage, temporarily bind a custom unfriend function, ensure
# the proper error is raised, and then, unbind this function.
redditor = self.r.get_redditor(self.un)
redditor.temp_make_friend = _modify_relationship('friend')
self.assertRaises(errors.LoginRequired, redditor.temp_make_friend,
thing=None, user=self.other_user_name)
del redditor.temp_make_friend
# PRAW doesn't currently use restrict_access for mod duties without
# setting a scope but this might be needed in the future, so, lets use
# _modify_relationship to our advantage, temporarily bind a custom
# nonsense function, ensure the proper error is raised, and then,
# unbind this function. This can also be used to detect the
# ModeratorRequired exception from restrict_access as PRAW doesn't have
# any functions that would ordinarily end in this outcome, as all
# moderator reddit endpoints are oauth compatible.
subreddit = self.r.get_subreddit(self.sr)
type(subreddit).temp_nonsense = _modify_relationship('nonsense')
self.assertRaises(errors.LoginRequired,
subreddit.temp_nonsense, user=self.un)
self.r.login(self.other_non_mod_name, self.other_non_mod_pswd,
disable_warning=True)
subreddit = self.r.get_subreddit(self.sr)
self.assertRaises(errors.ModeratorRequired,
subreddit.temp_nonsense, user=self.un)
del type(subreddit).temp_nonsense
# PRAW doesn't currently have a method in which the subreddit
# is taken from function defaults, so, let's write one instead
@restrict_access(mod=True, scope=None)
def fake_func(obj, **kwargs):
return None
type(self.r).fake_func = fake_func
self.assertRaises(errors.ModeratorRequired, self.r.fake_func)
del type(self.r).fake_func
@betamax()
def test_error_list(self):
# use the other account to get a InvalidCaptcha
self.r.refresh_access_information(self.other_refresh_token['submit'])
# implicitly tests the RateLimitExceeded Exception as well
err_list = self.assertExceptionList(
[errors.InvalidCaptcha, errors.RateLimitExceeded], self.r.submit,
self.sr, "test ratelimit error 1", 'ratelimit error test call 1')
captcha_err, ratelimit_err = err_list.errors
self.assertEqual('`{0}` on field `{1}`'.format(captcha_err.message,
captcha_err.field),
str(captcha_err))
self.assertEqual('`{0}` on field `{1}`'.format(ratelimit_err.message,
ratelimit_err.field),
str(ratelimit_err))
expected_list_str = '\n' + "".join(
'\tError {0}) {1}\n'.format(i, text_type(error))
for i, error in enumerate(err_list.errors))
self.assertEqual(expected_list_str, str(err_list))
@betamax()
def test_limit_chars(self):
self.r.refresh_access_information(self.refresh_token['read'])
submission = self.r.get_submission(
submission_id=self.submission_limit_chars_id)
before_limit = text_type(
'{0} :: {1}').format(
submission.score,
submission.title.replace('\r\n', ' '))
expected = before_limit[:self.r.config.output_chars_limit - 3]
expected += '...'
self.assertEqual(str(submission), expected)
self.assertLess(str(submission), before_limit)
@betamax()
def test_raise_nonspecific_apiexception(self):
self.r.refresh_access_information(self.refresh_token['submit'])
err = self.assertRaisesAndReturn(errors.APIException,
self.r.submit, self.sr,
"".join("0" for i in range(301)),
"BODY")
self.assertEqual('({0}) `{1}` on field `{2}`'.format(err.error_type,
err.message,
err.field),
str(err))
@betamax(pass_recorder=True)
def test_raise_not_modified(self, recorder):
self.r.refresh_access_information(self.refresh_token['read'])
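        # Force the last recorded interaction to answer with HTTP 304 so the
        # NotModified error path is exercised.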
with MockResponse.as_context(
recorder.current_cassette.interactions[-1], status_code=304,
reason="Not Modified", json={'error': 304},
headers={"Content-Length": 1}):
err = self.assertRaisesAndReturn(
errors.NotModified, list, self.r.get_subreddit(
self.sr).get_new(limit=25))
self.assertEqual(str(err), 'That page has not been modified.')
| gpl-3.0 | 8,041,067,499,241,472,000 | 47.177419 | 79 | 0.600268 | false |
tfroehlich82/EventGhost | eg/Classes/PluginItem.py | 2 | 6460 | # -*- coding: utf-8 -*-
#
# This file is part of EventGhost.
# Copyright © 2005-2020 EventGhost Project <http://www.eventghost.net/>
#
# EventGhost is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option)
# any later version.
#
# EventGhost is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with EventGhost. If not, see <http://www.gnu.org/licenses/>.
import base64
import pickle
import wx
# Local imports
import eg
from ActionItem import ActionItem
from TreeItem import TreeItem
class PluginItem(ActionItem):
xmlTag = "Plugin"
icon = eg.Icons.PLUGIN_ICON
isRenameable = False
info = None
@eg.AssertInActionThread
def __init__(self, parent, node):
TreeItem.__init__(self, parent, node)
if node.text:
try:
args = pickle.loads(base64.b64decode(node.text))
except AttributeError:
args = ()
else:
args = ()
evalName = node.attrib.get('identifier', None)
self.pluginName = node.attrib.get('file', None)
guid = node.attrib.get('guid', self.pluginName)
self.info = info = eg.pluginManager.OpenPlugin(
guid,
evalName,
args,
self,
)
self.name = eg.text.General.pluginLabel % info.label
if info.icon != self.icon:
self.icon = eg.Icons.PluginSubIcon(info.icon)
#self.icon = info.icon
self.url = info.url
self.executable = info.instance
def AskCut(self):
return self.AskDelete()
def AskDelete(self):
actionItemCls = self.document.ActionItem
def SearchFunc(obj):
if obj.__class__ == actionItemCls:
if obj.executable and obj.executable.plugin == self.executable:
return True
return None
if self.root.Traverse(SearchFunc) is not None:
eg.MessageBox(
eg.text.General.deletePlugin,
eg.APP_NAME,
wx.NO_DEFAULT | wx.OK | wx.ICON_EXCLAMATION
)
return False
if not TreeItem.AskDelete(self):
return False
return True
@eg.AssertInActionThread
def Delete(self):
info = self.info
def DoIt():
info.Close()
info.instance.OnDelete()
info.RemovePluginInstance()
eg.actionThread.Call(DoIt)
ActionItem.Delete(self)
self.executable = None
self.info = None
@eg.AssertInActionThread
def Execute(self):
if not self.isEnabled:
return None, None
if eg.config.logActions:
self.Print(self.name)
if self.shouldSelectOnExecute:
wx.CallAfter(self.Select)
eg.indent += 1
self.info.Start()
eg.indent -= 1
eg.result = self.executable
return None, None
# The Find function calls this from MainThread, so we can't restrict this
# to the ActionThread
#@eg.AssertInActionThread
def GetArguments(self):
return self.info.args
def GetBasePath(self):
"""
        Returns the filesystem path where additional files (like pictures)
should be found.
Overrides ActionItem.GetBasePath()
"""
return self.info.path
def GetData(self):
attr, text = TreeItem.GetData(self)
del attr[0]
attr.append(('Identifier', self.executable.info.evalName))
guid = self.executable.info.guid
if guid:
attr.append(('Guid', guid))
attr.append(('File', self.pluginName))
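        # The plugin's arguments are pickled and base64-encoded so they can be
        # stored as the XML element's text.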
text = base64.b64encode(pickle.dumps(self.info.args, 2))
return attr, text
def GetLabel(self):
return self.name
def GetTypeName(self):
return self.executable.info.name
def NeedsStartupConfiguration(self):
"""
Returns True if the item wants to be configured after creation.
Overrides ActionItem.NeedsStartupConfiguration()
"""
# if the Configure method of the executable is overriden, we assume
# the item wants to be configured after creation
return (
self.executable.Configure.im_func !=
eg.PluginBase.Configure.im_func
)
def RefreshAllVisibleActions(self):
"""
Calls Refresh() for all currently visible actions of this plugin.
"""
actionItemCls = self.document.ActionItem
plugin = self.info.instance
def Traverse(item):
if item.__class__ == actionItemCls:
if item.executable.plugin == plugin:
pass
#eg.Notify("NodeChanged", item)
else:
if item.childs and item in item.document.expandedNodes:
for child in item.childs:
Traverse(child)
Traverse(self.root)
@eg.LogIt
def RestoreState(self):
if self.isEnabled:
eg.actionThread.Call(self.info.Start)
@eg.LogIt
@eg.AssertInActionThread
def SetArguments(self, args):
info = self.info
if not info.lastException and args == self.info.args:
return
self.info.args = args
label = info.instance.GetLabel(*args)
if label != info.label:
info.label = label
self.name = eg.text.General.pluginLabel % label
#eg.Notify("NodeChanged", self)
self.RefreshAllVisibleActions()
if self.isEnabled:
eg.actionThread.Call(self.info.Stop)
eg.actionThread.Call(self.info.Start)
def SetAttributes(self, tree, itemId):
if (
self.info is None or
self.info.lastException or
self.info.initFailed
):
tree.SetItemTextColour(itemId, eg.colour.pluginError)
@eg.AssertInActionThread
def SetEnable(self, flag=True):
ActionItem.SetEnable(self, flag)
if flag:
self.info.Start()
else:
self.info.Stop()
| gpl-2.0 | 343,042,178,201,736,000 | 29.757143 | 79 | 0.596068 | false |
rajendrant/ArduinoControl | python/ArduinoControlClient/simple_serial.py | 1 | 1053 | import serial
import sys
import base64
import ArduinoControlClient
import time
class SerialSocket(object):
def __init__(self, dev):
self.ser = serial.Serial(dev, 115200, timeout=1)
time.sleep(1)
def is_connected(self):
return self.ser.is_open
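    # Messages are hex (base16) encoded and newline-terminated on the wire.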
def send_msg(self, m):
self.ser.write(base64.b16encode(m))
self.ser.write('\n')
def recv_msg(self):
resp = self.ser.readline()
return base64.b16decode(resp[:-2])
class SerialBoardClient(ArduinoControlClient.BoardClient):
def __init__(self, dev):
self.dev = dev
super(SerialBoardClient, self).__init__()
def init_sock(self):
        self.sock = SerialSocket(self.dev)
dev = sys.argv[1] if len(sys.argv)>1 else '/dev/ttyUSB1'
b = SerialBoardClient(dev)
def ping_test(b):
for i in range(4):
latency = b.ping_test()
print 'ping_test', 'PASSED latency=%d'%(latency) if latency else 'FAILED'
time.sleep(0.5)
ping_test(b)
print b.get_this_address()
print b.get_system_uptime()
| gpl-3.0 | 7,608,013,402,955,939,000 | 24.071429 | 81 | 0.62868 | false |
askhl/ase | ase/calculators/aims.py | 1 | 24803 | """This module defines an ASE interface to FHI-aims.
Felix Hanke [email protected]
Jonas Bjork [email protected]
"""
import os
import numpy as np
from ase.units import Hartree
from ase.io.aims import write_aims, read_aims
from ase.data import atomic_numbers
from ase.calculators.calculator import FileIOCalculator, Parameters, kpts2mp, \
ReadError
float_keys = [
'charge',
'charge_mix_param',
'default_initial_moment',
'fixed_spin_moment',
'hartree_convergence_parameter',
'harmonic_length_scale',
'ini_linear_mix_param',
'ini_spin_mix_parma',
'initial_moment',
'MD_MB_init',
'MD_time_step',
'prec_mix_param',
'set_vacuum_level',
'spin_mix_param',
]
exp_keys = [
'basis_threshold',
'occupation_thr',
'sc_accuracy_eev',
'sc_accuracy_etot',
'sc_accuracy_forces',
'sc_accuracy_rho',
'sc_accuracy_stress',
]
string_keys = [
'communication_type',
'density_update_method',
'KS_method',
'mixer',
'output_level',
'packed_matrix_format',
'relax_unit_cell',
'restart',
'restart_read_only',
'restart_write_only',
'spin',
'total_energy_method',
'qpe_calc',
'xc',
'species_dir',
'run_command',
]
int_keys = [
'empty_states',
'ini_linear_mixing',
'max_relaxation_steps',
'max_zeroin',
'multiplicity',
'n_max_pulay',
'sc_iter_limit',
'walltime',
]
bool_keys = [
'collect_eigenvectors',
'compute_forces',
'compute_kinetic',
'compute_numerical_stress',
'compute_analytical_stress',
'distributed_spline_storage',
'evaluate_work_function',
'final_forces_cleaned',
'hessian_to_restart_geometry',
'load_balancing',
'MD_clean_rotations',
'MD_restart',
'override_illconditioning',
'override_relativity',
'restart_relaxations',
'squeeze_memory',
'symmetry_reduced_k_grid',
'use_density_matrix',
'use_dipole_correction',
'use_local_index',
'use_logsbt',
'vdw_correction_hirshfeld',
]
list_keys = [
'init_hess',
'k_grid',
'k_offset',
'MD_run',
'MD_schedule',
'MD_segment',
'mixer_threshold',
'occupation_type',
'output',
'cube',
'preconditioner',
'relativistic',
'relax_geometry',
]
class Aims(FileIOCalculator):
command = 'aims.version.serial.x > aims.out'
implemented_properties = ['energy', 'forces', 'stress', 'dipole']
def __init__(self, restart=None, ignore_bad_restart_file=False,
label=os.curdir, atoms=None, cubes=None, radmul=None, tier=None, **kwargs):
"""Construct FHI-aims calculator.
The keyword arguments (kwargs) can be one of the ASE standard
keywords: 'xc', 'kpts' and 'smearing' or any of FHI-aims'
native keywords.
Additional arguments:
cubes: AimsCube object
Cube file specification.
radmul: int
Set radial multiplier for the basis set of all atomic species.
tier: int or array of ints
Set basis set tier for all atomic species.
"""
try:
self.outfilename = kwargs.get('run_command').split()[-1]
        except (AttributeError, IndexError):
self.outfilename = 'aims.out'
FileIOCalculator.__init__(self, restart, ignore_bad_restart_file,
label, atoms,
command = kwargs.get('run_command'),
**kwargs)
self.cubes = cubes
self.radmul = radmul
self.tier = tier
def set_label(self, label):
self.label = label
self.directory = label
self.prefix = ''
self.out = os.path.join(label, self.outfilename)
def check_state(self, atoms):
system_changes = FileIOCalculator.check_state(self, atoms)
# Ignore unit cell for molecules:
if not atoms.pbc.any() and 'cell' in system_changes:
system_changes.remove('cell')
return system_changes
def set(self, **kwargs):
xc = kwargs.get('xc')
if xc:
kwargs['xc'] = {'LDA': 'pw-lda', 'PBE': 'pbe'}.get(xc, xc)
changed_parameters = FileIOCalculator.set(self, **kwargs)
if changed_parameters:
self.reset()
return changed_parameters
def write_input(self, atoms, properties=None, system_changes=None, ghosts=None):
FileIOCalculator.write_input(self, atoms, properties, system_changes)
have_lattice_vectors = atoms.pbc.any()
have_k_grid = ('k_grid' in self.parameters or
'kpts' in self.parameters)
if have_lattice_vectors and not have_k_grid:
raise RuntimeError('Found lattice vectors but no k-grid!')
if not have_lattice_vectors and have_k_grid:
raise RuntimeError('Found k-grid but no lattice vectors!')
write_aims(os.path.join(self.directory, 'geometry.in'), atoms, ghosts)
self.write_control(atoms, os.path.join(self.directory, 'control.in'))
self.write_species(atoms, os.path.join(self.directory, 'control.in'))
self.parameters.write(os.path.join(self.directory, 'parameters.ase'))
def write_control(self, atoms, filename):
output = open(filename, 'w')
for line in ['=====================================================',
'FHI-aims file: ' + filename,
'Created using the Atomic Simulation Environment (ASE)',
'',
'List of parameters used to initialize the calculator:',
'=====================================================']:
output.write('#' + line + '\n')
assert not ('kpts' in self.parameters and 'k_grid' in self.parameters)
assert not ('smearing' in self.parameters and
'occupation_type' in self.parameters)
for key, value in self.parameters.items():
if key == 'kpts':
mp = kpts2mp(atoms, self.parameters.kpts)
output.write('%-35s%d %d %d\n' % (('k_grid',) + tuple(mp)))
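                # dk = 0.5 - 0.5/n shifts each direction by half a grid spacing,
                # reproducing the usual Monkhorst-Pack point placement.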
dk = 0.5 - 0.5 / np.array(mp)
output.write('%-35s%f %f %f\n' % (('k_offset',) + tuple(dk)))
elif key == 'species_dir' or key == 'run_command':
continue
elif key == 'smearing':
name = self.parameters.smearing[0].lower()
if name == 'fermi-dirac':
name = 'fermi'
width = self.parameters.smearing[1]
output.write('%-35s%s %f' % ('occupation_type', name, width))
if name == 'methfessel-paxton':
order = self.parameters.smearing[2]
output.write(' %d' % order)
                output.write('\n')
elif key == 'output':
for output_type in value:
output.write('%-35s%s\n' % (key, output_type))
elif key == 'vdw_correction_hirshfeld' and value:
output.write('%-35s\n' % key)
elif key in bool_keys:
output.write('%-35s.%s.\n' % (key, repr(bool(value)).lower()))
elif isinstance(value, (tuple, list)):
output.write('%-35s%s\n' %
(key, ' '.join(str(x) for x in value)))
elif isinstance(value, str):
output.write('%-35s%s\n' % (key, value))
else:
output.write('%-35s%r\n' % (key, value))
if self.cubes:
self.cubes.write(output)
output.write(
'#=======================================================\n\n')
output.close()
def read(self, label):
FileIOCalculator.read(self, label)
geometry = os.path.join(self.directory, 'geometry.in')
control = os.path.join(self.directory, 'control.in')
for filename in [geometry, control, self.out]:
if not os.path.isfile(filename):
raise ReadError
self.atoms = read_aims(geometry)
self.parameters = Parameters.read(os.path.join(self.directory,
'parameters.ase'))
self.read_results()
def read_results(self):
converged = self.read_convergence()
if not converged:
os.system('tail -20 ' + self.out)
raise RuntimeError('FHI-aims did not converge!\n' +
'The last lines of output are printed above ' +
'and should give an indication why.')
self.read_energy()
if ('compute_forces' in self.parameters or
'sc_accuracy_forces' in self.parameters):
self.read_forces()
if ('compute_numerical_stress' in self.parameters or
'compute_analytical_stress' in self.parameters):
self.read_stress()
if ('dipole' in self.parameters.get('output', []) and
not self.atoms.pbc.any()):
self.read_dipole()
def write_species(self, atoms, filename='control.in'):
self.ctrlname = filename
species_path = self.parameters.get('species_dir')
if species_path is None:
species_path = os.environ.get('AIMS_SPECIES_DIR')
if species_path is None:
raise RuntimeError(
'Missing species directory! Use species_dir ' +
'parameter or set $AIMS_SPECIES_DIR environment variable.')
control = open(filename, 'a')
symbols = atoms.get_chemical_symbols()
symbols2 = []
for n, symbol in enumerate(symbols):
if symbol not in symbols2:
symbols2.append(symbol)
if self.tier is not None:
if isinstance(self.tier, int):
self.tierlist = np.ones(len(symbols2),'int') * self.tier
elif isinstance(self.tier, list):
assert len(self.tier) == len(symbols2)
self.tierlist = self.tier
for i, symbol in enumerate(symbols2):
fd = os.path.join(species_path, '%02i_%s_default' %
(atomic_numbers[symbol], symbol))
reached_tiers = False
for line in open(fd, 'r'):
if self.tier is not None:
if 'First tier' in line:
reached_tiers = True
self.targettier = self.tierlist[i]
self.foundtarget = False
self.do_uncomment = True
if reached_tiers:
line = self.format_tiers(line)
control.write(line)
if self.tier is not None and not self.foundtarget:
raise RuntimeError(
"Basis tier %i not found for element %s"\
% (self.targettier, symbol))
control.close()
if self.radmul is not None:
self.set_radial_multiplier()
def format_tiers(self, line):
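        # Species defaults group the optional basis functions by tier; depending
        # on the requested tier, the lines below are uncommented or commented out.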
if 'meV' in line:
assert line[0] == '#'
if 'tier' in line and 'Further' not in line:
tier = line.split(" tier")[0]
tier = tier.split('"')[-1]
current_tier = self.translate_tier(tier)
if current_tier == self.targettier:
self.foundtarget = True
elif current_tier > self.targettier:
self.do_uncomment = False
else:
self.do_uncomment = False
return line
elif self.do_uncomment and line[0] == '#':
return line[1:]
elif not self.do_uncomment and line[0] != '#':
return '#'+line
else:
return line
def translate_tier(self, tier):
if tier.lower() == 'first':
return 1
elif tier.lower() == 'second':
return 2
elif tier.lower() == 'third':
return 3
elif tier.lower() == 'fourth':
return 4
else:
return -1
def set_radial_multiplier(self):
assert isinstance(self.radmul, int)
newctrl = self.ctrlname+'.new'
fin = open(self.ctrlname, 'r')
fout = open(newctrl, 'w')
newline = " radial_multiplier %i\n" % self.radmul
for line in fin:
if ' radial_multiplier' in line:
fout.write(newline)
else:
fout.write(line)
fin.close()
fout.close()
os.rename(newctrl, self.ctrlname)
def get_dipole_moment(self, atoms):
if ('dipole' not in self.parameters.get('output', []) or
atoms.pbc.any()):
raise NotImplementedError
return FileIOCalculator.get_dipole_moment(self, atoms)
def get_stress(self, atoms):
if ('compute_numerical_stress' not in self.parameters and
'compute_analytical_stress' not in self.parameters):
raise NotImplementedError
return FileIOCalculator.get_stress(self, atoms)
def get_forces(self, atoms):
if ('compute_forces' not in self.parameters and
'sc_accuracy_forces' not in self.parameters):
raise NotImplementedError
return FileIOCalculator.get_forces(self, atoms)
def read_dipole(self):
"Method that reads the electric dipole moment from the output file."
for line in open(self.out, 'r'):
if line.rfind('Total dipole moment [eAng]') > -1:
dipolemoment = np.array([float(f)
for f in line.split()[6:9]])
self.results['dipole'] = dipolemoment
def read_energy(self):
for line in open(self.out, 'r'):
if line.rfind('Total energy corrected') > -1:
E0 = float(line.split()[5])
elif line.rfind('Total energy uncorrected') > -1:
F = float(line.split()[5])
self.results['free_energy'] = F
self.results['energy'] = E0
def read_forces(self):
"""Method that reads forces from the output file.
If 'all' is switched on, the forces for all ionic steps
        in the output file will be returned; otherwise only the
forces for the last ionic configuration are returned."""
lines = open(self.out, 'r').readlines()
forces = np.zeros([len(self.atoms), 3])
for n, line in enumerate(lines):
if line.rfind('Total atomic forces') > -1:
for iatom in range(len(self.atoms)):
data = lines[n + iatom + 1].split()
for iforce in range(3):
forces[iatom, iforce] = float(data[2 + iforce])
self.results['forces'] = forces
def read_stress(self):
lines = open(self.out, 'r').readlines()
stress = None
for n, line in enumerate(lines):
if (line.rfind('| Analytical stress tensor') > -1 or
line.rfind('Numerical stress tensor') > -1):
stress = []
for i in [n + 5, n + 6, n + 7]:
data = lines[i].split()
stress += [float(data[2]), float(data[3]), float(data[4])]
# rearrange in 6-component form and return
self.results['stress'] = np.array([stress[0], stress[4], stress[8],
stress[5], stress[2], stress[1]])
def read_convergence(self):
converged = False
lines = open(self.out, 'r').readlines()
for n, line in enumerate(lines):
if line.rfind('Have a nice day') > -1:
converged = True
return converged
def get_number_of_iterations(self):
return self.read_number_of_iterations()
def read_number_of_iterations(self):
niter = None
lines = open(self.out, 'r').readlines()
for n, line in enumerate(lines):
if line.rfind('| Number of self-consistency cycles') > -1:
niter = int(line.split(':')[-1].strip())
return niter
def get_electronic_temperature(self):
return self.read_electronic_temperature()
def read_electronic_temperature(self):
width = None
lines = open(self.out, 'r').readlines()
for n, line in enumerate(lines):
if line.rfind('Occupation type:') > -1:
width = float(line.split('=')[-1].strip().split()[0])
return width
def get_number_of_electrons(self):
return self.read_number_of_electrons()
def read_number_of_electrons(self):
nelect = None
lines = open(self.out, 'r').readlines()
for n, line in enumerate(lines):
if line.rfind('The structure contains') > -1:
nelect = float(line.split()[-2].strip())
return nelect
def get_number_of_bands(self):
return self.read_number_of_bands()
def read_number_of_bands(self):
nband = None
lines = open(self.out, 'r').readlines()
for n, line in enumerate(lines):
if line.rfind('Total number of basis functions') > -1:
nband = int(line.split(':')[-1].strip())
return nband
def get_k_point_weights(self):
return self.read_kpts(mode='k_point_weights')
def get_bz_k_points(self):
raise NotImplementedError
def get_ibz_k_points(self):
return self.read_kpts(mode='ibz_k_points')
def get_spin_polarized(self):
return self.read_number_of_spins()
def get_number_of_spins(self):
return 1 + self.get_spin_polarized()
def read_number_of_spins(self):
spinpol = None
lines = open(self.out, 'r').readlines()
for n, line in enumerate(lines):
if line.rfind('| Number of spin channels') > -1:
spinpol = int(line.split(':')[-1].strip()) - 1
return spinpol
def read_magnetic_moment(self):
magmom = None
if not self.get_spin_polarized():
magmom = 0.0
else: # only for spinpolarized system Magnetisation is printed
for line in open(self.label + '.txt'):
if line.find('Magnetisation') != -1: # last one
magmom = float(line.split('=')[-1].strip())
return magmom
def get_fermi_level(self):
return self.read_fermi()
def get_eigenvalues(self, kpt=0, spin=0):
return self.read_eigenvalues(kpt, spin, 'eigenvalues')
def get_occupations(self, kpt=0, spin=0):
return self.read_eigenvalues(kpt, spin, 'occupations')
def read_fermi(self):
E_f = None
lines = open(self.out, 'r').readlines()
for n, line in enumerate(lines):
if line.rfind('| Chemical potential (Fermi level) in eV') > -1:
E_f = float(line.split(':')[-1].strip())
return E_f
def read_kpts(self, mode='ibz_k_points'):
""" Returns list of kpts weights or kpts coordinates. """
values = []
assert mode in ['ibz_k_points' , 'k_point_weights'], 'mode not in [\'ibz_k_points\' , \'k_point_weights\']'
lines = open(self.out, 'r').readlines()
kpts = None
for n, line in enumerate(lines):
if line.rfind('K-points in task') > -1:
kpts = int(line.split(':')[-1].strip())
kptsstart = n
break
assert not kpts is None
text = lines[kptsstart + 1:]
values = []
for line in text[:kpts]:
if mode == 'ibz_k_points':
b = [float(c.strip()) for c in line.split()[4:7]]
else:
b = float(line.split()[-1])
values.append(b)
if len(values) == 0:
values = None
return np.array(values)
def read_eigenvalues(self, kpt=0, spin=0, mode='eigenvalues'):
""" Returns list of last eigenvalues, occupations
for given kpt and spin. """
values = []
assert mode in ['eigenvalues' , 'occupations'], 'mode not in [\'eigenvalues\' , \'occupations\']'
lines = open(self.out, 'r').readlines()
# number of kpts
kpts = None
for n, line in enumerate(lines):
if line.rfind('K-points in task') > -1:
kpts = int(line.split(':')[-1].strip())
break
assert not kpts is None
assert kpt + 1 <= kpts
# find last (eigenvalues)
eigvalstart = None
for n, line in enumerate(lines):
if line.rfind('Preliminary charge convergence reached') > -1:
eigvalstart = n
break
assert not eigvalstart is None
lines = lines[eigvalstart:]
for n, line in enumerate(lines):
if line.rfind('Writing Kohn-Sham eigenvalues') > -1:
eigvalstart = n
break
assert not eigvalstart is None
text = lines[eigvalstart + 1:] # remove first 1 line
# find the requested k-point
nbands = self.read_number_of_bands()
sppol = self.get_spin_polarized()
beg = (nbands + 4 + int(sppol)*1) * kpt * (sppol + 1) + 3 + sppol * 2 + kpt * sppol
if self.get_spin_polarized():
if spin == 0:
beg = beg
end = beg + nbands
else:
beg = beg + nbands + 5
end = beg + nbands
else:
end = beg + nbands
values = []
for line in text[beg:end]:
# aims prints stars for large values ...
line = line.replace('**************', ' 10000')
b = [float(c.strip()) for c in line.split()[1:]]
values.append(b)
if mode == 'eigenvalues':
values = [Hartree*v[1] for v in values]
else:
values = [v[0] for v in values]
if len(values) == 0:
values = None
return np.array(values)
class AimsCube:
"Object to ensure the output of cube files, can be attached to Aims object"
def __init__(self, origin=(0, 0, 0),
edges=[(0.1, 0.0, 0.0), (0.0, 0.1, 0.0), (0.0, 0.0, 0.1)],
points=(50, 50, 50), plots=None):
"""parameters:
origin, edges, points = same as in the FHI-aims output
plots: what to print, same names as in FHI-aims """
self.name = 'AimsCube'
self.origin = origin
self.edges = edges
self.points = points
self.plots = plots
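    # Illustrative usage sketch (the 'cubes' keyword on the Aims calculator and
    # the plot names below are assumptions, not taken from this module):
    #     cube = AimsCube(points=(100, 100, 100), plots=('total_density',))
    #     calc = Aims(xc='PBE', cubes=cube)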
def ncubes(self):
"""returns the number of cube files to output """
if self.plots:
number = len(self.plots)
else:
number = 0
return number
def set(self, **kwargs):
""" set any of the parameters ... """
# NOT IMPLEMENTED AT THE MOMENT!
def move_to_base_name(self, basename):
""" when output tracking is on or the base namem is not standard,
this routine will rename add the base to the cube file output for
easier tracking """
for plot in self.plots:
found = False
cube = plot.split()
if (cube[0] == 'total_density' or
cube[0] == 'spin_density' or
cube[0] == 'delta_density'):
found = True
old_name = cube[0] + '.cube'
new_name = basename + '.' + old_name
if cube[0] == 'eigenstate' or cube[0] == 'eigenstate_density':
found = True
state = int(cube[1])
s_state = cube[1]
for i in [10, 100, 1000, 10000]:
if state < i:
s_state = '0' + s_state
old_name = cube[0] + '_' + s_state + '_spin_1.cube'
new_name = basename + '.' + old_name
if found:
os.system('mv ' + old_name + ' ' + new_name)
def add_plot(self, name):
""" in case you forgot one ... """
self.plots += [name]
def write(self, file):
""" write the necessary output to the already opened control.in """
file.write('output cube ' + self.plots[0] + '\n')
file.write(' cube origin ')
for ival in self.origin:
file.write(str(ival) + ' ')
file.write('\n')
for i in range(3):
file.write(' cube edge ' + str(self.points[i]) + ' ')
for ival in self.edges[i]:
file.write(str(ival) + ' ')
file.write('\n')
if self.ncubes() > 1:
for i in range(self.ncubes() - 1):
file.write('output cube ' + self.plots[i + 1] + '\n')
| gpl-2.0 | 8,393,126,757,321,245,000 | 35.208759 | 115 | 0.528041 | false |
gxxjjj/QuantEcon.py | quantecon/tests/test_lae.py | 7 | 1148 | """
Tests for lae.py
@author : Spencer Lyon
@date : 2014-08-02
TODO: write (economically) meaningful tests for this module
"""
from __future__ import division
from nose.tools import assert_equal
import numpy as np
from scipy.stats import lognorm
from quantecon import LAE
# copied from the lae lecture
s = 0.2
delta = 0.1
a_sigma = 0.4 # A = exp(B) where B ~ N(0, a_sigma)
alpha = 0.4 # We set f(k) = k**alpha
phi = lognorm(a_sigma)
def p(x, y):
d = s * x**alpha
return phi.pdf((y - (1 - delta) * x) / d) / d
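# For reference, p(x, y) above is the stochastic kernel of the growth model
# k' = s * A * k**alpha + (1 - delta) * k with A ~ lognormal(0, a_sigma):
# solving for A gives A = (y - (1 - delta) * x) / (s * x**alpha), so the density
# of y given x is phi.pdf(A) / (s * x**alpha), which is what p() computes.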
# other data
n_a, n_b, n_y = 50, (5, 5), 20
a = np.random.rand(n_a) + 0.01
b = np.random.rand(*n_b) + 0.01
y = np.linspace(0, 10, 20)
lae_a = LAE(p, a)
lae_b = LAE(p, b)
def test_x_flattened():
"lae: is x flattened and reshaped"
# should have a trailing singleton dimension
assert_equal(lae_b.X.shape[-1], 1)
assert_equal(lae_a.X.shape[-1], 1)
def test_x_2d():
"lae: is x 2d"
assert_equal(lae_a.X.ndim, 2)
assert_equal(lae_b.X.ndim, 2)
def test_call_shapes():
"lae: shape of call to lae"
assert_equal(lae_a(y).shape, (n_y,))
assert_equal(lae_b(y).shape, (n_y,))
| bsd-3-clause | 4,004,986,228,655,874,600 | 19.872727 | 59 | 0.609756 | false |
shoopio/shoop | shuup/core/order_creator/_source_modifier.py | 2 | 1327 | # This file is part of Shuup.
#
# Copyright (c) 2012-2019, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from shuup.apps.provides import load_module_instances
def get_order_source_modifier_modules():
"""
Get a list of configured order source modifier module instances.
:rtype: list[OrderSourceModifierModule]
"""
return load_module_instances(
"SHUUP_ORDER_SOURCE_MODIFIER_MODULES", "order_source_modifier_module")
def is_code_usable(order_source, code):
return any(
module.can_use_code(order_source, code)
for module in get_order_source_modifier_modules()
)
class OrderSourceModifierModule(object):
def get_new_lines(self, order_source, lines):
"""
Get new lines to be added to order source.
:type order_source: shuup.core.order_creator.OrderSource
:type lines: list[shuup.core.order_creator.SourceLine]
:rtype: Iterable[shuup.core.order_creator.SourceLine]
"""
return []
def can_use_code(self, order_source, code):
return False
def use_code(self, order, code):
pass
def clear_codes(self, order):
pass
| agpl-3.0 | 2,572,933,287,643,319,000 | 27.234043 | 78 | 0.677468 | false |
YingHsuan/termite_data_server | web2py/scripts/extract_oracle_models.py | 17 | 10618 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Create web2py model (python code) to represent Oracle 11g tables.
Features:
* Uses Oracle's metadata tables
* Detects legacy "keyed" tables (not having an "id" PK)
* Connects directly to running databases, no need to do a SQL dump
* Handles notnull, unique and referential constraints
* Detects most common datatypes and default values
* Documents alternative datatypes as comments
Requirements:
* Needs Oracle cx_Oracle python connector (same as web2py)
Created by Oscar Fonts, based on extract_pgsql_models by Mariano Reingart,
based in turn on a script to "generate schemas from dbs" (mysql)
by Alexandre Andrade
"""
_author__ = "Oscar Fonts <[email protected]>"
HELP = """
USAGE: extract_oracle_models db host port user passwd
Call with Oracle database connection parameters;
the web2py model will be printed on standard output.
EXAMPLE: python extract_oracle_models.py ORCL localhost 1521 user password
"""
# Config options
DEBUG = False # print debug messages to STDERR
# Constant for Field keyword parameter order (and filter):
KWARGS = ('type', 'length', 'default', 'required', 'ondelete',
'notnull', 'unique', 'label', 'comment')
import sys
def query(conn, sql, *args):
"Execute a SQL query and return rows as a list of dicts"
cur = conn.cursor()
ret = []
try:
if DEBUG:
print >> sys.stderr, "QUERY: ", sql , args
cur.execute(sql, args)
for row in cur:
dic = {}
for i, value in enumerate(row):
field = cur.description[i][0]
dic[field] = value
if DEBUG:
print >> sys.stderr, "RET: ", dic
ret.append(dic)
return ret
except cx_Oracle.DatabaseError, exc:
error, = exc.args
print >> sys.stderr, "Oracle-Error-Message:", error.message
finally:
cur.close()
def get_tables(conn):
"List table names in a given schema"
rows = query(conn, """SELECT TABLE_NAME FROM USER_TABLES
ORDER BY TABLE_NAME""")
return [row['TABLE_NAME'] for row in rows]
def get_fields(conn, table):
"Retrieve field list for a given table"
if DEBUG:
print >> sys.stderr, "Processing TABLE", table
rows = query(conn, """
SELECT COLUMN_NAME, DATA_TYPE,
NULLABLE AS IS_NULLABLE,
CHAR_LENGTH AS CHARACTER_MAXIMUM_LENGTH,
DATA_PRECISION AS NUMERIC_PRECISION,
DATA_SCALE AS NUMERIC_SCALE,
DATA_DEFAULT AS COLUMN_DEFAULT
FROM USER_TAB_COLUMNS
WHERE TABLE_NAME=:t
""", table)
return rows
def define_field(conn, table, field, pks):
"Determine field type, default value, references, etc."
f = {}
ref = references(conn, table, field['COLUMN_NAME'])
# Foreign Keys
if ref:
f.update(ref)
# PK & Numeric & autoincrement => id
elif field['COLUMN_NAME'] in pks and \
field['DATA_TYPE'] in ('INT', 'NUMBER') and \
is_autoincrement(conn, table, field):
f['type'] = "'id'"
# Other data types
elif field['DATA_TYPE'] in ('BINARY_DOUBLE'):
f['type'] = "'double'"
elif field['DATA_TYPE'] in ('CHAR','NCHAR'):
f['type'] = "'string'"
f['comment'] = "'Alternative types: boolean, time'"
elif field['DATA_TYPE'] in ('BLOB', 'CLOB'):
f['type'] = "'blob'"
f['comment'] = "'Alternative types: text, json, list:*'"
elif field['DATA_TYPE'] in ('DATE'):
f['type'] = "'datetime'"
f['comment'] = "'Alternative types: date'"
elif field['DATA_TYPE'] in ('FLOAT'):
f['type'] = "'float'"
elif field['DATA_TYPE'] in ('INT'):
f['type'] = "'integer'"
elif field['DATA_TYPE'] in ('NUMBER'):
f['type'] = "'bigint'"
elif field['DATA_TYPE'] in ('NUMERIC'):
f['type'] = "'decimal'"
f['precision'] = field['NUMERIC_PRECISION']
f['scale'] = field['NUMERIC_SCALE'] or 0
elif field['DATA_TYPE'] in ('VARCHAR2','NVARCHAR2'):
f['type'] = "'string'"
if field['CHARACTER_MAXIMUM_LENGTH']:
f['length'] = field['CHARACTER_MAXIMUM_LENGTH']
f['comment'] = "'Other possible types: password, upload'"
else:
f['type'] = "'blob'"
f['comment'] = "'WARNING: Oracle Data Type %s was not mapped." % \
str(field['DATA_TYPE']) + " Using 'blob' as fallback.'"
try:
if field['COLUMN_DEFAULT']:
if field['COLUMN_DEFAULT'] == "sysdate":
d = "request.now"
elif field['COLUMN_DEFAULT'].upper() == "T":
d = "True"
elif field['COLUMN_DEFAULT'].upper() == "F":
d = "False"
else:
d = repr(eval(field['COLUMN_DEFAULT']))
f['default'] = str(d)
except (ValueError, SyntaxError):
pass
except Exception, e:
raise RuntimeError(
"Default unsupported '%s'" % field['COLUMN_DEFAULT'])
if not field['IS_NULLABLE']:
f['notnull'] = "True"
return f
def is_unique(conn, table, field):
"Find unique columns"
rows = query(conn, """
SELECT COLS.COLUMN_NAME
FROM USER_CONSTRAINTS CONS, ALL_CONS_COLUMNS COLS
WHERE CONS.OWNER = COLS.OWNER
AND CONS.CONSTRAINT_NAME = COLS.CONSTRAINT_NAME
AND CONS.CONSTRAINT_TYPE = 'U'
AND COLS.TABLE_NAME = :t
AND COLS.COLUMN_NAME = :c
""", table, field['COLUMN_NAME'])
return rows and True or False
# Returns True when a "BEFORE EACH ROW INSERT" trigger is found and:
# a) it mentions the "NEXTVAL" keyword (used by sequences)
# b) it operates on the given table and column
#
# On some (inelegant) database designs, SEQUENCE.NEXTVAL is called directly
# from each "insert" statement, instead of using triggers. Such cases cannot
# be detected by inspecting Oracle's metadata tables, as sequences are not
# logically bound to any specific table or field.
def is_autoincrement(conn, table, field):
"Find auto increment fields (best effort)"
rows = query(conn, """
SELECT TRIGGER_NAME
FROM USER_TRIGGERS,
(SELECT NAME, LISTAGG(TEXT, ' ') WITHIN GROUP (ORDER BY LINE) TEXT
FROM USER_SOURCE
WHERE TYPE = 'TRIGGER'
GROUP BY NAME
) TRIGGER_DEFINITION
WHERE TRIGGER_NAME = NAME
AND TRIGGERING_EVENT = 'INSERT'
AND TRIGGER_TYPE = 'BEFORE EACH ROW'
AND TABLE_NAME = :t
AND UPPER(TEXT) LIKE UPPER('%.NEXTVAL%')
AND UPPER(TEXT) LIKE UPPER('%:NEW.' || :c || '%')
""", table, field['COLUMN_NAME'])
return rows and True or False
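# Illustrative example (hypothetical table/column names, not from any schema):
# a trigger such as
#
#   CREATE OR REPLACE TRIGGER MYTABLE_BI
#   BEFORE INSERT ON MYTABLE FOR EACH ROW
#   BEGIN
#     SELECT MYTABLE_SEQ.NEXTVAL INTO :NEW.ID FROM DUAL;
#   END;
#
# would make is_autoincrement(conn, 'MYTABLE', {'COLUMN_NAME': 'ID'}) return
# True, since its source mentions both NEXTVAL and :NEW.ID.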
def primarykeys(conn, table):
"Find primary keys"
rows = query(conn, """
SELECT COLS.COLUMN_NAME
FROM USER_CONSTRAINTS CONS, ALL_CONS_COLUMNS COLS
WHERE COLS.TABLE_NAME = :t
AND CONS.CONSTRAINT_TYPE = 'P'
AND CONS.OWNER = COLS.OWNER
AND CONS.CONSTRAINT_NAME = COLS.CONSTRAINT_NAME
""", table)
return [row['COLUMN_NAME'] for row in rows]
def references(conn, table, field):
"Find a FK (fails if multiple)"
rows1 = query(conn, """
SELECT COLS.CONSTRAINT_NAME,
CONS.DELETE_RULE,
COLS.POSITION AS ORDINAL_POSITION
FROM USER_CONSTRAINTS CONS, ALL_CONS_COLUMNS COLS
WHERE COLS.TABLE_NAME = :t
AND COLS.COLUMN_NAME = :c
AND CONS.CONSTRAINT_TYPE = 'R'
AND CONS.OWNER = COLS.OWNER
AND CONS.CONSTRAINT_NAME = COLS.CONSTRAINT_NAME
""", table, field)
if len(rows1) == 1:
rows2 = query(conn, """
SELECT COLS.TABLE_NAME, COLS.COLUMN_NAME
FROM USER_CONSTRAINTS CONS, ALL_CONS_COLUMNS COLS
WHERE CONS.CONSTRAINT_NAME = :k
AND CONS.R_CONSTRAINT_NAME = COLS.CONSTRAINT_NAME
ORDER BY COLS.POSITION ASC
""", rows1[0]['CONSTRAINT_NAME'])
row = None
if len(rows2) > 1:
row = rows2[int(rows1[0]['ORDINAL_POSITION']) - 1]
keyed = True
if len(rows2) == 1:
row = rows2[0]
keyed = False
if row:
if keyed: # THIS IS BAD, DON'T MIX "id" and primarykey!!!
ref = {'type': "'reference %s.%s'" % (row['TABLE_NAME'],
row['COLUMN_NAME'])}
else:
ref = {'type': "'reference %s'" % (row['TABLE_NAME'],)}
if rows1[0]['DELETE_RULE'] != "NO ACTION":
ref['ondelete'] = repr(rows1[0]['DELETE_RULE'])
return ref
elif rows2:
raise RuntimeError("Unsupported foreign key reference: %s" %
str(rows2))
elif rows1:
raise RuntimeError("Unsupported referential constraint: %s" %
str(rows1))
def define_table(conn, table):
"Output single table definition"
fields = get_fields(conn, table)
pks = primarykeys(conn, table)
print "db.define_table('%s'," % (table, )
for field in fields:
fname = field['COLUMN_NAME']
fdef = define_field(conn, table, field, pks)
if fname not in pks and is_unique(conn, table, field):
fdef['unique'] = "True"
if fdef['type'] == "'id'" and fname in pks:
pks.pop(pks.index(fname))
print " Field('%s', %s)," % (fname,
', '.join(["%s=%s" % (k, fdef[k]) for k in KWARGS
if k in fdef and fdef[k]]))
if pks:
print " primarykey=[%s]," % ", ".join(["'%s'" % pk for pk in pks])
print " migrate=migrate)"
print
def define_db(conn, db, host, port, user, passwd):
"Output database definition (model)"
dal = 'db = DAL("oracle://%s/%s@%s:%s/%s", pool_size=10)'
print dal % (user, passwd, host, port, db)
print
print "migrate = False"
print
for table in get_tables(conn):
define_table(conn, table)
if __name__ == "__main__":
if len(sys.argv) < 6:
print HELP
else:
# Parse arguments from command line:
db, host, port, user, passwd = sys.argv[1:6]
# Make the database connection (change driver if required)
import cx_Oracle
dsn = cx_Oracle.makedsn(host, port, db)
cnn = cx_Oracle.connect(user, passwd, dsn)
# Start model code generation:
define_db(cnn, db, host, port, user, passwd)
| bsd-3-clause | -4,626,304,678,129,125,000 | 33.032051 | 77 | 0.567621 | false |
tommy-u/enable | enable/gadgets/vu_meter.py | 1 | 10669 |
import math
from traits.api import Float, Property, List, Str, Range
from enable.api import Component
from kiva.trait_defs.kiva_font_trait import KivaFont
from kiva import affine
def percent_to_db(percent):
if percent == 0.0:
db = float('-inf')
else:
db = 20 * math.log10(percent / 100.0)
return db
def db_to_percent(db):
percent = math.pow(10, db / 20.0 + 2)
return percent
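# A few illustrative values for the two conversions above (rounded):
#   percent_to_db(100.0) ->    0.0 dB
#   percent_to_db(50.0)  ->  ~-6.02 dB
#   percent_to_db(0.0)   ->   -inf
#   db_to_percent(0.0)   ->  100.0
#   db_to_percent(3.0)   -> ~141.25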
class VUMeter(Component):
# Value expressed in dB
db = Property(Float)
# Value expressed as a percent.
percent = Range(low=0.0)
# The maximum value to be display in the VU Meter, expressed as a percent.
max_percent = Float(150.0)
# Angle (in degrees) from a horizontal line through the hinge of the
# needle to the edge of the meter axis.
angle = Float(45.0)
# Values of the percentage-based ticks; these are drawn and labeled along
# the bottom of the curve axis.
percent_ticks = List(range(0, 101, 20))
# Text to write in the middle of the VU Meter.
text = Str("VU")
# Font used to draw `text`.
text_font = KivaFont("modern 48")
# Font for the db tick labels.
db_tick_font = KivaFont("modern 16")
# Font for the percent tick labels.
percent_tick_font = KivaFont("modern 12")
    # beta is the fraction of the needle that is "hidden".
# beta == 0 puts the hinge point of the needle on the bottom
# edge of the window. Values that result in a decent looking
# meter are 0 < beta < .65.
# XXX needs a better name!
_beta = Float(0.3)
# _outer_radial_margin is the radial extent beyond the circular axis
# to include in calculations of the space required for the meter.
# This allows room for the ticks and labels.
_outer_radial_margin = Float(60.0)
# The angle (in radians) of the span of the curve axis.
_phi = Property(Float, depends_on=['angle'])
# This is the radius of the circular axis (in screen coordinates).
_axis_radius = Property(Float, depends_on=['_phi', 'width', 'height'])
#---------------------------------------------------------------------
# Trait Property methods
#---------------------------------------------------------------------
def _get_db(self):
db = percent_to_db(self.percent)
return db
def _set_db(self, value):
self.percent = db_to_percent(value)
def _get__phi(self):
phi = math.pi * (180.0 - 2 * self.angle) / 180.0
return phi
def _get__axis_radius(self):
M = self._outer_radial_margin
beta = self._beta
w = self.width
h = self.height
phi = self._phi
R1 = w / (2 * math.sin(phi / 2)) - M
R2 = (h - M) / (1 - beta * math.cos(phi / 2))
R = min(R1, R2)
return R
#---------------------------------------------------------------------
# Trait change handlers
#---------------------------------------------------------------------
def _anytrait_changed(self):
self.request_redraw()
#---------------------------------------------------------------------
# Component API
#---------------------------------------------------------------------
def _draw_mainlayer(self, gc, view_bounds=None, mode="default"):
beta = self._beta
phi = self._phi
w = self.width
M = self._outer_radial_margin
R = self._axis_radius
# (ox, oy) is the position of the "hinge point" of the needle
# (i.e. the center of rotation). For beta > ~0, oy is negative,
# so this point is below the visible region.
ox = self.x + self.width // 2
oy = -beta * R * math.cos(phi / 2) + 1
left_theta = math.radians(180 - self.angle)
right_theta = math.radians(self.angle)
# The angle of the 100% position.
nominal_theta = self._percent_to_theta(100.0)
# The color of the axis for percent > 100.
red = (0.8, 0, 0)
with gc:
gc.set_antialias(True)
# Draw everything relative to the center of the circles.
gc.translate_ctm(ox, oy)
# Draw the primary ticks and tick labels on the curved axis.
gc.set_fill_color((0, 0, 0))
gc.set_font(self.db_tick_font)
for db in [-20, -10, -7, -5, -3, -2, -1, 0, 1, 2, 3]:
db_percent = db_to_percent(db)
theta = self._percent_to_theta(db_percent)
x1 = R * math.cos(theta)
y1 = R * math.sin(theta)
x2 = (R + 0.3 * M) * math.cos(theta)
y2 = (R + 0.3 * M) * math.sin(theta)
gc.set_line_width(2.5)
gc.move_to(x1, y1)
gc.line_to(x2, y2)
gc.stroke_path()
text = str(db)
if db > 0:
text = '+' + text
self._draw_rotated_label(gc, text, theta, R + 0.4 * M)
# Draw the secondary ticks on the curve axis.
for db in [-15, -9, -8, -6, -4, -0.5, 0.5]:
##db_percent = 100 * math.pow(10.0, db / 20.0)
db_percent = db_to_percent(db)
theta = self._percent_to_theta(db_percent)
x1 = R * math.cos(theta)
y1 = R * math.sin(theta)
x2 = (R + 0.2 * M) * math.cos(theta)
y2 = (R + 0.2 * M) * math.sin(theta)
gc.set_line_width(1.0)
gc.move_to(x1, y1)
gc.line_to(x2, y2)
gc.stroke_path()
# Draw the percent ticks and label on the bottom of the
# curved axis.
gc.set_font(self.percent_tick_font)
gc.set_fill_color((0.5, 0.5, 0.5))
gc.set_stroke_color((0.5, 0.5, 0.5))
percents = self.percent_ticks
for tick_percent in percents:
theta = self._percent_to_theta(tick_percent)
x1 = (R - 0.15 * M) * math.cos(theta)
y1 = (R - 0.15 * M) * math.sin(theta)
x2 = R * math.cos(theta)
y2 = R * math.sin(theta)
gc.set_line_width(2.0)
gc.move_to(x1, y1)
gc.line_to(x2, y2)
gc.stroke_path()
text = str(tick_percent)
if tick_percent == percents[-1]:
text = text + "%"
self._draw_rotated_label(gc, text, theta, R - 0.3 * M)
if self.text:
gc.set_font(self.text_font)
tx, ty, tw, th = gc.get_text_extent(self.text)
gc.set_fill_color((0, 0, 0, 0.25))
gc.set_text_matrix(affine.affine_from_rotation(0))
gc.set_text_position(-0.5 * tw,
(0.75 * beta + 0.25) * R)
gc.show_text(self.text)
# Draw the red curved axis.
gc.set_stroke_color(red)
w = 10
gc.set_line_width(w)
gc.arc(0, 0, R + 0.5 * w - 1, right_theta, nominal_theta)
gc.stroke_path()
# Draw the black curved axis.
w = 4
gc.set_line_width(w)
gc.set_stroke_color((0, 0, 0))
gc.arc(0, 0, R + 0.5 * w - 1, nominal_theta, left_theta)
gc.stroke_path()
# Draw the filled arc at the bottom.
gc.set_line_width(2)
gc.set_stroke_color((0, 0, 0))
gc.arc(0, 0, beta * R, math.radians(self.angle),
math.radians(180 - self.angle))
gc.stroke_path()
gc.set_fill_color((0, 0, 0, 0.25))
gc.arc(0, 0, beta * R, math.radians(self.angle),
math.radians(180 - self.angle))
gc.fill_path()
# Draw the needle.
percent = self.percent
# If percent exceeds max_percent, the needle is drawn at max_percent.
if percent > self.max_percent:
percent = self.max_percent
needle_theta = self._percent_to_theta(percent)
gc.rotate_ctm(needle_theta - 0.5 * math.pi)
self._draw_vertical_needle(gc)
#---------------------------------------------------------------------
# Private methods
#---------------------------------------------------------------------
def _draw_vertical_needle(self, gc):
""" Draw the needle of the meter, pointing straight up. """
beta = self._beta
R = self._axis_radius
end_y = beta * R
blob_y = R - 0.6 * self._outer_radial_margin
tip_y = R + 0.2 * self._outer_radial_margin
lw = 5
with gc:
gc.set_alpha(1)
gc.set_fill_color((0, 0, 0))
# Draw the needle from the bottom to the blob.
gc.set_line_width(lw)
gc.move_to(0, end_y)
gc.line_to(0, blob_y)
gc.stroke_path()
# Draw the thin part of the needle from the blob to the tip.
gc.move_to(lw, blob_y)
control_y = blob_y + 0.25 * (tip_y - blob_y)
gc.quad_curve_to( 0.2 * lw, control_y, 0, tip_y)
gc.quad_curve_to(-0.2 * lw, control_y, -lw, blob_y)
gc.line_to(lw, blob_y)
gc.fill_path()
# Draw the blob on the needle.
gc.arc(0, blob_y, 6.0, 0, 2 * math.pi)
gc.fill_path()
def _draw_rotated_label(self, gc, text, theta, radius):
tx, ty, tw, th = gc.get_text_extent(text)
rr = math.sqrt(radius ** 2 + (0.5 * tw) ** 2)
dtheta = math.atan2(0.5 * tw, radius)
text_theta = theta + dtheta
x = rr * math.cos(text_theta)
y = rr * math.sin(text_theta)
rot_theta = theta - 0.5 * math.pi
with gc:
gc.set_text_matrix(affine.affine_from_rotation(rot_theta))
gc.set_text_position(x, y)
gc.show_text(text)
def _percent_to_theta(self, percent):
""" Convert percent to the angle theta, in radians.
theta is the angle of the needle measured counterclockwise from
the horizontal (i.e. the traditional angle of polar coordinates).
"""
angle = (self.angle + (180.0 - 2 * self.angle) *
(self.max_percent - percent) / self.max_percent)
theta = math.radians(angle)
return theta
def _db_to_theta(self, db):
""" Convert db to the angle theta, in radians. """
percent = db_to_percent(db)
theta = self._percent_to_theta(percent)
return theta
| bsd-3-clause | -7,737,232,950,608,942,000 | 33.752443 | 81 | 0.489174 | false |
int19h/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/ctypes/test/test_as_parameter.py | 14 | 6919 | import unittest
from ctypes import *
from ctypes.test import need_symbol
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
try:
CALLBACK_FUNCTYPE = WINFUNCTYPE
except NameError:
# fake to enable this test on Linux
CALLBACK_FUNCTYPE = CFUNCTYPE
class POINT(Structure):
_fields_ = [("x", c_int), ("y", c_int)]
class BasicWrapTestCase(unittest.TestCase):
def wrap(self, param):
return param
@need_symbol('c_wchar')
def test_wchar_parm(self):
f = dll._testfunc_i_bhilfd
f.argtypes = [c_byte, c_wchar, c_int, c_long, c_float, c_double]
result = f(self.wrap(1), self.wrap("x"), self.wrap(3), self.wrap(4), self.wrap(5.0), self.wrap(6.0))
self.assertEqual(result, 139)
self.assertIs(type(result), int)
def test_pointers(self):
f = dll._testfunc_p_p
f.restype = POINTER(c_int)
f.argtypes = [POINTER(c_int)]
# This only works if the value c_int(42) passed to the
# function is still alive while the pointer (the result) is
# used.
v = c_int(42)
self.assertEqual(pointer(v).contents.value, 42)
result = f(self.wrap(pointer(v)))
self.assertEqual(type(result), POINTER(c_int))
self.assertEqual(result.contents.value, 42)
        # This one works...
result = f(self.wrap(pointer(v)))
self.assertEqual(result.contents.value, v.value)
p = pointer(c_int(99))
result = f(self.wrap(p))
self.assertEqual(result.contents.value, 99)
def test_shorts(self):
f = dll._testfunc_callback_i_if
args = []
expected = [262144, 131072, 65536, 32768, 16384, 8192, 4096, 2048,
1024, 512, 256, 128, 64, 32, 16, 8, 4, 2, 1]
def callback(v):
args.append(v)
return v
CallBack = CFUNCTYPE(c_int, c_int)
cb = CallBack(callback)
f(self.wrap(2**18), self.wrap(cb))
self.assertEqual(args, expected)
################################################################
def test_callbacks(self):
f = dll._testfunc_callback_i_if
f.restype = c_int
f.argtypes = None
MyCallback = CFUNCTYPE(c_int, c_int)
def callback(value):
#print "called back with", value
return value
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
# test with prototype
f.argtypes = [c_int, MyCallback]
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
AnotherCallback = CALLBACK_FUNCTYPE(c_int, c_int, c_int, c_int, c_int)
# check that the prototype works: we call f with wrong
# argument types
cb = AnotherCallback(callback)
self.assertRaises(ArgumentError, f, self.wrap(-10), self.wrap(cb))
def test_callbacks_2(self):
# Can also use simple datatypes as argument type specifiers
# for the callback function.
# In this case the call receives an instance of that type
f = dll._testfunc_callback_i_if
f.restype = c_int
MyCallback = CFUNCTYPE(c_int, c_int)
f.argtypes = [c_int, MyCallback]
def callback(value):
#print "called back with", value
self.assertEqual(type(value), int)
return value
cb = MyCallback(callback)
result = f(self.wrap(-10), self.wrap(cb))
self.assertEqual(result, -18)
def test_longlong_callbacks(self):
f = dll._testfunc_callback_q_qf
f.restype = c_longlong
MyCallback = CFUNCTYPE(c_longlong, c_longlong)
f.argtypes = [c_longlong, MyCallback]
def callback(value):
self.assertIsInstance(value, int)
return value & 0x7FFFFFFF
cb = MyCallback(callback)
self.assertEqual(13577625587, int(f(self.wrap(1000000000000), self.wrap(cb))))
def test_byval(self):
# without prototype
ptin = POINT(1, 2)
ptout = POINT()
# EXPORT int _testfunc_byval(point in, point *pout)
result = dll._testfunc_byval(ptin, byref(ptout))
got = result, ptout.x, ptout.y
expected = 3, 1, 2
self.assertEqual(got, expected)
# with prototype
ptin = POINT(101, 102)
ptout = POINT()
dll._testfunc_byval.argtypes = (POINT, POINTER(POINT))
dll._testfunc_byval.restype = c_int
result = dll._testfunc_byval(self.wrap(ptin), byref(ptout))
got = result, ptout.x, ptout.y
expected = 203, 101, 102
self.assertEqual(got, expected)
def test_struct_return_2H(self):
class S2H(Structure):
_fields_ = [("x", c_short),
("y", c_short)]
dll.ret_2h_func.restype = S2H
dll.ret_2h_func.argtypes = [S2H]
inp = S2H(99, 88)
s2h = dll.ret_2h_func(self.wrap(inp))
self.assertEqual((s2h.x, s2h.y), (99*2, 88*3))
# Test also that the original struct was unmodified (i.e. was passed by
# value)
self.assertEqual((inp.x, inp.y), (99, 88))
def test_struct_return_8H(self):
class S8I(Structure):
_fields_ = [("a", c_int),
("b", c_int),
("c", c_int),
("d", c_int),
("e", c_int),
("f", c_int),
("g", c_int),
("h", c_int)]
dll.ret_8i_func.restype = S8I
dll.ret_8i_func.argtypes = [S8I]
inp = S8I(9, 8, 7, 6, 5, 4, 3, 2)
s8i = dll.ret_8i_func(self.wrap(inp))
self.assertEqual((s8i.a, s8i.b, s8i.c, s8i.d, s8i.e, s8i.f, s8i.g, s8i.h),
(9*2, 8*3, 7*4, 6*5, 5*6, 4*7, 3*8, 2*9))
def test_recursive_as_param(self):
from ctypes import c_int
class A(object):
pass
a = A()
a._as_parameter_ = a
with self.assertRaises(RecursionError):
c_int.from_param(a)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class AsParamWrapper(object):
def __init__(self, param):
self._as_parameter_ = param
class AsParamWrapperTestCase(BasicWrapTestCase):
wrap = AsParamWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class AsParamPropertyWrapper(object):
def __init__(self, param):
self._param = param
def getParameter(self):
return self._param
_as_parameter_ = property(getParameter)
class AsParamPropertyWrapperTestCase(BasicWrapTestCase):
wrap = AsParamPropertyWrapper
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -6,932,741,810,779,203,000 | 29.082609 | 108 | 0.543142 | false |
chrisvans/roasttron | roasttron/config/common.py | 1 | 9425 | # -*- coding: utf-8 -*-
"""
Django settings for roasttron project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join, dirname
from configurations import Configuration, values
BASE_DIR = dirname(dirname(__file__))
class Common(Configuration):
# APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'avatar', # for user avatars
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'rest_framework', # django rest framework
)
# Apps specific for this project go here.
LOCAL_APPS = (
'apps.users', # custom users app
'apps.roast',
'apps.temppoint',
'apps.coffee',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# END APP CONFIGURATION
# MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# END MIDDLEWARE CONFIGURATION
# MIGRATIONS CONFIGURATION
MIGRATION_MODULES = {
'sites': 'contrib.sites.migrations'
}
# END MIGRATIONS CONFIGURATION
# DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = values.BooleanValue(False)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
# END DEBUG
# SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = 'CHANGEME!!!'
# END SECRET CONFIGURATION
# FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
# END FIXTURE CONFIGURATION
# EMAIL CONFIGURATION
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
# END EMAIL CONFIGURATION
# MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Chris VanSchyndel""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# END MANAGER CONFIGURATION
# DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = values.DatabaseURLValue('postgres:///roasttron')
# END DATABASE CONFIGURATION
# CACHING
    # Do this here because, thanks to django-pylibmc-sasl and pylibmc,
    # memcacheify (used on Heroku) is painful to install on Windows.
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# END CACHING
# GENERAL CONFIGURATION
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# END GENERAL CONFIGURATION
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
        # Your stuff: custom template context processors go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# END TEMPLATE CONFIGURATION
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# END STATIC FILE CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
# URL Configuration
ROOT_URLCONF = 'urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'wsgi.application'
# End URL Configuration
# AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# END AUTHENTICATION CONFIGURATION
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# END Custom user app defaults
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# END SLUGLIFIER
# LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# END LOGGING CONFIGURATION
@classmethod
def post_setup(cls):
cls.DATABASES['default']['ATOMIC_REQUESTS'] = True
# Your common stuff: Below this line define 3rd party library settings
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAuthenticated',),
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',),
'PAGINATE_BY': 10,
}
| mit | -6,516,160,724,629,399,000 | 32.303887 | 102 | 0.656658 | false |
googlearchive/big-rig | app/src/thirdparty/telemetry/web_perf/metrics/rendering_stats.py | 9 | 12851 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
from operator import attrgetter
from telemetry.web_perf.metrics import rendering_frame
# These are LatencyInfo component names indicating the various components
# that the input event has travelled through.
# This is when the input event first reaches chrome.
UI_COMP_NAME = 'INPUT_EVENT_LATENCY_UI_COMPONENT'
# This is when the input event was originally created by OS.
ORIGINAL_COMP_NAME = 'INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT'
# This is when the input event was sent from browser to renderer.
BEGIN_COMP_NAME = 'INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT'
# This is when an input event is turned into a scroll update.
BEGIN_SCROLL_UPDATE_COMP_NAME = (
'LATENCY_BEGIN_SCROLL_LISTENER_UPDATE_MAIN_COMPONENT')
# This is when a scroll update is forwarded to the main thread.
FORWARD_SCROLL_UPDATE_COMP_NAME = (
'INPUT_EVENT_LATENCY_FORWARD_SCROLL_UPDATE_TO_MAIN_COMPONENT')
# This is when the input event has reached swap buffer.
END_COMP_NAME = 'INPUT_EVENT_GPU_SWAP_BUFFER_COMPONENT'
# Name for a main thread scroll update latency event.
SCROLL_UPDATE_EVENT_NAME = 'Latency::ScrollUpdate'
# Name for a gesture scroll update latency event.
GESTURE_SCROLL_UPDATE_EVENT_NAME = 'InputLatency::GestureScrollUpdate'
# These are keys used in the 'data' field dictionary located in
# BenchmarkInstrumentation::ImplThreadRenderingStats.
VISIBLE_CONTENT_DATA = 'visible_content_area'
APPROXIMATED_VISIBLE_CONTENT_DATA = 'approximated_visible_content_area'
CHECKERBOARDED_VISIBLE_CONTENT_DATA = 'checkerboarded_visible_content_area'
# These are keys used in the 'errors' field dictionary located in
# RenderingStats in this file.
APPROXIMATED_PIXEL_ERROR = 'approximated_pixel_percentages'
CHECKERBOARDED_PIXEL_ERROR = 'checkerboarded_pixel_percentages'
def GetLatencyEvents(process, timeline_range):
"""Get LatencyInfo trace events from the process's trace buffer that are
within the timeline_range.
Input events dump their LatencyInfo into trace buffer as async trace event
of name starting with "InputLatency". Non-input events with name starting
with "Latency". The trace event has a memeber 'data' containing its latency
history.
"""
latency_events = []
if not process:
return latency_events
for event in itertools.chain(
process.IterAllAsyncSlicesStartsWithName('InputLatency'),
process.IterAllAsyncSlicesStartsWithName('Latency')):
if event.start >= timeline_range.min and event.end <= timeline_range.max:
for ss in event.sub_slices:
if 'data' in ss.args:
latency_events.append(ss)
return latency_events
def ComputeEventLatencies(input_events):
""" Compute input event latencies.
Input event latency is the time from when the input event is created to
    when its resulting page is swap-buffered.
    Input events on different platforms use different LatencyInfo components to
    record their creation timestamps.  We go through the following component list
to find the creation timestamp:
1. INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT -- when event is created in OS
2. INPUT_EVENT_LATENCY_UI_COMPONENT -- when event reaches Chrome
3. INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT -- when event reaches RenderWidget
If the latency starts with a
LATENCY_BEGIN_SCROLL_UPDATE_MAIN_COMPONENT component, then it is
classified as a scroll update instead of a normal input latency measure.
Returns:
A list sorted by increasing start time of latencies which are tuples of
(input_event_name, latency_in_ms).
"""
input_event_latencies = []
for event in input_events:
data = event.args['data']
if END_COMP_NAME in data:
end_time = data[END_COMP_NAME]['time']
if ORIGINAL_COMP_NAME in data:
start_time = data[ORIGINAL_COMP_NAME]['time']
elif UI_COMP_NAME in data:
start_time = data[UI_COMP_NAME]['time']
elif BEGIN_COMP_NAME in data:
start_time = data[BEGIN_COMP_NAME]['time']
elif BEGIN_SCROLL_UPDATE_COMP_NAME in data:
start_time = data[BEGIN_SCROLL_UPDATE_COMP_NAME]['time']
else:
raise ValueError('LatencyInfo has no begin component')
latency = (end_time - start_time) / 1000.0
input_event_latencies.append((start_time, event.name, latency))
input_event_latencies.sort()
return [(name, latency) for _, name, latency in input_event_latencies]
def HasRenderingStats(process):
""" Returns True if the process contains at least one
BenchmarkInstrumentation::*RenderingStats event with a frame.
"""
if not process:
return False
for event in process.IterAllSlicesOfName(
'BenchmarkInstrumentation::DisplayRenderingStats'):
if 'data' in event.args and event.args['data']['frame_count'] == 1:
return True
for event in process.IterAllSlicesOfName(
'BenchmarkInstrumentation::ImplThreadRenderingStats'):
if 'data' in event.args and event.args['data']['frame_count'] == 1:
return True
return False
def GetTimestampEventName(process):
""" Returns the name of the events used to count frame timestamps. """
if process.name == 'SurfaceFlinger':
return 'vsync_before'
event_name = 'BenchmarkInstrumentation::DisplayRenderingStats'
for event in process.IterAllSlicesOfName(event_name):
if 'data' in event.args and event.args['data']['frame_count'] == 1:
return event_name
return 'BenchmarkInstrumentation::ImplThreadRenderingStats'
class RenderingStats(object):
def __init__(self, renderer_process, browser_process, surface_flinger_process,
timeline_ranges):
"""
Utility class for extracting rendering statistics from the timeline (or
other loggin facilities), and providing them in a common format to classes
that compute benchmark metrics from this data.
Stats are lists of lists of numbers. The outer list stores one list per
timeline range.
All *_time values are measured in milliseconds.
"""
assert len(timeline_ranges) > 0
self.refresh_period = None
# Find the top level process with rendering stats (browser or renderer).
if surface_flinger_process:
timestamp_process = surface_flinger_process
self._GetRefreshPeriodFromSurfaceFlingerProcess(surface_flinger_process)
elif HasRenderingStats(browser_process):
timestamp_process = browser_process
else:
timestamp_process = renderer_process
timestamp_event_name = GetTimestampEventName(timestamp_process)
# A lookup from list names below to any errors or exceptions encountered
# in attempting to generate that list.
self.errors = {}
self.frame_timestamps = []
self.frame_times = []
self.approximated_pixel_percentages = []
self.checkerboarded_pixel_percentages = []
# End-to-end latency for input event - from when input event is
        # generated to when its resulting page is swap-buffered.
self.input_event_latency = []
self.frame_queueing_durations = []
# Latency from when a scroll update is sent to the main thread until the
# resulting frame is swapped.
self.scroll_update_latency = []
# Latency for a GestureScrollUpdate input event.
self.gesture_scroll_update_latency = []
for timeline_range in timeline_ranges:
self.frame_timestamps.append([])
self.frame_times.append([])
self.approximated_pixel_percentages.append([])
self.checkerboarded_pixel_percentages.append([])
self.input_event_latency.append([])
self.scroll_update_latency.append([])
self.gesture_scroll_update_latency.append([])
if timeline_range.is_empty:
continue
self._InitFrameTimestampsFromTimeline(
timestamp_process, timestamp_event_name, timeline_range)
self._InitImplThreadRenderingStatsFromTimeline(
renderer_process, timeline_range)
self._InitInputLatencyStatsFromTimeline(
browser_process, renderer_process, timeline_range)
self._InitFrameQueueingDurationsFromTimeline(
renderer_process, timeline_range)
def _GetRefreshPeriodFromSurfaceFlingerProcess(self, surface_flinger_process):
for event in surface_flinger_process.IterAllEventsOfName('vsync_before'):
self.refresh_period = event.args['data']['refresh_period']
return
def _InitInputLatencyStatsFromTimeline(
self, browser_process, renderer_process, timeline_range):
latency_events = GetLatencyEvents(browser_process, timeline_range)
# Plugin input event's latency slice is generated in renderer process.
latency_events.extend(GetLatencyEvents(renderer_process, timeline_range))
event_latencies = ComputeEventLatencies(latency_events)
# Don't include scroll updates in the overall input latency measurement,
# because scroll updates can take much more time to process than other
# input events and would therefore add noise to overall latency numbers.
self.input_event_latency[-1] = [
latency for name, latency in event_latencies
if name != SCROLL_UPDATE_EVENT_NAME]
self.scroll_update_latency[-1] = [
latency for name, latency in event_latencies
if name == SCROLL_UPDATE_EVENT_NAME]
self.gesture_scroll_update_latency[-1] = [
latency for name, latency in event_latencies
if name == GESTURE_SCROLL_UPDATE_EVENT_NAME]
def _GatherEvents(self, event_name, process, timeline_range):
events = []
for event in process.IterAllSlicesOfName(event_name):
if event.start >= timeline_range.min and event.end <= timeline_range.max:
if 'data' not in event.args:
continue
events.append(event)
events.sort(key=attrgetter('start'))
return events
def _AddFrameTimestamp(self, event):
frame_count = event.args['data']['frame_count']
if frame_count > 1:
raise ValueError('trace contains multi-frame render stats')
if frame_count == 1:
self.frame_timestamps[-1].append(
event.start)
if len(self.frame_timestamps[-1]) >= 2:
self.frame_times[-1].append(
self.frame_timestamps[-1][-1] - self.frame_timestamps[-1][-2])
def _InitFrameTimestampsFromTimeline(
self, process, timestamp_event_name, timeline_range):
for event in self._GatherEvents(
timestamp_event_name, process, timeline_range):
self._AddFrameTimestamp(event)
def _InitImplThreadRenderingStatsFromTimeline(self, process, timeline_range):
event_name = 'BenchmarkInstrumentation::ImplThreadRenderingStats'
for event in self._GatherEvents(event_name, process, timeline_range):
data = event.args['data']
if VISIBLE_CONTENT_DATA not in data:
self.errors[APPROXIMATED_PIXEL_ERROR] = (
'Calculating approximated_pixel_percentages not possible because '
'visible_content_area was missing.')
self.errors[CHECKERBOARDED_PIXEL_ERROR] = (
'Calculating checkerboarded_pixel_percentages not possible because '
'visible_content_area was missing.')
return
visible_content_area = data[VISIBLE_CONTENT_DATA]
if visible_content_area == 0:
self.errors[APPROXIMATED_PIXEL_ERROR] = (
'Calculating approximated_pixel_percentages would have caused '
'a divide-by-zero')
self.errors[CHECKERBOARDED_PIXEL_ERROR] = (
'Calculating checkerboarded_pixel_percentages would have caused '
'a divide-by-zero')
return
if APPROXIMATED_VISIBLE_CONTENT_DATA in data:
self.approximated_pixel_percentages[-1].append(
round(float(data[APPROXIMATED_VISIBLE_CONTENT_DATA]) /
float(data[VISIBLE_CONTENT_DATA]) * 100.0, 3))
else:
self.errors[APPROXIMATED_PIXEL_ERROR] = (
'approximated_pixel_percentages was not recorded')
if CHECKERBOARDED_VISIBLE_CONTENT_DATA in data:
self.checkerboarded_pixel_percentages[-1].append(
round(float(data[CHECKERBOARDED_VISIBLE_CONTENT_DATA]) /
float(data[VISIBLE_CONTENT_DATA]) * 100.0, 3))
else:
self.errors[CHECKERBOARDED_PIXEL_ERROR] = (
'checkerboarded_pixel_percentages was not recorded')
def _InitFrameQueueingDurationsFromTimeline(self, process, timeline_range):
try:
events = rendering_frame.GetFrameEventsInsideRange(process,
timeline_range)
new_frame_queueing_durations = [e.queueing_duration for e in events]
self.frame_queueing_durations.append(new_frame_queueing_durations)
except rendering_frame.NoBeginFrameIdException:
self.errors['frame_queueing_durations'] = (
'Current chrome version does not support the queueing delay metric.')
| apache-2.0 | 1,759,412,441,875,406,600 | 42.415541 | 80 | 0.714108 | false |
V155/qutebrowser | scripts/dev/cleanup.py | 5 | 2177 | #!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Script to clean up the mess made by Python/setuptools/PyInstaller."""
import os
import os.path
import sys
import glob
import shutil
import fnmatch
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir,
os.pardir))
from scripts import utils
recursive_lint = ('__pycache__', '*.pyc')
lint = ('build', 'dist', 'pkg/pkg', 'pkg/qutebrowser-*.pkg.tar.xz', 'pkg/src',
'pkg/qutebrowser', 'qutebrowser.egg-info', 'setuptools-*.egg',
'setuptools-*.zip', 'doc/qutebrowser.asciidoc', 'doc/*.html',
'doc/qutebrowser.1', 'README.html', 'qutebrowser/html/doc')
def remove(path):
"""Remove either a file or directory unless --dry-run is given."""
if os.path.isdir(path):
print("rm -r '{}'".format(path))
if '--dry-run' not in sys.argv:
shutil.rmtree(path)
else:
print("rm '{}'".format(path))
if '--dry-run' not in sys.argv:
os.remove(path)
def main():
"""Clean up lint in the current dir."""
utils.change_cwd()
for elem in lint:
for f in glob.glob(elem):
remove(f)
for root, _dirs, _files in os.walk(os.getcwd()):
path = os.path.basename(root)
if any(fnmatch.fnmatch(path, e) for e in recursive_lint):
remove(root)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,943,098,083,641,704,000 | 30.550725 | 78 | 0.648599 | false |
navoj/ecell4 | ecell4/egfrd/legacy/test/freeFunctions_test.py | 3 | 4521 | #!/usr/bin/env python
__author__ = 'Koichi Takahashi <[email protected]>'
__license__ = 'GPL'
__copyright__ = 'Copyright The Molecular Sciences Institute 2006-2007'
import unittest
import _greens_functions as mod
import math
import numpy
class FreeFunctionsTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_int_p_theta_free_is_ip_theta_free(self):
import scipy.integrate
D = 1e-12
t = 1e-5
sigma = 1e-9
r0 = 1e-9
r = r0
kf = 1e-18
ip = mod.ip_theta_free(0.0, r, r0, t, D)
self.assertEqual(0.0, ip)
resolution = 10
for i in range(1, resolution):
theta = i * numpy.pi / resolution
ip = mod.ip_theta_free(theta, r, r0, t, D)
result = scipy.integrate.quad(mod.p_theta_free, 0.0, theta,
args=(r, r0, t, D))
np = result[0]
self.assertAlmostEqual(0.0, (np-ip)/ip)
def test_int_p_irr_is_p_survival_irr(self):
import scipy.integrate
D = 1e-12
t = 1e-5
sigma = 1e-9
r0 = 1e-9
kf = 1e-18
for i in range(1, 20):
S = mod.p_survival_irr(t, r0 * i, kf, D, sigma)
result = scipy.integrate.quad(mod.p_irr, sigma, sigma * 1e3,
args=(t, r0 * i, kf, D, sigma))
ip = result[0]
self.failIf(ip == 0)
self.assertAlmostEqual(0.0, (S-ip)/ip)
def test_int_g_bd_is_I_bd(self):
import scipy.integrate
import math
D = 1e-12
t = 1e-6
sigma = 1e-8
r0 = 1e-9
ibd = mod.I_bd(sigma, t, D)
#print ibd
result = scipy.integrate.quad(mod.g_bd, sigma,
sigma + 6 * math.sqrt(6 * D * t),
args=(sigma, t, D))
igbd = result[0]
#print igbd
self.failIf(ibd == 0)
self.assertAlmostEqual(0.0, (ibd-igbd)/ibd)
def test_int_g_bd_is_I_bd_smallt(self):
import scipy.integrate
D = 1e-12
t = 1e-20
sigma = 1e-8
r0 = 1e-9
ibd = mod.I_bd(sigma, t, D)
#print ibd
result = scipy.integrate.quad(mod.g_bd, sigma, sigma +
6 * math.sqrt(6 * D * t),
args=(sigma, t, D))
igbd = result[0]
#print igbd
self.failIf(ibd == 0)
self.assertAlmostEqual(0.0, (ibd-igbd)/ibd)
def test_I_bd_r_large_is_I_bd(self):
D = 1e-12
t = 1e-10
sigma = 1e-8
r0 = 1e-9
ibd = mod.I_bd(sigma, t, D)
ibdr = mod.I_bd_r(sigma + 6 * math.sqrt(6 * D * t), sigma, t, D)
#print ibd, ibdr
self.assertAlmostEqual(0.0, (ibd-ibdr)/ibd)
def test_int_g_bd_is_I_bd_r(self):
import scipy.integrate
import math
D = 1e-12
t = 1e-7
sigma = 1e-8
r_max = 6 * math.sqrt(6 * D * t)
ibd = mod.I_bd_r(sigma, sigma, t, D)
self.failIf(ibd != 0.0)
N = 20
for i in range(1, N):
r = sigma + r_max / N * i
ibd = mod.I_bd_r(r, sigma, t, D)
result = scipy.integrate.quad(mod.g_bd, sigma, r,
args=(sigma, t, D))
igbd = result[0]
self.failIf(ibd == 0)
self.assertAlmostEqual(0.0, (ibd-igbd)/ibd)
def test_drawR_gbd(self):
import scipy.integrate
import math
D = 1e-12
t = 1e-8
sigma = 1e-8
r = mod.drawR_gbd(0.0, sigma, t, D)
self.assertEqual(r, sigma)
r = mod.drawR_gbd(0.5, sigma, t, D)
self.failIf(r <= sigma)
#print 'rr', r
r = mod.drawR_gbd(1.0, sigma, t, D)
self.failIf(r <= sigma)
#print 'rr', r
def test_p_reaction_irr_t_inf(self):
D = 1e-12
t = numpy.inf
sigma = 1e-8
r0 = 1.1e-8
kf = 1e-16
kr = 10
kD = 4 * numpy.pi * sigma * D
alpha = (1 + (kr / kD)) * math.sqrt(D) / sigma
pr = mod.p_reaction_irr(t, r0, kf, D, sigma, alpha, kD)
prinf = mod.p_reaction_irr_t_inf(r0, kf, sigma, kD)
#print pr, prinf
self.assertAlmostEqual(pr, prinf)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | 8,980,968,726,905,812,000 | 22.794737 | 73 | 0.463172 | false |
teltek/edx-platform | cms/djangoapps/contentstore/courseware_index.py | 2 | 27592 | """ Code to allow module store to interface with courseware index """
from __future__ import absolute_import
import logging
import re
from abc import ABCMeta, abstractmethod
from datetime import timedelta
from django.conf import settings
from django.urls import resolve
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from search.search_engine_base import SearchEngine
from six import add_metaclass
from contentstore.course_group_config import GroupConfiguration
from course_modes.models import CourseMode
from eventtracking import tracker
from openedx.core.lib.courses import course_image_url
from xmodule.annotator_mixin import html_to_text
from xmodule.library_tools import normalize_key_for_search
from xmodule.modulestore import ModuleStoreEnum
# REINDEX_AGE is the default amount of time that we look back for changes
# that might have happened. If we are provided with a time at which the
# indexing is triggered, then we know it is safe to only index items
# recently changed at that time. This is the time period that represents
# how far back from the trigger point to look back in order to index
REINDEX_AGE = timedelta(0, 60) # 60 seconds
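# For example, if indexing is triggered at 12:00:00 with this 60 second window,
# only items changed after 11:59:00 need their index documents rebuilt; older
# items are still walked, but only to work out which stale documents should be
# removed from the index.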
log = logging.getLogger('edx.modulestore')
def strip_html_content_to_text(html_content):
""" Gets only the textual part for html content - useful for building text to be searched """
# Removing HTML-encoded non-breaking space characters
text_content = re.sub(r"(\s| |//)+", " ", html_to_text(html_content))
# Removing HTML CDATA
text_content = re.sub(r"<!\[CDATA\[.*\]\]>", "", text_content)
# Removing HTML comments
text_content = re.sub(r"<!--.*-->", "", text_content)
return text_content
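# Illustrative example (the exact text produced by xmodule's html_to_text is an
# assumption here):
#   strip_html_content_to_text("<p>Newton's laws&nbsp;&nbsp;</p><!-- draft note -->")
# would yield roughly "Newton's laws ", i.e. markup, HTML comments and
# non-breaking spaces are collapsed into plain searchable text.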
def indexing_is_enabled():
"""
Checks to see if the indexing feature is enabled
"""
return settings.FEATURES.get('ENABLE_COURSEWARE_INDEX', False)
class SearchIndexingError(Exception):
""" Indicates some error(s) occured during indexing """
def __init__(self, message, error_list):
super(SearchIndexingError, self).__init__(message)
self.error_list = error_list
@add_metaclass(ABCMeta)
class SearchIndexerBase(object):
"""
Base class to perform indexing for courseware or library search from different modulestores
"""
__metaclass__ = ABCMeta
INDEX_NAME = None
DOCUMENT_TYPE = None
ENABLE_INDEXING_KEY = None
INDEX_EVENT = {
'name': None,
'category': None
}
@classmethod
def indexing_is_enabled(cls):
"""
Checks to see if the indexing feature is enabled
"""
return settings.FEATURES.get(cls.ENABLE_INDEXING_KEY, False)
@classmethod
@abstractmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
@classmethod
@abstractmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
@classmethod
@abstractmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
@classmethod
def _id_modifier(cls, usage_id):
""" Modifies usage_id to submit to index """
return usage_id
@classmethod
def remove_deleted_items(cls, searcher, structure_key, exclude_items):
"""
        Remove any item that is present in the search index but not present in the
        updated list of indexed items. As we find items, we can shorten the set of
        items to keep.
"""
response = searcher.search(
doc_type=cls.DOCUMENT_TYPE,
field_dictionary=cls._get_location_info(structure_key),
exclude_dictionary={"id": list(exclude_items)}
)
result_ids = [result["data"]["id"] for result in response["results"]]
searcher.remove(cls.DOCUMENT_TYPE, result_ids)
@classmethod
def index(cls, modulestore, structure_key, triggered_at=None, reindex_age=REINDEX_AGE):
"""
Process course for indexing
Arguments:
modulestore - modulestore object to use for operations
structure_key (CourseKey|LibraryKey) - course or library identifier
triggered_at (datetime) - provides time at which indexing was triggered;
useful for index updates - only things changed recently from that date
(within REINDEX_AGE above ^^) will have their index updated, others skip
updating their index but are still walked through in order to identify
which items may need to be removed from the index
If None, then a full reindex takes place
Returns:
Number of items that have been added to the index
"""
error_list = []
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
structure_key = cls.normalize_structure_key(structure_key)
location_info = cls._get_location_info(structure_key)
# Wrap counter in dictionary - otherwise we seem to lose scope inside the embedded function `prepare_item_index`
indexed_count = {
"count": 0
}
# indexed_items is a list of all the items that we wish to remain in the
# index, whether or not we are planning to actually update their index.
# This is used in order to build a query to remove those items not in this
# list - those are ready to be destroyed
indexed_items = set()
# items_index is a list of all the items index dictionaries.
# it is used to collect all indexes and index them using bulk API,
# instead of per item index API call.
items_index = []
def get_item_location(item):
"""
Gets the version agnostic item location
"""
return item.location.version_agnostic().replace(branch=None)
def prepare_item_index(item, skip_index=False, groups_usage_info=None):
"""
Add this item to the items_index and indexed_items list
Arguments:
item - item to add to index, its children will be processed recursively
skip_index - simply walk the children in the tree, the content change is
older than the REINDEX_AGE window and would have been already indexed.
This should really only be passed from the recursive child calls when
this method has determined that it is safe to do so
Returns:
item_content_groups - content groups assigned to indexed item
"""
is_indexable = hasattr(item, "index_dictionary")
item_index_dictionary = item.index_dictionary() if is_indexable else None
# if it's not indexable and it does not have children, then ignore
if not item_index_dictionary and not item.has_children:
return
item_content_groups = None
if item.category == "split_test":
split_partition = item.get_selected_partition()
for split_test_child in item.get_children():
if split_partition:
for group in split_partition.groups:
group_id = unicode(group.id)
child_location = item.group_id_to_child.get(group_id, None)
if child_location == split_test_child.location:
groups_usage_info.update({
unicode(get_item_location(split_test_child)): [group_id],
})
for component in split_test_child.get_children():
groups_usage_info.update({
unicode(get_item_location(component)): [group_id]
})
if groups_usage_info:
item_location = get_item_location(item)
item_content_groups = groups_usage_info.get(unicode(item_location), None)
item_id = unicode(cls._id_modifier(item.scope_ids.usage_id))
indexed_items.add(item_id)
if item.has_children:
# determine if it's okay to skip adding the children herein based upon how recently any may have changed
skip_child_index = skip_index or \
(triggered_at is not None and (triggered_at - item.subtree_edited_on) > reindex_age)
children_groups_usage = []
for child_item in item.get_children():
if modulestore.has_published_version(child_item):
children_groups_usage.append(
prepare_item_index(
child_item,
skip_index=skip_child_index,
groups_usage_info=groups_usage_info
)
)
if None in children_groups_usage:
item_content_groups = None
if skip_index or not item_index_dictionary:
return
item_index = {}
# if it has something to add to the index, then add it
try:
item_index.update(location_info)
item_index.update(item_index_dictionary)
item_index['id'] = item_id
if item.start:
item_index['start_date'] = item.start
item_index['content_groups'] = item_content_groups if item_content_groups else None
item_index.update(cls.supplemental_fields(item))
items_index.append(item_index)
indexed_count["count"] += 1
return item_content_groups
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not fail on one item of many
log.warning('Could not index item: %s - %r', item.location, err)
error_list.append(_('Could not index item: {}').format(item.location))
try:
with modulestore.branch_setting(ModuleStoreEnum.RevisionOption.published_only):
structure = cls._fetch_top_level(modulestore, structure_key)
groups_usage_info = cls.fetch_group_usage(modulestore, structure)
# First perform any additional indexing from the structure object
cls.supplemental_index_information(modulestore, structure)
# Now index the content
for item in structure.get_children():
prepare_item_index(item, groups_usage_info=groups_usage_info)
searcher.index(cls.DOCUMENT_TYPE, items_index)
cls.remove_deleted_items(searcher, structure_key, indexed_items)
except Exception as err: # pylint: disable=broad-except
# broad exception so that index operation does not prevent the rest of the application from working
log.exception(
"Indexing error encountered, courseware index may be out of date %s - %r",
structure_key,
err
)
error_list.append(_('General indexing error occurred'))
if error_list:
raise SearchIndexingError('Error(s) present during indexing', error_list)
return indexed_count["count"]
@classmethod
def _do_reindex(cls, modulestore, structure_key):
"""
(Re)index all content within the given structure (course or library),
tracking the fact that a full reindex has taken place
"""
indexed_count = cls.index(modulestore, structure_key)
if indexed_count:
cls._track_index_request(cls.INDEX_EVENT['name'], cls.INDEX_EVENT['category'], indexed_count)
return indexed_count
@classmethod
def _track_index_request(cls, event_name, category, indexed_count):
"""Track content index requests.
Arguments:
event_name (str): Name of the event to be logged.
category (str): category of indexed items
indexed_count (int): number of indexed items
Returns:
None
"""
data = {
"indexed_count": indexed_count,
'category': category,
}
tracker.emit(
event_name,
data
)
@classmethod
def fetch_group_usage(cls, modulestore, structure): # pylint: disable=unused-argument
"""
Base implementation of fetch group usage on course/library.
"""
return None
@classmethod
def supplemental_index_information(cls, modulestore, structure):
"""
Perform any supplemental indexing given that the structure object has
already been loaded. Base implementation performs no operation.
Arguments:
modulestore - modulestore object used during the indexing operation
structure - structure object loaded during the indexing job
Returns:
None
"""
pass
@classmethod
def supplemental_fields(cls, item): # pylint: disable=unused-argument
"""
Any supplemental fields that get added to the index for the specified
item. Base implementation returns an empty dictionary
"""
return {}
class CoursewareSearchIndexer(SearchIndexerBase):
"""
Class to perform indexing for courseware search from different modulestores
"""
INDEX_NAME = "courseware_index"
DOCUMENT_TYPE = "courseware_content"
ENABLE_INDEXING_KEY = 'ENABLE_COURSEWARE_INDEX'
INDEX_EVENT = {
'name': 'edx.course.index.reindexed',
'category': 'courseware_index'
}
UNNAMED_MODULE_NAME = ugettext_lazy("(Unnamed)")
@classmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
return structure_key
@classmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
return modulestore.get_course(structure_key, depth=None)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"course": unicode(normalized_structure_key), "org": normalized_structure_key.org}
@classmethod
def do_course_reindex(cls, modulestore, course_key):
"""
(Re)index all content within the given course, tracking the fact that a full reindex has taken place
"""
return cls._do_reindex(modulestore, course_key)
@classmethod
def fetch_group_usage(cls, modulestore, structure):
groups_usage_dict = {}
groups_usage_info = GroupConfiguration.get_partitions_usage_info(modulestore, structure).items()
groups_usage_info.extend(
GroupConfiguration.get_content_groups_items_usage_info(
modulestore,
structure
).items()
)
if groups_usage_info:
for name, group in groups_usage_info:
for module in group:
view, args, kwargs = resolve(module['url']) # pylint: disable=unused-variable
usage_key_string = unicode(kwargs['usage_key_string'])
if groups_usage_dict.get(usage_key_string, None):
groups_usage_dict[usage_key_string].append(name)
else:
groups_usage_dict[usage_key_string] = [name]
return groups_usage_dict
@classmethod
def supplemental_index_information(cls, modulestore, structure):
"""
Perform additional indexing from loaded structure object
"""
CourseAboutSearchIndexer.index_about_information(modulestore, structure)
@classmethod
def supplemental_fields(cls, item):
"""
Add location path to the item object
Once we've established the path of names, the first name is the course
name, and the next 3 names are the navigable path within the edx
application. Notice that we stop at that level because a full path to
deep children would be confusing.
"""
location_path = []
parent = item
while parent is not None:
path_component_name = parent.display_name
if not path_component_name:
path_component_name = unicode(cls.UNNAMED_MODULE_NAME)
location_path.append(path_component_name)
parent = parent.get_parent()
location_path.reverse()
return {
"course_name": location_path[0],
"location": location_path[1:4]
}
class LibrarySearchIndexer(SearchIndexerBase):
"""
Base class to perform indexing for library search from different modulestores
"""
INDEX_NAME = "library_index"
DOCUMENT_TYPE = "library_content"
ENABLE_INDEXING_KEY = 'ENABLE_LIBRARY_INDEX'
INDEX_EVENT = {
'name': 'edx.library.index.reindexed',
'category': 'library_index'
}
@classmethod
def normalize_structure_key(cls, structure_key):
""" Normalizes structure key for use in indexing """
return normalize_key_for_search(structure_key)
@classmethod
def _fetch_top_level(cls, modulestore, structure_key):
""" Fetch the item from the modulestore location """
return modulestore.get_library(structure_key, depth=None)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"library": unicode(normalized_structure_key)}
@classmethod
def _id_modifier(cls, usage_id):
""" Modifies usage_id to submit to index """
return usage_id.replace(library_key=(usage_id.library_key.replace(version_guid=None, branch=None)))
@classmethod
def do_library_reindex(cls, modulestore, library_key):
"""
(Re)index all content within the given library, tracking the fact that a full reindex has taken place
"""
return cls._do_reindex(modulestore, library_key)
class AboutInfo(object):
""" About info structure to contain
1) Property name to use
2) Where to add in the index (using flags above)
3) Where to source the properties value
"""
# Bitwise Flags for where to index the information
#
# ANALYSE - states that the property text contains content that we wish to be able to find matched within
# e.g. "joe" should yield a result for "I'd like to drink a cup of joe"
#
# PROPERTY - states that the property text should be a property of the indexed document, to be returned with the
# results: search matches will only be made on exact string matches
# e.g. "joe" will only match on "joe"
#
# We are using bitwise flags because one may want to add the property to EITHER or BOTH parts of the index
# e.g. university name is desired to be analysed, so that a search on "Oxford" will match
# property values "University of Oxford" and "Oxford Brookes University",
# but it is also a useful property, because within a (future) filtered search a user
# may have chosen to filter courses from "University of Oxford"
#
# see https://wiki.python.org/moin/BitwiseOperators for information about bitwise shift operator used below
#
ANALYSE = 1 << 0 # Add the information to the analysed content of the index
PROPERTY = 1 << 1 # Add the information as a property of the object being indexed (not analysed)
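    # e.g. AboutInfo.ANALYSE | AboutInfo.PROPERTY (used for "title" below) places the
    # value both in the analysed content and alongside it as an exact-match property.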
def __init__(self, property_name, index_flags, source_from):
self.property_name = property_name
self.index_flags = index_flags
self.source_from = source_from
def get_value(self, **kwargs):
""" get the value for this piece of information, using the correct source """
return self.source_from(self, **kwargs)
def from_about_dictionary(self, **kwargs):
""" gets the value from the kwargs provided 'about_dictionary' """
about_dictionary = kwargs.get('about_dictionary', None)
if not about_dictionary:
raise ValueError("Context dictionary does not contain expected argument 'about_dictionary'")
return about_dictionary.get(self.property_name, None)
def from_course_property(self, **kwargs):
""" gets the value from the kwargs provided 'course' """
course = kwargs.get('course', None)
if not course:
raise ValueError("Context dictionary does not contain expected argument 'course'")
return getattr(course, self.property_name, None)
def from_course_mode(self, **kwargs):
""" fetches the available course modes from the CourseMode model """
course = kwargs.get('course', None)
if not course:
raise ValueError("Context dictionary does not contain expected argument 'course'")
return [mode.slug for mode in CourseMode.modes_for_course(course.id)]
# Source location options - either from the course or the about info
FROM_ABOUT_INFO = from_about_dictionary
FROM_COURSE_PROPERTY = from_course_property
FROM_COURSE_MODE = from_course_mode
class CourseAboutSearchIndexer(object):
"""
Class to perform indexing of about information from course object
"""
DISCOVERY_DOCUMENT_TYPE = "course_info"
INDEX_NAME = CoursewareSearchIndexer.INDEX_NAME
# List of properties to add to the index - each item in the list is an instance of AboutInfo object
ABOUT_INFORMATION_TO_INCLUDE = [
AboutInfo("advertised_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("announcement", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("effort", AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("display_name", AboutInfo.ANALYSE, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("overview", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("title", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("university", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("number", AboutInfo.ANALYSE | AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("short_description", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("description", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("key_dates", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("video", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("course_staff_short", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("course_staff_extended", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("requirements", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("syllabus", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("textbook", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("faq", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("more_info", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("ocw_links", AboutInfo.ANALYSE, AboutInfo.FROM_ABOUT_INFO),
AboutInfo("enrollment_start", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("enrollment_end", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("org", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
AboutInfo("modes", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_MODE),
AboutInfo("language", AboutInfo.PROPERTY, AboutInfo.FROM_COURSE_PROPERTY),
]
@classmethod
def index_about_information(cls, modulestore, course):
"""
Add the given course to the course discovery index
Arguments:
modulestore - modulestore object to use for operations
course - course object from which to take properties, locate about information
"""
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
course_id = unicode(course.id)
course_info = {
'id': course_id,
'course': course_id,
'content': {},
'image_url': course_image_url(course),
}
# load data for all of the 'about' modules for this course into a dictionary
about_dictionary = {
item.location.block_id: item.data
for item in modulestore.get_items(course.id, qualifiers={"category": "about"})
}
about_context = {
"course": course,
"about_dictionary": about_dictionary,
}
for about_information in cls.ABOUT_INFORMATION_TO_INCLUDE:
# Broad exception handler so that a single bad property does not scupper the collection of others
try:
section_content = about_information.get_value(**about_context)
except: # pylint: disable=bare-except
section_content = None
log.warning(
"Course discovery could not collect property %s for course %s",
about_information.property_name,
course_id,
exc_info=True,
)
if section_content:
if about_information.index_flags & AboutInfo.ANALYSE:
analyse_content = section_content
if isinstance(section_content, basestring):
analyse_content = strip_html_content_to_text(section_content)
course_info['content'][about_information.property_name] = analyse_content
if about_information.index_flags & AboutInfo.PROPERTY:
course_info[about_information.property_name] = section_content
# Broad exception handler to protect around and report problems with indexing
try:
searcher.index(cls.DISCOVERY_DOCUMENT_TYPE, [course_info])
except:
log.exception(
"Course discovery indexing error encountered, course discovery index may be out of date %s",
course_id,
)
raise
log.debug(
"Successfully added %s course to the course discovery index",
course_id
)
@classmethod
def _get_location_info(cls, normalized_structure_key):
""" Builds location info dictionary """
return {"course": unicode(normalized_structure_key), "org": normalized_structure_key.org}
@classmethod
def remove_deleted_items(cls, structure_key):
""" Remove item from Course About Search_index """
searcher = SearchEngine.get_search_engine(cls.INDEX_NAME)
if not searcher:
return
response = searcher.search(
doc_type=cls.DISCOVERY_DOCUMENT_TYPE,
field_dictionary=cls._get_location_info(structure_key)
)
result_ids = [result["data"]["id"] for result in response["results"]]
searcher.remove(cls.DISCOVERY_DOCUMENT_TYPE, result_ids)
| agpl-3.0 | -4,244,961,523,196,492,300 | 40.616893 | 120 | 0.629422 | false |
mgerhardy/fips | mod/log.py | 3 | 2092 | """logging functions"""
import sys
# log colors
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
DEF = '\033[39m'
#-------------------------------------------------------------------------------
def error(msg, fatal=True) :
"""
Print error message and exit with error code 10
unless 'fatal' is False.
:param msg: string message
:param fatal: exit program with error code 10 if True (default is true)
"""
print('{}[ERROR]{} {}'.format(RED, DEF, msg))
if fatal :
sys.exit(10)
#-------------------------------------------------------------------------------
def warn(msg) :
"""print a warning message"""
print('{}[WARNING]{} {}'.format(YELLOW, DEF, msg))
#-------------------------------------------------------------------------------
def ok(item, status) :
"""print a green 'ok' message
:param item: first part of message
:param status: status (colored green)
"""
print('{}:\t{}{}{}'.format(item, GREEN, status, DEF))
#-------------------------------------------------------------------------------
def failed(item, status) :
"""print a red 'fail' message
:param item: first part of message
:param status: status (colored red)
"""
print('{}:\t{}{}{}'.format(item, RED, status, DEF))
#-------------------------------------------------------------------------------
def optional(item, status) :
"""print a yellow 'optional' message
:param item: first part of message
    :param status: status (colored yellow)
"""
print('{}:\t{}{}{}'.format(item, YELLOW, status, DEF))
#-------------------------------------------------------------------------------
def info(msg) :
"""print a normal log message
:param msg: message
"""
print(msg)
#-------------------------------------------------------------------------------
def colored(color, msg) :
"""print a colored log message
:param color: color escape sequence (e.g. log.YELLOW)
:param msg: text message
"""
print('{}{}{}'.format(color, msg, DEF))
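#-------------------------------------------------------------------------------
# Minimal usage sketch (assumes fips' usual 'from mod import log' import style):
#
#   log.info('running cmake')
#   log.ok('config', 'ok')
#   log.warn('ccache not found, building without it')
#   log.error('compilation failed', fatal=False)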
| mit | -3,748,004,492,298,908,000 | 28.055556 | 80 | 0.425908 | false |
rcbops/nova-buildpackage | nova/api/openstack/wsgi.py | 1 | 37515 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
from xml.dom import minidom
from xml.parsers import expat
from lxml import etree
import webob
from webob import exc
from nova import exception
from nova import log as logging
from nova import utils
from nova import wsgi
XMLNS_V10 = 'http://docs.rackspacecloud.com/servers/api/v1.0'
XMLNS_V11 = 'http://docs.openstack.org/compute/api/v1.1'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
LOG = logging.getLogger('nova.api.openstack.wsgi')
# The vendor content types should serialize identically to the non-vendor
# content types. So to avoid littering the code with both options, we
# map the vendor to the other when looking up the type
_CONTENT_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'application/json',
'application/vnd.openstack.compute+xml': 'application/xml',
}
SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.compute+json',
'application/xml',
'application/vnd.openstack.compute+xml',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'json',
'application/json': 'json',
'application/vnd.openstack.compute+xml': 'xml',
'application/xml': 'xml',
'application/atom+xml': 'atom',
}
class Request(webob.Request):
"""Add some Openstack API-specific logic to the base webob.Request."""
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'nova.best_content_type' not in self.environ:
# Calculate the best MIME type
content_type = None
# Check URL path suffix
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in SUPPORTED_CONTENT_TYPES:
content_type = possible_type
if not content_type:
content_type = self.accept.best_match(SUPPORTED_CONTENT_TYPES)
self.environ['nova.best_content_type'] = content_type or \
'application/json'
return self.environ['nova.best_content_type']
def get_content_type(self):
"""Determine content type of the request body.
Does not do any body introspection, only checks header
"""
if not "Content-Type" in self.headers:
return None
allowed_types = SUPPORTED_CONTENT_TYPES
content_type = self.content_type
if content_type not in allowed_types:
raise exception.InvalidContentType(content_type=content_type)
return content_type
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization"""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return utils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class XMLDeserializer(TextDeserializer):
def __init__(self, metadata=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
"""
super(XMLDeserializer, self).__init__()
self.metadata = metadata or {}
def _from_xml(self, datastring):
plurals = set(self.metadata.get('plurals', {}))
try:
node = minidom.parseString(datastring).childNodes[0]
return {node.nodeName: self._from_xml_node(node, plurals)}
except expat.ExpatError:
msg = _("cannot understand XML")
raise exception.MalformedRequestBody(reason=msg)
def _from_xml_node(self, node, listnames):
"""Convert a minidom node to a simple Python type.
:param listnames: list of XML node names whose subnodes should
be considered list items.
"""
if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3:
return node.childNodes[0].nodeValue
elif node.nodeName in listnames:
return [self._from_xml_node(n, listnames) for n in node.childNodes]
else:
result = dict()
for attr in node.attributes.keys():
result[attr] = node.attributes[attr].nodeValue
for child in node.childNodes:
if child.nodeType != node.TEXT_NODE:
result[child.nodeName] = self._from_xml_node(child,
listnames)
return result
def find_first_child_named(self, parent, name):
"""Search a nodes children for the first child with a given name"""
for node in parent.childNodes:
if node.nodeName == name:
return node
return None
def find_children_named(self, parent, name):
"""Return all of a nodes children who have the given name"""
for node in parent.childNodes:
if node.nodeName == name:
yield node
def extract_text(self, node):
"""Get the text field contained by the given node"""
if len(node.childNodes) == 1:
child = node.childNodes[0]
if child.nodeType == child.TEXT_NODE:
return child.nodeValue
return ""
def default(self, datastring):
return {'body': self._from_xml(datastring)}
class MetadataXMLDeserializer(XMLDeserializer):
def extract_metadata(self, metadata_node):
"""Marshal the metadata attribute of a parsed request"""
metadata = {}
if metadata_node is not None:
for meta_node in self.find_children_named(metadata_node, "meta"):
key = meta_node.getAttribute("key")
metadata[key] = self.extract_text(meta_node)
return metadata
class DictSerializer(ActionDispatcher):
"""Default request body serialization"""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization"""
def default(self, data):
return utils.dumps(data)
class XMLDictSerializer(DictSerializer):
def __init__(self, metadata=None, xmlns=None):
"""
:param metadata: information needed to deserialize xml into
a dictionary.
:param xmlns: XML namespace to include with serialized xml
"""
super(XMLDictSerializer, self).__init__()
self.metadata = metadata or {}
self.xmlns = xmlns
def default(self, data):
# We expect data to contain a single key which is the XML root.
root_key = data.keys()[0]
doc = minidom.Document()
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
return self.to_xml_string(node)
def to_xml_string(self, node, has_atom=False):
self._add_xmlns(node, has_atom)
return node.toxml('UTF-8')
#NOTE (ameade): the has_atom should be removed after all of the
# xml serializers and view builders have been updated to the current
# spec that required all responses include the xmlns:atom, the has_atom
# flag is to prevent current tests from breaking
def _add_xmlns(self, node, has_atom=False):
if self.xmlns is not None:
node.setAttribute('xmlns', self.xmlns)
if has_atom:
node.setAttribute('xmlns:atom', "http://www.w3.org/2005/Atom")
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)
# Set the xml namespace if one is specified
# TODO(justinsb): We could also use prefixes on the keys
xmlns = metadata.get('xmlns', None)
if xmlns:
result.setAttribute('xmlns', xmlns)
#TODO(bcwaldon): accomplish this without a type-check
if isinstance(data, list):
collections = metadata.get('list_collections', {})
if nodename in collections:
metadata = collections[nodename]
for item in data:
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(item))
result.appendChild(node)
return result
singular = metadata.get('plurals', {}).get(nodename, None)
if singular is None:
if nodename.endswith('s'):
singular = nodename[:-1]
else:
singular = 'item'
for item in data:
node = self._to_xml_node(doc, metadata, singular, item)
result.appendChild(node)
#TODO(bcwaldon): accomplish this without a type-check
elif isinstance(data, dict):
collections = metadata.get('dict_collections', {})
if nodename in collections:
metadata = collections[nodename]
for k, v in data.items():
node = doc.createElement(metadata['item_name'])
node.setAttribute(metadata['item_key'], str(k))
text = doc.createTextNode(str(v))
node.appendChild(text)
result.appendChild(node)
return result
attrs = metadata.get('attributes', {}).get(nodename, {})
for k, v in data.items():
if k in attrs:
result.setAttribute(k, str(v))
else:
node = self._to_xml_node(doc, metadata, k, v)
result.appendChild(node)
else:
# Type is atom
node = doc.createTextNode(str(data))
result.appendChild(node)
return result
def _create_link_nodes(self, xml_doc, links):
link_nodes = []
for link in links:
link_node = xml_doc.createElement('atom:link')
link_node.setAttribute('rel', link['rel'])
link_node.setAttribute('href', link['href'])
if 'type' in link:
link_node.setAttribute('type', link['type'])
link_nodes.append(link_node)
return link_nodes
def _to_xml(self, root):
"""Convert the xml object to an xml string."""
return etree.tostring(root, encoding='UTF-8', xml_declaration=True)
@utils.deprecated("The lazy serialization middleware is no longer necessary.")
class LazySerializationMiddleware(wsgi.Middleware):
"""Lazy serialization middleware.
Provided only for backwards compatibility with existing
api-paste.ini files. This middleware will be removed in future
versions of nova.
"""
pass
def serializers(**serializers):
"""Attaches serializers to a method.
This decorator associates a dictionary of serializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_serializers'):
func.wsgi_serializers = {}
func.wsgi_serializers.update(serializers)
return func
return decorator
def deserializers(**deserializers):
"""Attaches deserializers to a method.
This decorator associates a dictionary of deserializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_deserializers'):
func.wsgi_deserializers = {}
func.wsgi_deserializers.update(deserializers)
return func
return decorator
def response(code):
"""Attaches response code to a method.
This decorator associates a response code with a method. Note
that the function attributes are directly manipulated; the method
is not wrapped.
"""
def decorator(func):
func.wsgi_code = code
return func
return decorator
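# Illustrative use of the decorators above on a controller method
# (WidgetController and WidgetXMLSerializer are hypothetical names):
#
#   class WidgetController(object):
#       @serializers(xml=WidgetXMLSerializer)
#       @response(202)
#       def create(self, req, body):
#           return {'widget': body}
#
# When dispatched through Resource below, the returned dict is wrapped in a
# ResponseObject, serialized with the matching serializer, and sent with HTTP 202.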
class ResponseObject(object):
"""Bundles a response object with appropriate serializers.
Object that app methods may return in order to bind alternate
serializers with a response object to be serialized. Its use is
optional.
"""
def __init__(self, obj, code=None, **serializers):
"""Binds serializers with an object.
Takes keyword arguments akin to the @serializer() decorator
for specifying serializers. Serializers specified will be
given preference over default serializers or method-specific
serializers on return.
"""
self.obj = obj
self.serializers = serializers
self._default_code = 200
self._code = code
self._headers = {}
self.serializer = None
self.media_type = None
def __getitem__(self, key):
"""Retrieves a header with the given name."""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""Sets a header with the given name to the given value."""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""Deletes the header with the given name."""
del self._headers[key.lower()]
def _bind_method_serializers(self, meth_serializers):
"""Binds method serializers with the response object.
Binds the method serializers with the response object.
Serializers specified to the constructor will take precedence
over serializers specified to this method.
:param meth_serializers: A dictionary with keys mapping to
response types and values containing
serializer objects.
"""
# We can't use update because that would be the wrong
# precedence
for mtype, serializer in meth_serializers.items():
self.serializers.setdefault(mtype, serializer)
def get_serializer(self, content_type, default_serializers=None):
"""Returns the serializer for the wrapped object.
Returns the serializer for the wrapped object subject to the
indicated content type. If no serializer matching the content
type is attached, an appropriate serializer drawn from the
default serializers will be used. If no appropriate
serializer is available, raises InvalidContentType.
"""
default_serializers = default_serializers or {}
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in self.serializers:
return mtype, self.serializers[mtype]
else:
return mtype, default_serializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
def preserialize(self, content_type, default_serializers=None):
"""Prepares the serializer that will be used to serialize.
Determines the serializer that will be used and prepares an
instance of it for later call. This allows the serializer to
be accessed by extensions for, e.g., template extension.
"""
mtype, serializer = self.get_serializer(content_type,
default_serializers)
self.media_type = mtype
self.serializer = serializer()
def attach(self, **kwargs):
"""Attach slave templates to serializers."""
if self.media_type in kwargs:
self.serializer.attach(kwargs[self.media_type])
def serialize(self, request, content_type, default_serializers=None):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
if self.serializer:
serializer = self.serializer
else:
_mtype, _serializer = self.get_serializer(content_type,
default_serializers)
serializer = _serializer()
response = webob.Response()
response.status_int = self.code
for hdr, value in self._headers.items():
response.headers[hdr] = value
response.headers['Content-Type'] = content_type
if self.obj is not None:
response.body = serializer.serialize(self.obj)
return response
@property
def code(self):
"""Retrieve the response status."""
return self._code or self._default_code
@property
def headers(self):
"""Retrieve the headers."""
return self._headers.copy()
def action_peek_json(body):
"""Determine action to invoke."""
try:
decoded = utils.loads(body)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
# Make sure there's exactly one key...
if len(decoded) != 1:
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
# Return the action and the decoded body...
return decoded.keys()[0]
def action_peek_xml(body):
"""Determine action to invoke."""
dom = minidom.parseString(body)
action_node = dom.childNodes[0]
return action_node.tagName
class ResourceExceptionHandler(object):
"""Context manager to handle Resource exceptions.
Used when processing exceptions generated by API implementation
methods (or their extensions). Converts most exceptions to Fault
exceptions, with the appropriate logging.
"""
def __enter__(self):
return None
def __exit__(self, ex_type, ex_value, ex_traceback):
if not ex_value:
return True
if isinstance(ex_value, exception.NotAuthorized):
msg = unicode(ex_value)
raise Fault(webob.exc.HTTPForbidden(explanation=msg))
elif isinstance(ex_value, TypeError):
LOG.exception(ex_value)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_("Fault thrown: %s"), unicode(ex_value))
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info(_("HTTP exception thrown: %s"), unicode(ex_value))
raise Fault(ex_value)
# We didn't handle the exception
return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
def __init__(self, controller, action_peek=None, **deserializers):
"""
:param controller: object that implement methods created by routes lib
:param action_peek: dictionary of routines for peeking into an action
request body to determine the desired action
"""
self.controller = controller
default_deserializers = dict(xml=XMLDeserializer,
json=JSONDeserializer)
default_deserializers.update(deserializers)
self.default_deserializers = default_deserializers
self.default_serializers = dict(xml=XMLDictSerializer,
json=JSONDictSerializer)
self.action_peek = dict(xml=action_peek_xml,
json=action_peek_json)
self.action_peek.update(action_peek or {})
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
self.register_actions(controller)
# Save a mapping of extensions
self.wsgi_extensions = {}
self.wsgi_action_extensions = {}
def register_actions(self, controller):
"""Registers controller actions with this resource."""
actions = getattr(controller, 'wsgi_actions', {})
for key, method_name in actions.items():
self.wsgi_actions[key] = getattr(controller, method_name)
def register_extensions(self, controller):
"""Registers controller extensions with this resource."""
extensions = getattr(controller, 'wsgi_extensions', [])
for method_name, action_name in extensions:
# Look up the extending method
extension = getattr(controller, method_name)
if action_name:
# Extending an action...
if action_name not in self.wsgi_action_extensions:
self.wsgi_action_extensions[action_name] = []
self.wsgi_action_extensions[action_name].append(extension)
else:
# Extending a regular method
if method_name not in self.wsgi_extensions:
self.wsgi_extensions[method_name] = []
self.wsgi_extensions[method_name].append(extension)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
# NOTE(Vek): Check for get_action_args() override in the
# controller
if hasattr(self.controller, 'get_action_args'):
return self.controller.get_action_args(request_environment)
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except (KeyError, IndexError, AttributeError):
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
def get_body(self, request):
try:
content_type = request.get_content_type()
except exception.InvalidContentType:
LOG.debug(_("Unrecognized Content-Type provided in request"))
return None, ''
if not content_type:
LOG.debug(_("No Content-Type provided in request"))
return None, ''
if len(request.body) <= 0:
LOG.debug(_("Empty body provided in request"))
return None, ''
return content_type, request.body
def deserialize(self, meth, content_type, body):
meth_deserializers = getattr(meth, 'wsgi_deserializers', {})
try:
mtype = _MEDIA_TYPE_MAP.get(content_type, content_type)
if mtype in meth_deserializers:
deserializer = meth_deserializers[mtype]
else:
deserializer = self.default_deserializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
return deserializer().deserialize(body)
def pre_process_extensions(self, extensions, request, action_args):
# List of callables for post-processing extensions
post = []
for ext in extensions:
if inspect.isgeneratorfunction(ext):
response = None
# If it's a generator function, the part before the
# yield is the preprocessing stage
try:
with ResourceExceptionHandler():
gen = ext(req=request, **action_args)
response = gen.next()
except Fault as ex:
response = ex
# We had a response...
if response:
return response, []
# No response, queue up generator for post-processing
post.append(gen)
else:
# Regular functions only perform post-processing
post.append(ext)
# Run post-processing in the reverse order
return None, reversed(post)
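    # Illustrative extension callables handled by pre_process_extensions above and
    # post_process_extensions below (hypothetical names; real extensions are bound
    # via register_extensions):
    #
    #   def show_ext(self, req, resp_obj, **kwargs):    # post-processing only
    #       resp_obj['x-extra-header'] = 'value'
    #
    #   def show_gen_ext(self, req, **kwargs):          # pre- and post-processing
    #       # ...pre-processing runs up to the yield...
    #       resp_obj = yield                            # receives the ResponseObject
    #       # ...post-processing inspects/modifies resp_obj...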
def post_process_extensions(self, extensions, resp_obj, request,
action_args):
for ext in extensions:
response = None
if inspect.isgenerator(ext):
# If it's a generator, run the second half of
# processing
try:
with ResourceExceptionHandler():
response = ext.send(resp_obj)
except StopIteration:
# Normal exit of generator
continue
except Fault as ex:
response = ex
else:
# Regular functions get post-processing...
try:
with ResourceExceptionHandler():
response = ext(req=request, resp_obj=resp_obj,
**action_args)
except Fault as ex:
response = ex
# We had a response...
if response:
return response
return None
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.info("%(method)s %(url)s" % {"method": request.method,
"url": request.url})
# Identify the action, its arguments, and the requested
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
content_type, body = self.get_body(request)
accept = request.best_match_content_type()
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Now, deserialize the request body...
try:
if content_type:
contents = self.deserialize(meth, content_type, body)
else:
contents = {}
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
if ('nova.context' in request.environ and project_id
and project_id != request.environ['nova.context'].project_id):
msg = _("Malformed request url")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Run pre-processing extensions
response, post = self.pre_process_extensions(extensions,
request, action_args)
if not response:
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if type(action_result) is dict or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
# Do a preserialize to set up the response object
serializers = getattr(meth, 'wsgi_serializers', {})
resp_obj._bind_method_serializers(serializers)
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
resp_obj.preserialize(accept, self.default_serializers)
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept,
self.default_serializers)
try:
msg_dict = dict(url=request.url, status=response.status_int)
msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
        except AttributeError as e:
msg_dict = dict(url=request.url, e=e)
msg = _("%(url)s returned a fault: %(e)s" % msg_dict)
LOG.info(msg)
return response
def get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
# Look up the method
try:
if not self.controller:
meth = getattr(self, action)
else:
meth = getattr(self.controller, action)
except AttributeError as ex:
if (not self.wsgi_actions or
action not in ['action', 'create', 'delete']):
# Propagate the error
raise
else:
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
# OK, it's an action; figure out which action...
mtype = _MEDIA_TYPE_MAP.get(content_type)
action_name = self.action_peek[mtype](body)
else:
action_name = action
# Look up the action method
return (self.wsgi_actions[action_name],
self.wsgi_action_extensions.get(action_name, []))
def dispatch(self, method, request, action_args):
"""Dispatch a call to the action-specific method."""
return method(req=request, **action_args)
def action(name):
"""Mark a function as an action.
The given name will be taken as the action key in the body.
This is also overloaded to allow extensions to provide
non-extending definitions of create and delete operations.
"""
def decorator(func):
func.wsgi_action = name
return func
return decorator
def extends(*args, **kwargs):
"""Indicate a function extends an operation.
Can be used as either::
@extends
def index(...):
pass
or as::
@extends(action='resize')
def _action_resize(...):
pass
"""
def decorator(func):
# Store enough information to find what we're extending
func.wsgi_extends = (func.__name__, kwargs.get('action'))
return func
# If we have positional arguments, call the decorator
if args:
return decorator(*args)
# OK, return the decorator instead
return decorator
class ControllerMetaclass(type):
"""Controller metaclass.
This metaclass automates the task of assembling a dictionary
mapping action keys to method names.
"""
def __new__(mcs, name, bases, cls_dict):
"""Adds the wsgi_actions dictionary to the class."""
# Find all actions
actions = {}
extensions = []
for key, value in cls_dict.items():
if not callable(value):
continue
if getattr(value, 'wsgi_action', None):
actions[value.wsgi_action] = key
elif getattr(value, 'wsgi_extends', None):
extensions.append(value.wsgi_extends)
# Add the actions and extensions to the class dict
cls_dict['wsgi_actions'] = actions
cls_dict['wsgi_extensions'] = extensions
return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
cls_dict)
class Controller(object):
"""Default controller."""
__metaclass__ = ControllerMetaclass
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
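# Minimal illustrative subclass (hypothetical controller, not part of this module):
#
#   class FooController(Controller):
#       @action('os-frob')
#       def _frob(self, req, id, body):
#           return {'frobbed': True}
#
# ControllerMetaclass records the decorated method in wsgi_actions, so a Resource
# wrapping FooController dispatches an {"os-frob": {...}} action body to _frob.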
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "resizeNotAllowed",
404: "itemNotFound",
405: "badMethod",
409: "inProgress", # FIXME(comstud): This doesn't seem right
413: "overLimit",
415: "badMediaType",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
self.status_int = exception.status_int
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
fault_data = {
fault_name: {
'code': code,
'message': self.wrapped_exc.explanation}}
if code == 413:
retry = self.wrapped_exc.headers['Retry-After']
fault_data[fault_name]['retryAfter'] = retry
# 'code' is an attribute on the fault tag itself
metadata = {'attributes': {fault_name: 'code'}}
xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)
content_type = req.best_match_content_type()
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
self.wrapped_exc.body = serializer.serialize(fault_data)
self.wrapped_exc.content_type = content_type
return self.wrapped_exc
def __str__(self):
return self.wrapped_exc.__str__()
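# For reference, a wrapped HTTPNotFound serialized by Fault as JSON looks roughly like:
#   {"itemNotFound": {"code": 404, "message": "<explanation from the exception>"}}
# (the XML form carries the same data, with 'code' as an attribute of the fault tag)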
class OverLimitFault(webob.exc.HTTPException):
"""
Rate-limited request response.
"""
def __init__(self, message, details, retry_time):
"""
Initialize new `OverLimitFault` with relevant information.
"""
self.wrapped_exc = webob.exc.HTTPRequestEntityTooLarge()
self.content = {
"overLimitFault": {
"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
},
}
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""
Return the wrapped exception with a serialized body conforming to our
error format.
"""
content_type = request.best_match_content_type()
metadata = {"attributes": {"overLimitFault": "code"}}
xml_serializer = XMLDictSerializer(metadata, XMLNS_V11)
serializer = {
'application/xml': xml_serializer,
'application/json': JSONDictSerializer(),
}[content_type]
content = serializer.serialize(self.content)
self.wrapped_exc.body = content
return self.wrapped_exc
| apache-2.0 | 7,713,902,429,385,710,000 | 33.385885 | 79 | 0.594962 | false |
vc3-project/vc3-info-service | vc3infoservice/infoservice.py | 1 | 29417 | #! /usr/bin/env python
__author__ = "John Hover, Jose Caballero"
__copyright__ = "2017 John Hover"
__credits__ = []
__license__ = "GPL"
__version__ = "0.9.1"
__maintainer__ = "John Hover"
__email__ = "[email protected]"
__status__ = "Production"
import cherrypy
import logging
import logging.handlers
import os
import platform
import pwd
import random
import json
import string
import socket
import sys
import threading
import time
import traceback
from optparse import OptionParser
from ConfigParser import ConfigParser
from vc3infoservice.core import InfoEntityExistsException, InfoEntityMissingException
# Since script is in package "vc3" we can know what to add to path for
# running directly during development
(libpath,tail) = os.path.split(sys.path[0])
sys.path.append(libpath)
import pluginmanager as pm
class InfoHandler(object):
'''
Handles low-level operations and persistence of information
from service using back-end plugin.
Inbound arguments of InfoServiceAPI are JSON strings. These are converted to Python
primitive objects for persistence plugin calls.
inbound entities are expected to be in the form of JSON with entity.name indices, e.g.
'{ "name" : { "name" : "namevalue", "key1" : "val1" }}'
returned entities are in the form of unindexed entity JSON dictionaries , e.g.
'{ "name" : "namevalue", "key1" : "val1" }'
'''
def __init__(self, config):
self.log = logging.getLogger()
self.log.debug("Initializing Info Handler...")
self.config = config
# Get persistence plugin
pluginname = config.get('persistence','plugin')
psect = "plugin-%s" % pluginname.lower()
self.log.debug("Creating persistence plugin...")
self.persist = pm.getplugin(parent=self,
paths=['vc3infoservice', 'plugins', 'persist'],
name=pluginname,
config=self.config,
section=psect)
self.log.debug("Done initializing InfoHandler")
################################################################################
# Entity-oriented methods
################################################################################
def storeentity(self, key, entityname, edoc):
'''
        Stores the contents of a JSON doc string at the entity level. If the entity
        already exists, it is left untouched and an error status is returned.
Entity doc:
{"username": {"last": "Last",
"name": "username",
"acl": null,
}
}
'''
self.log.debug("input JSON doc to merge is %s" % edoc)
entitydict = json.loads(edoc)
self.persist.lock.acquire()
try:
currentdoc = self.persist.getdocument(key)
try:
existingentity = currentdoc[entityname]
cherrypy.response.status = 405
return "Attempt to create (POST) already-existing Entity. Name: %s. " % entityname
except KeyError:
self.log.debug("No existing entity %s. As expected..." % entityname)
pass
self.log.debug("Merging entity with existing document.")
newdoc = self.merge( entitydict, currentdoc)
self.persist.storedocument(key, newdoc)
self.log.debug("Successfully stored entity.")
finally:
self.persist.lock.release()
def mergeentity(self, key, entityname, edoc):
'''
        Merges the contents of an (update-only) JSON doc string at the entity level.
        Within the entity, existing attribute values are replaced with the new values.
'''
self.log.debug("input entity doc to merge is %s" % edoc)
# e.g. {"SPT": {"allocations": ["lincolnb.uchicago-midway"]}}
self.log.debug("input JSON doc to merge is type %s" % type(edoc))
entitydict = json.loads(edoc)
self.persist.lock.acquire()
try:
currentdoc = self.persist.getdocument(key)
existingentity = currentdoc[entityname]
newentity = entitydict[entityname]
#self.log.debug("Existing entity: %s" % existingentity)
#self.log.debug("New entity:" % newentity)
#self.log.debug("Merging newentity with existing entity.")
self.entitymerge(newentity, existingentity)
#self.log.debug("Resulting existing: %s" % existingentity)
self.persist.storedocument(key, currentdoc)
self.log.debug("Successfully stored entity.")
except KeyError:
cherrypy.response.status = 405
return "Attempt to update (PUT) non-existent Entity. Name: %s. " % entityname
finally:
self.persist.lock.release()
def entitymerge(self, src, dest):
'''
Merges bare src entity into dest entity, unconditionally replacing *attribute*
values at entity-attribute level. Intended to be used in-place, so dest is not returned.
{ u'allocations': [u'lincolnb.uchicago-midway']}
+
{ u'allocations': [],
u'name': u'SPT',
u'blueprints': [] }
=
{u'allocations': [u'lincolnb.uchicago-midway'],
u'name': u'SPT',
u'blueprints': []}
'''
self.log.debug("Handling merging %s into %s " % (src, dest))
for attributename in src.keys():
dest[attributename] = src[attributename]
def getentity(self, key, entityname):
'''
Gets JSON representation of entity.
{ 'name' : <entityname>',
'key1' : '<val1>'
}
'''
currentdoc = self.persist.getdocument(key)
self.log.debug("Current doc for %s is %s" % (key, currentdoc))
try:
ed = currentdoc[entityname]
je = json.dumps(ed)
self.log.debug("JSON entity is %s" % str(je))
return je
except KeyError:
cherrypy.response.status = 405
return "Attempt to GET non-existent Entity. Name: %s. " % entityname
#raise InfoEntityMissingException("Attempt to update or get a non-existent Entity.")
def deleteentity(self, key, entityname):
'''
deletes relevant entity, if it exists.
'''
self.persist.lock.acquire()
try:
doc = self.persist.getdocument(key)
self.log.debug("Deleting entity %s in key %s" % (entityname, key))
doc.pop(entityname)
self.persist.storedocument(key, doc)
self.log.debug("Successfully stored.")
except KeyError:
cherrypy.response.status = 405
return "Entity %s not found, so can't delete it." % entityname
finally:
self.persist.lock.release()
################################################################################
# Category document-oriented methods
################################################################################
def storedocument(self, key, doc):
'''
Overwrites existing document with new.
'''
self.log.debug("Storing document for key %s" % key)
pd = json.loads(doc)
self.persist.lock.acquire()
try:
self.persist.storedocument(key, pd)
finally:
self.persist.lock.release()
    def mergedocument(self, key, doc):
        '''
        Merges the JSON doc string into the current document for the given key.
        '''
        self.log.debug("Merging document for key %s" % key)
        self.persist.lock.acquire()
        try:
            dcurrent = self.persist.getdocument(key)
            self.log.debug("current retrieved doc is type %s" % type(dcurrent))
            md = json.loads(doc)
            self.log.debug("doc to merge is type %s" % type(md))
            newdoc = self.merge(md, dcurrent)
            self.log.debug("Storing merged document for key %s" % key)
            self.persist.storedocument(key, newdoc)
        finally:
            self.persist.lock.release()
def deletedocument(self, key):
self.log.debug("Deleting document for key %s" % key)
#pd = json.loads(doc)
self.persist.lock.acquire()
emptydict = {}
try:
self.persist.storedocument(key, emptydict)
finally:
self.persist.lock.release()
def getdocument(self, key):
'''
Gets JSON representation of document.
'''
pd = self.persist.getdocument(key)
jd = json.dumps(pd)
self.log.debug("d is type %s" % type(jd))
return jd
################################################################################
# Utility methods
################################################################################
    def _getpythondocument(self, key):
        '''
        Gets Python object.
        '''
        d = self.persist.getdocument(key)
        self.log.debug("d is type %s with value %s" % (type(d), d))
        return d
    def _storepythondocument(self, key, pd):
        self.log.debug("Storing document for key %s" % key)
        self.persist.storedocument(key, pd)
# def deletesubtree(self, path):
# lst = path.split('.')
# try:
# self.persist.deletesubtree(lst)
# except IndexError:
# raise Exception('path should have more than one key')
def merge(self, src, dest):
'''
Merges python primitive object src into dest and returns merged result.
Lists are appended.
Dictionaries are merged.
Primitive values are overwritten.
NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen
https://stackoverflow.com/questions/7204805/dictionaries-of-dictionaries-merge/15836901
'''
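        # Example of the merge semantics described above:
        #   merge({'a': [1], 'b': 2}, {'a': [0], 'c': 3})
        #     -> {'a': [0, 1], 'b': 2, 'c': 3}
        # (lists are appended to, dicts are merged key by key, primitive
        #  values are overwritten by src)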
key = None
# ## debug output
# sys.stderr.write("DEBUG: %s to %s\n" %(b,a))
self.log.debug("Handling merging %s into %s " % (src, dest))
try:
if dest is None or isinstance(dest, str) or isinstance(dest, unicode) or isinstance(dest, int) \
or isinstance(dest, long) or isinstance(dest, float):
# border case for first run or if a is a primitive
dest = src
elif isinstance(dest, list):
# lists can be only appended
if isinstance(src, list):
# merge lists
for item in src:
if item not in dest:
dest.append(item)
#dest.extend(src)
else:
self.log.error("Refusing to add non-list %s to list %s" % (src, dest))
# append to list
#dest.append(src)
elif isinstance(dest, dict):
# dicts must be merged
if isinstance(src, dict):
for key in src:
if key in dest:
dest[key] = self.merge(src[key], dest[key])
else:
dest[key] = src[key]
elif src is None:
dest = None
else:
self.log.warning("Cannot merge non-dict %s into dict %s" % (src, dest))
else:
raise Exception('NOT IMPLEMENTED "%s" into "%s"' % (src, dest))
except TypeError, e:
raise Exception('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, src, dest))
return dest
##################################################################################
# Infrastructural methods
##################################################################################
def getpairing(self, key, pairingcode):
'''
Pull pairing document, check each entry to see if <entry>.pairingcode = pairingcode.
If so, and cert and key are not none, prepare to return them, delete entry, return Pairing
'''
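        # Flow: load the pairing document, look for an entry whose pairingcode
        # matches, and only return (and remove) it once its 'cert' field has
        # been filled in; otherwise the caller is told to retry later.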
failmsg="Invalid pairing code or not satisfied yet. Try in 30 seconds."
prd = None
pd = self._getpythondocument(key)
self.log.debug("Received dict: %s" % pd)
try:
self.log.debug("Entries are %s" % pd[key] )
for p in pd[key].keys():
self.log.debug("Checking entry %s for pairingcode..." % p)
if pd[key][p]['pairingcode'] == pairingcode:
self.log.debug("Found matching entry %s value %s" % (p, pd[key][p]))
if pd[key][p]['cert'] is not None:
prd = json.dumps(pd[key][p])
try:
self.log.debug("Attempting to delete entry %s from pairing." % p)
pd[key].pop(p, None)
self.log.debug("Deleted entry %s from pairing. Re-storing.." % p)
except KeyError:
self.log.warning("Failed to delete entry %s from pairing." % p)
self._storepythondocument(key, pd)
else:
self.log.info("Certificate for requested pairing not generated yet.")
self.log.debug("Returning pairing entry JSON %s" % prd)
if prd is None:
cherrypy.response.headers["Status"] = "404"
return failmsg
return prd
except KeyError:
cherrypy.response.headers["Status"] = "404"
return failmsg
def getCAChain(self):
'''
'''
pass
class InfoRoot(object):
@cherrypy.expose
def index(self):
return "Nothing to see. Go to /info"
@cherrypy.expose
def generate(self, length=8):
return ''.join(random.sample(string.hexdigits, int(length)))
class InfoServiceAPI(object):
'''
Data at this level is assumed to be JSON text/plain.
'''
exposed = True
def __init__(self, config):
self.log = logging.getLogger()
self.log.debug("Initting InfoServiceAPI...")
self.infohandler = InfoHandler(config)
self.log.debug("InfoServiceAPI init done." )
def GET(self, key, pairingcode=None, entityname=None):
if pairingcode is None and entityname is None:
d = self.infohandler.getdocument(key)
self.log.debug("Document retrieved for key %s " % key)
return d
elif pairingcode is None:
e = self.infohandler.getentity(key, entityname)
self.log.debug("Entity retrieved for key %s and name %s" % (key,entityname))
return e
else:
self.log.debug("Handling pairing retrieval")
d = self.infohandler.getpairing(key, pairingcode)
self.log.debug("Pairing retrieved for code %s with val %s" % (pairingcode,d))
return d
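    # PUT merges the supplied JSON into the existing document/entity, whereas
    # POST below replaces it outright.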
@cherrypy.tools.accept(media='text/plain')
def PUT(self, key, entityname=None, data=None):
rtext = "Something went wrong..."
if entityname is None:
self.log.debug("Storing document %s" % data)
self.infohandler.mergedocument(key, data)
self.log.debug("Document stored for key %s" % key)
rtext= "Document stored for key %s\n" % key
else:
self.log.debug("Storing key %s entityname %s " % (key, entityname))
self.infohandler.mergeentity(key, entityname, data)
rtext= "Entity %s stored in key %s\n" % (entityname, key )
return rtext
def POST(self, key, entityname=None, data=None):
rtext = "Something went wrong..."
if entityname is None:
self.log.debug("Storing document %s" % data)
self.infohandler.storedocument(key, data)
self.log.debug("Document stored for key %s" % key)
rtext= "Document stored for key %s\n" % key
else:
self.log.debug("Storing key %s entityname %s " % (key, entityname))
self.infohandler.storeentity(key, entityname, data)
rtext= "Entity %s stored in key %s\n" % (entityname, key )
return rtext
def DELETE(self, key, entityname ):
'''
Deletes specified entity from <key> document.
'''
self.infohandler.deleteentity(key, entityname)
rtext= "Entity %s deleted in key %s\n" % (entityname, key )
return rtext
def stripquotes(self,s):
rs = s.replace("'","")
return rs
class InfoService(object):
def __init__(self, config):
self.log = logging.getLogger()
self.log.debug('InfoService class init...')
self.config = config
self.certfile = os.path.expanduser(config.get('netcomm','certfile'))
self.keyfile = os.path.expanduser(config.get('netcomm', 'keyfile'))
self.chainfile = os.path.expanduser(config.get('netcomm','chainfile'))
self.httpport = int(config.get('netcomm','httpport'))
self.httpsport = int(config.get('netcomm','httpsport'))
self.sslmodule = config.get('netcomm','sslmodule')
self.log.debug("certfile=%s" % self.certfile)
self.log.debug("keyfile=%s" % self.keyfile)
self.log.debug("chainfile=%s" % self.chainfile)
self.log.debug('InfoService class done.')
def run(self):
self.log.debug('Infoservice running...')
cherrypy.tree.mount(InfoRoot())
cherrypy.tree.mount(InfoServiceAPI(self.config),'/info',
{'/':
{'request.dispatch': cherrypy.dispatch.MethodDispatcher()}
})
#cherrypy.tree.mount(InfoServiceAPI(self.config))
cherrypy.server.unsubscribe()
server1 = cherrypy._cpserver.Server()
server1.socket_port=self.httpsport
server1._socket_host='0.0.0.0'
server1.thread_pool=30
server1.ssl_module = self.sslmodule
server1.ssl_certificate = self.certfile
server1.ssl_private_key = self.keyfile
server1.ssl_certificate_chain = self.chainfile
server1.subscribe()
#server2 = cherrypy._cpserver.Server()
#server2.socket_port=self.httpport
#server2._socket_host="0.0.0.0"
#server2.thread_pool=30
#server2.subscribe()
cherrypy.engine.start()
cherrypy.engine.block()
class InfoServiceCLI(object):
"""class to handle the command line invocation of service.
parse the input options,
setup everything, and run InfoService class
"""
def __init__(self):
self.options = None
self.args = None
self.log = None
self.config = None
self.__presetups()
self.__parseopts()
self.__setuplogging()
self.__platforminfo()
self.__checkroot()
self.__createconfig()
def __presetups(self):
'''
we put here some preliminary steps that
for one reason or another
must be done before anything else
'''
def __parseopts(self):
parser = OptionParser(usage='''%prog [OPTIONS]
vc3-infoservice is a information store for VC3
This program is licenced under the GPL, as set out in LICENSE file.
Author(s):
John Hover <[email protected]>
''', version="%prog $Id: infoservice.py 1-13-17 23:58:06Z jhover $" )
parser.add_option("-d", "--debug",
dest="logLevel",
default=logging.WARNING,
action="store_const",
const=logging.DEBUG,
help="Set logging level to DEBUG [default WARNING]")
parser.add_option("-v", "--info",
dest="logLevel",
default=logging.WARNING,
action="store_const",
const=logging.INFO,
help="Set logging level to INFO [default WARNING]")
parser.add_option("--console",
dest="console",
default=False,
action="store_true",
help="Forces debug and info messages to be sent to the console")
parser.add_option("--quiet", dest="logLevel",
default=logging.WARNING,
action="store_const",
const=logging.WARNING,
help="Set logging level to WARNING [default]")
parser.add_option("--maxlogsize", dest="maxlogsize",
default=4096,
action="store",
type="int",
help="Max log size, in MB.")
parser.add_option("--logrotations", dest="logrotations",
default=2,
action="store",
type="int",
help="Number of log backups to keep.")
default_conf = "/etc/vc3/vc3-infoservice.conf"
default_conf = ','.join([default_conf, os.path.expanduser('~/git/vc3-infoservice/etc/vc3-infoservice.conf')])
if 'VC3_SERVICES_HOME' in os.environ:
# if running inside the builder...
default_conf = ','.join([default_conf, os.path.expanduser('~/vc3-services/etc/vc3-infoservice.conf'), os.path.expanduser('~/vc3-services/etc/vc3-infoservice-local.conf')])
parser.add_option("--conf", dest="confFiles",
default=default_conf,
action="store",
metavar="FILE1[,FILE2,FILE3]",
help="Load configuration from FILEs (comma separated list)")
parser.add_option("--log", dest="logfile",
default="stdout",
metavar="LOGFILE",
action="store",
help="Send logging output to LOGFILE or SYSLOG or stdout [default <syslog>]")
parser.add_option("--runas", dest="runAs",
#
# By default
#
default=pwd.getpwuid(os.getuid())[0],
action="store",
metavar="USERNAME",
help="If run as root, drop privileges to USER")
(self.options, self.args) = parser.parse_args()
self.options.confFiles = self.options.confFiles.split(',')
def __setuplogging(self):
"""
Setup logging
"""
self.log = logging.getLogger()
if self.options.logfile == "stdout":
logStream = logging.StreamHandler()
else:
lf = os.path.expanduser(self.options.logfile)
logdir = os.path.dirname(lf)
if not os.path.exists(logdir):
os.makedirs(logdir)
runuid = pwd.getpwnam(self.options.runAs).pw_uid
rungid = pwd.getpwnam(self.options.runAs).pw_gid
os.chown(logdir, runuid, rungid)
#logStream = logging.FileHandler(filename=lf)
logStream = logging.handlers.RotatingFileHandler(filename=lf, maxBytes=1024 * 1024 * self.options.maxlogsize, backupCount=self.options.logrotations)
# Check python version
major, minor, release, st, num = sys.version_info
if major == 2 and minor == 4:
FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(name)s %(filename)s:%(lineno)d : %(message)s'
else:
FORMAT='%(asctime)s (UTC) [ %(levelname)s ] %(name)s %(filename)s:%(lineno)d %(funcName)s(): %(message)s'
formatter = logging.Formatter(FORMAT)
formatter.converter = time.gmtime # to convert timestamps to UTC
logStream.setFormatter(formatter)
self.log.addHandler(logStream)
# adding a new Handler for the console,
# to be used only for DEBUG and INFO modes.
if self.options.logLevel in [logging.DEBUG, logging.INFO]:
if self.options.console:
console = logging.StreamHandler(sys.stdout)
console.setFormatter(formatter)
console.setLevel(self.options.logLevel)
self.log.addHandler(console)
self.log.setLevel(self.options.logLevel)
self.log.info('Logging initialized at level %s.' % self.options.logLevel)
def _printenv(self):
envmsg = ''
for k in sorted(os.environ.keys()):
envmsg += '\n%s=%s' %(k, os.environ[k])
self.log.debug('Environment : %s' %envmsg)
def __platforminfo(self):
'''
display basic info about the platform, for debugging purposes
'''
self.log.info('platform: uname = %s %s %s %s %s %s' %platform.uname())
self.log.info('platform: platform = %s' %platform.platform())
self.log.info('platform: python version = %s' %platform.python_version())
self._printenv()
def __checkroot(self):
"""
If running as root, drop privileges to --runas' account.
"""
starting_uid = os.getuid()
starting_gid = os.getgid()
starting_uid_name = pwd.getpwuid(starting_uid)[0]
hostname = socket.gethostname()
if os.getuid() != 0:
self.log.info("Already running as unprivileged user %s at %s" % (starting_uid_name, hostname))
if os.getuid() == 0:
try:
runuid = pwd.getpwnam(self.options.runAs).pw_uid
rungid = pwd.getpwnam(self.options.runAs).pw_gid
os.chown(self.options.logfile, runuid, rungid)
os.setgid(rungid)
os.setuid(runuid)
os.seteuid(runuid)
os.setegid(rungid)
self._changehome()
self._changewd()
self.log.info("Now running as user %d:%d at %s..." % (runuid, rungid, hostname))
self._printenv()
except KeyError, e:
self.log.error('No such user %s, unable run properly. Error: %s' % (self.options.runAs, e))
sys.exit(1)
except OSError, e:
self.log.error('Could not set user or group id to %s:%s. Error: %s' % (runuid, rungid, e))
sys.exit(1)
def _changehome(self):
'''
Set environment HOME to user HOME.
'''
runAs_home = pwd.getpwnam(self.options.runAs).pw_dir
os.environ['HOME'] = runAs_home
self.log.debug('Setting up environment variable HOME to %s' %runAs_home)
def _changewd(self):
'''
changing working directory to the HOME directory of the new user,
'''
runAs_home = pwd.getpwnam(self.options.runAs).pw_dir
os.chdir(runAs_home)
self.log.debug('Switching working directory to %s' %runAs_home)
def __createconfig(self):
"""Create config, add in options...
"""
if self.options.confFiles != None:
try:
self.log.debug("Conf file list %s" % self.options.confFiles)
self.config = ConfigParser()
rfs = self.config.read(self.options.confFiles)
self.log.debug("Read config file(s) %s" % rfs)
except Exception, e:
self.log.error('Config failure')
sys.exit(1)
#self.config.set("global", "configfiles", self.options.confFiles)
def run(self):
"""
Create Daemon and enter main loop
"""
try:
self.log.info('Creating Daemon and entering main loop...')
infosrv = InfoService(self.config)
infosrv.run()
        except KeyboardInterrupt:
            self.log.info('Caught keyboard interrupt - exiting')
            sys.exit(0)
except ImportError, errorMsg:
self.log.error('Failed to import necessary python module: %s' % errorMsg)
sys.exit(1)
except:
self.log.error('''Unexpected exception!''')
# The following line prints the exception to the logging module
self.log.error(traceback.format_exc(None))
print(traceback.format_exc(None))
sys.exit(1)
if __name__ == '__main__':
iscli = InfoServiceCLI()
iscli.run()
| gpl-3.0 | 2,180,025,884,097,617,200 | 37.962914 | 183 | 0.526464 | false |
PhilipSkinner/PiR | hive/api.py | 1 | 4615 | import httplib
import urllib
import json
import devices.light
class API:
def __init__(self, username, password):
self.username = username
self.password = password
self.token = None
self.lights = []
self.login()
self.getLights()
def setBrightness(self, id, value):
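        # Set the target brightness attribute of a single light node via a
        # PUT to the Omnia nodes API; returns False on any non-200 response.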
conn = self.getAPIConnection()
conn.request('PUT', '/omnia/nodes/%s' % id, json.dumps({
"nodes" : [
{
"attributes" : {
"brightness" : {
"targetValue" : value
}
}
}
]
}), {
'Host' : 'api-prod.bgchprod.info',
'Accept' : 'application/vnd.alertme.zoo-6.4+json',
'X-AlertMe-Client' : 'Hive Web Dashboard',
'Content-Type' : 'application/json',
'X-Omnia-Access-Token' : self.token
})
response = conn.getresponse()
if response.status != 200:
return False
return True
def lightOff(self, id):
conn = self.getAPIConnection()
conn.request('PUT', '/omnia/nodes/%s' % id, json.dumps({
"nodes" : [
{
"attributes" : {
"state" : {
"targetValue" : "OFF"
}
}
}
]
}), {
'Host' : 'api-prod.bgchprod.info',
'Accept' : 'application/vnd.alertme.zoo-6.4+json',
'X-AlertMe-Client' : 'Hive Web Dashboard',
'Content-Type' : 'application/json',
'X-Omnia-Access-Token' : self.token
})
response = conn.getresponse()
if response.status != 200:
return False
return True
def lightOn(self, id):
conn = self.getAPIConnection()
conn.request('PUT', '/omnia/nodes/%s' % id, json.dumps({
"nodes" : [
{
"attributes" : {
"state" : {
"targetValue" : "ON"
}
}
}
]
}), {
'Host' : 'api-prod.bgchprod.info',
'Accept' : 'application/vnd.alertme.zoo-6.4+json',
'X-AlertMe-Client' : 'Hive Web Dashboard',
'Content-Type' : 'application/json',
'X-Omnia-Access-Token' : self.token
})
response = conn.getresponse()
if response.status != 200:
return False
return True
def getLights(self):
#get a list of all of our lights
conn = self.getAPIConnection()
conn.request('GET', '/omnia/nodes', '', {
'Host' : 'api-prod.bgchprod.info',
'Accept' : 'application/vnd.alertme.zoo-6.4+json',
'X-AlertMe-Client' : 'Hive Web Dashboard',
'X-Omnia-Access-Token' : self.token
})
response = conn.getresponse()
if response.status != 200:
return
data = json.loads(response.read())
for node in data['nodes']:
#is it a light?
if node['nodeType'] == 'http://alertme.com/schema/json/node.class.light.json#':
#yes, init a light object from this node
self.lights.append(devices.light.HiveLight(node))
def getAuthConnection(self):
return httplib.HTTPSConnection("beekeeper.hivehome.com")
def getAPIConnection(self):
return httplib.HTTPSConnection("api-prod.bgchprod.info")
def login(self):
conn = self.getAuthConnection()
conn.request('POST', '/1.0/gateway/login', json.dumps({
'username' : self.username,
'password' : self.password
}), {
'Host' : 'beekeeper.hivehome.com',
'Accept' : 'application/json, text/plain, */*',
'Content-Type' : 'application/json',
})
response = conn.getresponse()
if response.status != 200:
return False
data = json.loads(response.read())
#get our access token
self.token = data['token']
return True | mit | -1,310,094,038,668,729,900 | 29.195946 | 91 | 0.438787 | false |
adamcataldo/djscrooge | djscrooge/util/data_types.py | 1 | 5912 | """This module contains the utility data types of the DJ Scrooge Backtesting API.
Copyright (C) 2012 James Adam Cataldo
This file is part of DJ Scrooge.
DJ Scrooge is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DJ Scrooge. If not, see <http://www.gnu.org/licenses/>.
"""
class Node(object):
"""A doubly-linkned list node object"""
next_node = None
prev_node = None
def __init__(self, value):
"""Construct a new Node with the given value."""
self.value = value
class OrderedSet(object):
"""An ordered set with O(1) insert and delete operations.
  Note that the order of the elements is irrelevant, but the set
  cannot contain duplicate elements. Trying to add an existing
  element to the set will cause an error.
"""
def __init__(self):
"""Construct a new OrderedSet"""
self.__hash_map = {}
self.__head = None
self.__tail = None
self.__current = None
self.__length = 0
def __create_node(self, element):
if self.__hash_map.has_key(element):
raise KeyError('The set already contains the given element.')
node = Node(element)
self.__hash_map[element] = node
self.__length += 1
return node
def __test_and_set_head(self, node):
if self.__head is None:
self.__head = node
self.__tail = node
return True
return False
def append(self, element):
"""Append an item to the end of the ordered set."""
node = self.__create_node(element)
if not self.__test_and_set_head(node):
self.__tail.next_node = node
node.prev_node = self.__tail
self.__tail = node
def prepend(self, element):
"""Prepend an item to the beginning of the ordered set."""
node = self.__create_node(element)
if not self.__test_and_set_head(node):
self.__head.prev_node = node
node.next_node = self.__head
self.__head = node
def insert_after(self, after_element, element):
"""Insert the given element after the after_element in the ordered set."""
if not self.__hash_map.has_key(after_element):
raise KeyError('The after_element is not in the OrderedSet.')
node = self.__create_node(element)
prev_node = self.__hash_map[after_element]
node.prev_node = prev_node
next_node = prev_node.next_node
prev_node.next_node = node
if next_node is None:
self.__tail = node
else:
next_node.prev_node = node
node.next_node = next_node
def has_element(self, element):
"""Returns True if and only if the element is in the set."""
return self.__hash_map.has_key(element)
def remove(self, element):
"""Remove the givne element form the set."""
if not self.__hash_map.has_key(element):
raise KeyError('The given element is not in the set.')
node = self.__hash_map[element]
self.__length -= 1
if node is self.__head and node is self.__tail:
self.__head = None
self.__tail = None
elif node is self.__head:
self.__head = self.__head.next_node
self.__head.prev_node = None
elif node is self.__tail:
self.__tail = self.__tail.prev_node
self.__tail.next_node = None
else:
next_node = node.next_node
prev_node = node.prev_node
prev_node.next_node = next_node
next_node.prev_node = prev_node
del self.__hash_map[element]
def __iter__(self):
"""Identify this object as an iterator."""
return self
def next(self):
"""Returned the next item when this sequence is being iterated."""
if self.__current is None:
self.__current = self.__head
else:
self.__current = self.__current.next_node
if self.__current is None:
raise StopIteration
return self.__current.value
def __len__(self):
"""Returnes the number of elements in teh set."""
return self.__length
def iterator_to_list(iterator):
"""Given an arbitrary iterator, create a list of the values.
Note that this should not be used for infinite iterators (streams).
"""
result = []
for x in iterator:
result.append(x)
return result
def index_in_sorted_list(element, sorted_list, left=0, right=None):
"""Returns the index of the given element in the sorted list, or -1 if the element is not found.
This uses a binary search, so the list must be sorted on a data type for which comparison
operators are well defined.
"""
if right is None:
right = len(sorted_list)
while left < right:
mid = (left+right) / 2
midval = sorted_list[mid]
if midval < element:
left = mid+1
elif midval > element:
right = mid
else:
return mid
return -1
def glb_index_in_sorted_list(element, sorted_list, left=0, right=None):
"""Returns the index of the greatest-lower-bound of the element in
the sorted list, or -1 if the element is less than all elements in the list.
This uses a binary search, so the list must be sorted on a data type for which comparison
operators are well defined.
"""
if right is None:
right = len(sorted_list) - 1
if len(sorted_list) == 0 or sorted_list[left] > element:
return -1
while left < right:
if element >= sorted_list[right]:
left = right
elif element >= sorted_list[(right - left) / 2 + left + 1]:
left = (right - left) / 2 + left + 1
else:
right = (right - left) / 2 + left
return right | gpl-3.0 | 6,722,678,776,473,595,000 | 31.489011 | 98 | 0.644621 | false |
robin900/sqlalchemy | lib/sqlalchemy/util/_collections.py | 26 | 27889 | # util/_collections.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Collection classes and helpers."""
from __future__ import absolute_import
import weakref
import operator
from .compat import threading, itertools_filterfalse, string_types, \
binary_types
from . import py2k
import types
import collections
EMPTY_SET = frozenset()
class AbstractKeyedTuple(tuple):
__slots__ = ()
def keys(self):
"""Return a list of string key names for this :class:`.KeyedTuple`.
.. seealso::
:attr:`.KeyedTuple._fields`
"""
return list(self._fields)
class KeyedTuple(AbstractKeyedTuple):
"""``tuple`` subclass that adds labeled names.
E.g.::
>>> k = KeyedTuple([1, 2, 3], labels=["one", "two", "three"])
>>> k.one
1
>>> k.two
2
Result rows returned by :class:`.Query` that contain multiple
ORM entities and/or column expressions make use of this
class to return rows.
The :class:`.KeyedTuple` exhibits similar behavior to the
``collections.namedtuple()`` construct provided in the Python
standard library, however is architected very differently.
    Unlike ``collections.namedtuple()``, :class:`.KeyedTuple`
does not rely on creation of custom subtypes in order to represent
a new series of keys, instead each :class:`.KeyedTuple` instance
receives its list of keys in place. The subtype approach
of ``collections.namedtuple()`` introduces significant complexity
and performance overhead, which is not necessary for the
:class:`.Query` object's use case.
.. versionchanged:: 0.8
Compatibility methods with ``collections.namedtuple()`` have been
added including :attr:`.KeyedTuple._fields` and
:meth:`.KeyedTuple._asdict`.
.. seealso::
:ref:`ormtutorial_querying`
"""
def __new__(cls, vals, labels=None):
t = tuple.__new__(cls, vals)
if labels:
t.__dict__.update(zip(labels, vals))
else:
labels = []
t.__dict__['_labels'] = labels
return t
@property
def _fields(self):
"""Return a tuple of string key names for this :class:`.KeyedTuple`.
This method provides compatibility with ``collections.namedtuple()``.
.. versionadded:: 0.8
.. seealso::
:meth:`.KeyedTuple.keys`
"""
return tuple([l for l in self._labels if l is not None])
def __setattr__(self, key, value):
raise AttributeError("Can't set attribute: %s" % key)
def _asdict(self):
"""Return the contents of this :class:`.KeyedTuple` as a dictionary.
This method provides compatibility with ``collections.namedtuple()``,
with the exception that the dictionary returned is **not** ordered.
.. versionadded:: 0.8
"""
return dict((key, self.__dict__[key]) for key in self.keys())
class _LW(AbstractKeyedTuple):
__slots__ = ()
def __new__(cls, vals):
return tuple.__new__(cls, vals)
def __reduce__(self):
# for pickling, degrade down to the regular
# KeyedTuple, thus avoiding anonymous class pickling
# difficulties
return KeyedTuple, (list(self), self._real_fields)
def _asdict(self):
"""Return the contents of this :class:`.KeyedTuple` as a dictionary."""
d = dict(zip(self._real_fields, self))
d.pop(None, None)
return d
class ImmutableContainer(object):
def _immutable(self, *arg, **kw):
raise TypeError("%s object is immutable" % self.__class__.__name__)
__delitem__ = __setitem__ = __setattr__ = _immutable
class immutabledict(ImmutableContainer, dict):
clear = pop = popitem = setdefault = \
update = ImmutableContainer._immutable
def __new__(cls, *args):
new = dict.__new__(cls)
dict.__init__(new, *args)
return new
def __init__(self, *args):
pass
def __reduce__(self):
return immutabledict, (dict(self), )
def union(self, d):
if not d:
return self
elif not self:
if isinstance(d, immutabledict):
return d
else:
return immutabledict(d)
else:
d2 = immutabledict(self)
dict.update(d2, d)
return d2
def __repr__(self):
return "immutabledict(%s)" % dict.__repr__(self)
class Properties(object):
"""Provide a __getattr__/__setattr__ interface over a dict."""
__slots__ = '_data',
def __init__(self, data):
object.__setattr__(self, '_data', data)
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(list(self._data.values()))
def __add__(self, other):
return list(self) + list(other)
def __setitem__(self, key, object):
self._data[key] = object
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
def __setattr__(self, key, obj):
self._data[key] = obj
def __getstate__(self):
return {'_data': self._data}
def __setstate__(self, state):
object.__setattr__(self, '_data', state['_data'])
def __getattr__(self, key):
try:
return self._data[key]
except KeyError:
raise AttributeError(key)
def __contains__(self, key):
return key in self._data
def as_immutable(self):
"""Return an immutable proxy for this :class:`.Properties`."""
return ImmutableProperties(self._data)
def update(self, value):
self._data.update(value)
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def keys(self):
return list(self._data)
def values(self):
return list(self._data.values())
def items(self):
return list(self._data.items())
def has_key(self, key):
return key in self._data
def clear(self):
self._data.clear()
class OrderedProperties(Properties):
"""Provide a __getattr__/__setattr__ interface with an OrderedDict
as backing store."""
__slots__ = ()
def __init__(self):
Properties.__init__(self, OrderedDict())
class ImmutableProperties(ImmutableContainer, Properties):
"""Provide immutable dict/object attribute to an underlying dictionary."""
__slots__ = ()
class OrderedDict(dict):
"""A dict that returns keys/values/items in the order they were added."""
__slots__ = '_list',
def __reduce__(self):
return OrderedDict, (self.items(),)
def __init__(self, ____sequence=None, **kwargs):
self._list = []
if ____sequence is None:
if kwargs:
self.update(**kwargs)
else:
self.update(____sequence, **kwargs)
def clear(self):
self._list = []
dict.clear(self)
def copy(self):
return self.__copy__()
def __copy__(self):
return OrderedDict(self)
def sort(self, *arg, **kw):
self._list.sort(*arg, **kw)
def update(self, ____sequence=None, **kwargs):
if ____sequence is not None:
if hasattr(____sequence, 'keys'):
for key in ____sequence.keys():
self.__setitem__(key, ____sequence[key])
else:
for key, value in ____sequence:
self[key] = value
if kwargs:
self.update(kwargs)
def setdefault(self, key, value):
if key not in self:
self.__setitem__(key, value)
return value
else:
return self.__getitem__(key)
def __iter__(self):
return iter(self._list)
def keys(self):
return list(self)
def values(self):
return [self[key] for key in self._list]
def items(self):
return [(key, self[key]) for key in self._list]
if py2k:
def itervalues(self):
return iter(self.values())
def iterkeys(self):
return iter(self)
def iteritems(self):
return iter(self.items())
def __setitem__(self, key, object):
if key not in self:
try:
self._list.append(key)
except AttributeError:
# work around Python pickle loads() with
# dict subclass (seems to ignore __setstate__?)
self._list = [key]
dict.__setitem__(self, key, object)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._list.remove(key)
def pop(self, key, *default):
present = key in self
value = dict.pop(self, key, *default)
if present:
self._list.remove(key)
return value
def popitem(self):
item = dict.popitem(self)
self._list.remove(item[0])
return item
class OrderedSet(set):
def __init__(self, d=None):
set.__init__(self)
self._list = []
if d is not None:
self._list = unique_list(d)
set.update(self, self._list)
else:
self._list = []
def add(self, element):
if element not in self:
self._list.append(element)
set.add(self, element)
def remove(self, element):
set.remove(self, element)
self._list.remove(element)
def insert(self, pos, element):
if element not in self:
self._list.insert(pos, element)
set.add(self, element)
def discard(self, element):
if element in self:
self._list.remove(element)
set.remove(self, element)
def clear(self):
set.clear(self)
self._list = []
def __getitem__(self, key):
return self._list[key]
def __iter__(self):
return iter(self._list)
def __add__(self, other):
return self.union(other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._list)
__str__ = __repr__
def update(self, iterable):
for e in iterable:
if e not in self:
self._list.append(e)
set.add(self, e)
return self
__ior__ = update
def union(self, other):
result = self.__class__(self)
result.update(other)
return result
__or__ = union
def intersection(self, other):
other = set(other)
return self.__class__(a for a in self if a in other)
__and__ = intersection
def symmetric_difference(self, other):
other = set(other)
result = self.__class__(a for a in self if a not in other)
result.update(a for a in other if a not in self)
return result
__xor__ = symmetric_difference
def difference(self, other):
other = set(other)
return self.__class__(a for a in self if a not in other)
__sub__ = difference
def intersection_update(self, other):
other = set(other)
set.intersection_update(self, other)
self._list = [a for a in self._list if a in other]
return self
__iand__ = intersection_update
def symmetric_difference_update(self, other):
set.symmetric_difference_update(self, other)
self._list = [a for a in self._list if a in self]
self._list += [a for a in other._list if a in self]
return self
__ixor__ = symmetric_difference_update
def difference_update(self, other):
set.difference_update(self, other)
self._list = [a for a in self._list if a in self]
return self
__isub__ = difference_update
class IdentitySet(object):
"""A set that considers only object id() for uniqueness.
This strategy has edge cases for builtin types- it's possible to have
two 'foo' strings in one of these sets, for example. Use sparingly.
"""
_working_set = set
def __init__(self, iterable=None):
self._members = dict()
if iterable:
for o in iterable:
self.add(o)
def add(self, value):
self._members[id(value)] = value
def __contains__(self, value):
return id(value) in self._members
def remove(self, value):
del self._members[id(value)]
def discard(self, value):
try:
self.remove(value)
except KeyError:
pass
def pop(self):
try:
pair = self._members.popitem()
return pair[1]
except KeyError:
raise KeyError('pop from an empty set')
def clear(self):
self._members.clear()
def __cmp__(self, other):
raise TypeError('cannot compare sets using cmp()')
def __eq__(self, other):
if isinstance(other, IdentitySet):
return self._members == other._members
else:
return False
def __ne__(self, other):
if isinstance(other, IdentitySet):
return self._members != other._members
else:
return True
def issubset(self, iterable):
other = type(self)(iterable)
if len(self) > len(other):
return False
for m in itertools_filterfalse(other._members.__contains__,
iter(self._members.keys())):
return False
return True
def __le__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issubset(other)
def __lt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) < len(other) and self.issubset(other)
def issuperset(self, iterable):
other = type(self)(iterable)
if len(self) < len(other):
return False
for m in itertools_filterfalse(self._members.__contains__,
iter(other._members.keys())):
return False
return True
def __ge__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.issuperset(other)
def __gt__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return len(self) > len(other) and self.issuperset(other)
def union(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).union(other))
return result
def __or__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.union(other)
def update(self, iterable):
self._members = self.union(iterable)._members
def __ior__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.update(other)
return self
def difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).difference(other))
return result
def __sub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.difference(other)
def difference_update(self, iterable):
self._members = self.difference(iterable)._members
def __isub__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.difference_update(other)
return self
def intersection(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(self._working_set(members).intersection(other))
return result
def __and__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.intersection(other)
def intersection_update(self, iterable):
self._members = self.intersection(iterable)._members
def __iand__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.intersection_update(other)
return self
def symmetric_difference(self, iterable):
result = type(self)()
# testlib.pragma exempt:__hash__
members = self._member_id_tuples()
other = _iter_id(iterable)
result._members.update(
self._working_set(members).symmetric_difference(other))
return result
def _member_id_tuples(self):
return ((id(v), v) for v in self._members.values())
def __xor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
return self.symmetric_difference(other)
def symmetric_difference_update(self, iterable):
self._members = self.symmetric_difference(iterable)._members
def __ixor__(self, other):
if not isinstance(other, IdentitySet):
return NotImplemented
self.symmetric_difference(other)
return self
def copy(self):
return type(self)(iter(self._members.values()))
__copy__ = copy
def __len__(self):
return len(self._members)
def __iter__(self):
return iter(self._members.values())
def __hash__(self):
raise TypeError('set objects are unhashable')
def __repr__(self):
return '%s(%r)' % (type(self).__name__, list(self._members.values()))
class WeakSequence(object):
def __init__(self, __elements=()):
self._storage = [
weakref.ref(element, self._remove) for element in __elements
]
def append(self, item):
self._storage.append(weakref.ref(item, self._remove))
def _remove(self, ref):
self._storage.remove(ref)
def __len__(self):
return len(self._storage)
def __iter__(self):
return (obj for obj in
(ref() for ref in self._storage) if obj is not None)
def __getitem__(self, index):
try:
obj = self._storage[index]
except KeyError:
raise IndexError("Index %s out of range" % index)
else:
return obj()
class OrderedIdentitySet(IdentitySet):
class _working_set(OrderedSet):
# a testing pragma: exempt the OIDS working set from the test suite's
# "never call the user's __hash__" assertions. this is a big hammer,
# but it's safe here: IDS operates on (id, instance) tuples in the
# working set.
__sa_hash_exempt__ = True
def __init__(self, iterable=None):
IdentitySet.__init__(self)
self._members = OrderedDict()
if iterable:
for o in iterable:
self.add(o)
class PopulateDict(dict):
"""A dict which populates missing values via a creation function.
Note the creation function takes a key, unlike
collections.defaultdict.
"""
def __init__(self, creator):
self.creator = creator
def __missing__(self, key):
self[key] = val = self.creator(key)
return val
# Define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
# At this point, these are mostly historical, things
# used to be more complicated.
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
populate_column_dict = PopulateDict
_getters = PopulateDict(operator.itemgetter)
_property_getters = PopulateDict(
lambda idx: property(operator.itemgetter(idx)))
def unique_list(seq, hashfunc=None):
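    # Return a new list with duplicates removed, preserving the original
    # order; ``hashfunc``, if given, defines what counts as a duplicate.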
seen = set()
seen_add = seen.add
if not hashfunc:
return [x for x in seq
if x not in seen
and not seen_add(x)]
else:
return [x for x in seq
if hashfunc(x) not in seen
and not seen_add(hashfunc(x))]
class UniqueAppender(object):
"""Appends items to a collection ensuring uniqueness.
Additional appends() of the same object are ignored. Membership is
determined by identity (``is a``) not equality (``==``).
"""
def __init__(self, data, via=None):
self.data = data
self._unique = {}
if via:
self._data_appender = getattr(data, via)
elif hasattr(data, 'append'):
self._data_appender = data.append
elif hasattr(data, 'add'):
self._data_appender = data.add
def append(self, item):
id_ = id(item)
if id_ not in self._unique:
self._data_appender(item)
self._unique[id_] = True
def __iter__(self):
return iter(self.data)
def coerce_generator_arg(arg):
if len(arg) == 1 and isinstance(arg[0], types.GeneratorType):
return list(arg[0])
else:
return arg
def to_list(x, default=None):
if x is None:
return default
if not isinstance(x, collections.Iterable) or \
isinstance(x, string_types + binary_types):
return [x]
elif isinstance(x, list):
return x
else:
return list(x)
def has_intersection(set_, iterable):
"""return True if any items of set_ are present in iterable.
Goes through special effort to ensure __hash__ is not called
on items in iterable that don't support it.
"""
# TODO: optimize, write in C, etc.
return bool(
set_.intersection([i for i in iterable if i.__hash__])
)
def to_set(x):
if x is None:
return set()
if not isinstance(x, set):
return set(to_list(x))
else:
return x
def to_column_set(x):
if x is None:
return column_set()
if not isinstance(x, column_set):
return column_set(to_list(x))
else:
return x
def update_copy(d, _new=None, **kw):
"""Copy the given dict and update with the given values."""
d = d.copy()
if _new:
d.update(_new)
d.update(**kw)
return d
def flatten_iterator(x):
"""Given an iterator of which further sub-elements may also be
iterators, flatten the sub-elements into a single iterator.
"""
for elem in x:
if not isinstance(elem, str) and hasattr(elem, '__iter__'):
for y in flatten_iterator(elem):
yield y
else:
yield elem
class LRUCache(dict):
"""Dictionary with 'squishy' removal of least
recently used items.
Note that either get() or [] should be used here, but
    generally it's not safe to do an "in" check first as the dictionary
can change subsequent to that call.
"""
def __init__(self, capacity=100, threshold=.5):
self.capacity = capacity
self.threshold = threshold
self._counter = 0
self._mutex = threading.Lock()
def _inc_counter(self):
self._counter += 1
return self._counter
def get(self, key, default=None):
item = dict.get(self, key, default)
if item is not default:
item[2] = self._inc_counter()
return item[1]
else:
return default
def __getitem__(self, key):
item = dict.__getitem__(self, key)
item[2] = self._inc_counter()
return item[1]
def values(self):
return [i[1] for i in dict.values(self)]
def setdefault(self, key, value):
if key in self:
return self[key]
else:
self[key] = value
return value
def __setitem__(self, key, value):
item = dict.get(self, key)
if item is None:
item = [key, value, self._inc_counter()]
dict.__setitem__(self, key, item)
else:
item[1] = value
self._manage_size()
def _manage_size(self):
if not self._mutex.acquire(False):
return
try:
while len(self) > self.capacity + self.capacity * self.threshold:
by_counter = sorted(dict.values(self),
key=operator.itemgetter(2),
reverse=True)
for item in by_counter[self.capacity:]:
try:
del self[item[0]]
except KeyError:
# deleted elsewhere; skip
continue
finally:
self._mutex.release()
_lw_tuples = LRUCache(100)
def lightweight_named_tuple(name, fields):
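    # Build (and memoize in the module-level LRUCache above) a lightweight
    # named-tuple class based on ``_LW`` whose non-None fields are exposed as
    # itemgetter properties.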
hash_ = (name, ) + tuple(fields)
tp_cls = _lw_tuples.get(hash_)
if tp_cls:
return tp_cls
tp_cls = type(
name, (_LW,),
dict([
(field, _property_getters[idx])
for idx, field in enumerate(fields) if field is not None
] + [('__slots__', ())])
)
tp_cls._real_fields = fields
tp_cls._fields = tuple([f for f in fields if f is not None])
_lw_tuples[hash_] = tp_cls
return tp_cls
class ScopedRegistry(object):
"""A Registry that can store one or multiple instances of a single
class on the basis of a "scope" function.
The object implements ``__call__`` as the "getter", so by
calling ``myregistry()`` the contained object is returned
for the current scope.
:param createfunc:
a callable that returns a new object to be placed in the registry
:param scopefunc:
a callable that will return a key to store/retrieve an object.
"""
def __init__(self, createfunc, scopefunc):
"""Construct a new :class:`.ScopedRegistry`.
:param createfunc: A creation function that will generate
a new value for the current scope, if none is present.
:param scopefunc: A function that returns a hashable
token representing the current scope (such as, current
thread identifier).
"""
self.createfunc = createfunc
self.scopefunc = scopefunc
self.registry = {}
def __call__(self):
key = self.scopefunc()
try:
return self.registry[key]
except KeyError:
return self.registry.setdefault(key, self.createfunc())
def has(self):
"""Return True if an object is present in the current scope."""
return self.scopefunc() in self.registry
def set(self, obj):
"""Set the value for the current scope."""
self.registry[self.scopefunc()] = obj
def clear(self):
"""Clear the current scope, if any."""
try:
del self.registry[self.scopefunc()]
except KeyError:
pass
class ThreadLocalRegistry(ScopedRegistry):
"""A :class:`.ScopedRegistry` that uses a ``threading.local()``
variable for storage.
"""
def __init__(self, createfunc):
self.createfunc = createfunc
self.registry = threading.local()
def __call__(self):
try:
return self.registry.value
except AttributeError:
val = self.registry.value = self.createfunc()
return val
def has(self):
return hasattr(self.registry, "value")
def set(self, obj):
self.registry.value = obj
def clear(self):
try:
del self.registry.value
except AttributeError:
pass
def _iter_id(iterable):
"""Generator: ((id(o), o) for o in iterable)."""
for item in iterable:
yield id(item), item
| mit | -3,541,001,410,477,029,000 | 25.688038 | 79 | 0.570655 | false |
adaitche/luigi | test/contrib/_webhdfs_test.py | 6 | 2124 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from helpers import unittest
from luigi.contrib import webhdfs
from nose.plugins.attrib import attr
@attr('apache')
class TestWebHdfsTarget(unittest.TestCase):
'''
This test requires a running Hadoop cluster with WebHdfs enabled
This test requires the luigi.cfg file to have a `hdfs` section
with the namenode_host, namenode_port and user settings.
'''
def setUp(self):
self.testDir = "/tmp/luigi-test".format()
self.path = os.path.join(self.testDir, 'out.txt')
self.client = webhdfs.WebHdfsClient()
self.target = webhdfs.WebHdfsTarget(self.path)
def tearDown(self):
if self.client.exists(self.testDir):
self.client.remove(self.testDir, recursive=True)
def test_write(self):
self.assertFalse(self.client.exists(self.path))
output = self.target.open('w')
output.write('this is line 1\n')
output.write('this is line #2\n')
output.close()
self.assertTrue(self.client.exists(self.path))
def test_read(self):
self.test_write()
input_ = self.target.open('r')
all_test = 'this is line 1\nthis is line #2\n'
self.assertEqual(all_test, input_.read())
input_.close()
def test_read_lines(self):
self.test_write()
input_ = self.target.open('r')
lines = list(input_.readlines())
self.assertEqual(lines[0], 'this is line 1')
self.assertEqual(lines[1], 'this is line #2')
input_.close()
| apache-2.0 | -6,699,413,453,768,513,000 | 31.181818 | 74 | 0.664783 | false |
mirageglobe/upp-tracker | tracer/ftraceadvance.py | 1 | 6390 | from ftracer_script import *
## note the capture details file is
## warnings is needed to suppress errors from mousehook tracker
## Function: This is the main script file which houses all the scripts
import sys
import pythoncom, pyHook
import win32con, win32com.client
import win32gui, win32api
import codecs
import wmi
import chardet
import time, pickle
import warnings
import sqlite3
## ----------------------------------
## Global Var
## ----------------------------------
ftraceadvance_lastaction = 'start' # this stores the last action of the user
ftraceadvance_sqlitedb = 'tracemouse.sqlite' # structure is id int and (mousetimeepoch, mousetimeboot, mousepos, winfile, winhandle, winname, winforename)
## ----------------------------------
## Mouse / Keyboard Tracing Functions
## ----------------------------------
def winEnumHandler( hwnd, ctx ):
if win32gui.IsWindowVisible( hwnd ):
print hex(hwnd), win32gui.GetWindowText( hwnd )
def strutf8encode(sstring):
rtnString = ""
if sstring != None:
## codecs.lookup(sstring)
## ustring = unicode(sstring,'utf_8')
## print ustring
rtn_encoding = chardet.detect(sstring)
if rtn_encoding['encoding'] != None:
rtnString = sstring.decode(rtn_encoding['encoding'],'replace')
return rtnString.encode('utf_8')
def OnKeyboardEvent(event):
## Function: Allows escape key to be pressed to exit any script is running
if event.Key == "Escape":
exit()
return True
def OnMouseEvent(event):
## this function uses mouse to trace the user input of applications
global ftraceadvance_lastaction
global ftraceadvance_sqlitedb
# called when mouse events are received. prints out mouse events
if event.MessageName != "mouse move":
print 'MessageName:', event.MessageName
print 'Message:', event.Message
print 'Time:', event.Time
print 'WindowHandler:', hex(event.Window)
print 'WindowName:', strutf8encode(event.WindowName)
print 'Position:', event.Position
print 'Wheel:', event.Wheel #not used in wheel detection
print 'Injected:', event.Injected #rarely used
print time.time()
if event.WindowName == None:
window_name = 'None'
else:
window_name = event.WindowName
ftemp_wfore = strutf8encode(win32gui.GetWindowText(win32gui.GetForegroundWindow())) # This special method captures window name
ftemp_wname = AppDetector(strutf8encode(event.WindowName))
ftemp_whand = str(event.Window) #window handler
ftemp_mpos = str(event.Position)
ftemp_mact = str(event.MessageName)
ftemp_mnum = int(event.Message)
ftemp_epoc = time.time() #epoch time of mouse
ftemp_rtime = event.Time #running counter of mouse
ftemp_wfile = str('')
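        # Only write a row when the active window differs from the one seen on
        # the previous click, so the trace records application switches rather
        # than every individual click.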
conn = sqlite3.connect(ftraceadvance_sqlitedb)
conn.text_factory = str
curs = conn.cursor()
if ftraceadvance_lastaction != window_name:
print ftraceadvance_lastaction
curs.execute('insert into ftrace(mousetimeepoch, mousetimeboot, mousepos, mouseact, mousenum, winfile, winhandle, winname, winforename) values(?, ?, ?, ?, ?, ?, ?, ?, ?)',(ftemp_epoc,ftemp_rtime,ftemp_mpos,ftemp_mact,ftemp_mnum,ftemp_wfile,ftemp_whand,ftemp_wname,ftemp_wfore))
ftraceadvance_lastaction = strutf8encode(event.WindowName)
print ftraceadvance_lastaction
conn.commit()
curs.close()
return True # return true is always needed, otherwise it will show an error
def AppDetector(data_window=''):
## This novel function tries to put in a value for the application detected
values = {
'': 'Unknown',
'Unknown': 'Unknown',
'C:\Python27\python.exe': 'Python',
'C:\Python26\python.exe': 'Python',
'FolderView': 'Windows Explorer - Folderview',
'Downloads': 'Windows Explorer - downloads',
'OpenOffice.org Writer': 'OpenOffice Writer'
}
return values.get(data_window, 'Unknown')
## ----------------------------------
## SQLite Writing Functions
## ----------------------------------
def sqlite_table(file_write='tracemouse.sqlite'):
# function creates sqlite 3 db and connects to a new file
    conn = sqlite3.connect(file_write)
curs = conn.cursor()
curs.execute('''create table if not exists ftrace (id integer primary key, mousetimeepoch float, mousetimeboot float, mousepos text, mouseact text, mousenum integer, winfile text, winhandle text, winname text, winforename text)''')
curs.execute('''create table if not exists fswitch (id integer primary key, objsource text, objtarget text, rstrength integer)''')
conn.commit()
curs.close()
return True
def sqlite_query(mquery, file_write='tracemouse.sqlite'):
# function inserts into a sqlite table
    conn = sqlite3.connect(file_write)
curs = conn.cursor()
curs.execute(mquery)
conn.commit()
curs.close()
return True
def sqlite_cleardb():
    # remove all previously logged rows from the trace tables
    conn = sqlite3.connect('tracemouse.sqlite')
    curs = conn.cursor()
    curs.execute('delete from ftrace')
    curs.execute('delete from fswitch')
    conn.commit()
    curs.close()
    return True
## ----------------------------------
## Other handy Functions
## ----------------------------------
def rem_duplicates(seq, idfun=None):
# order preserving
# remove duplicates from a list
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
def fxn():
warnings.warn("depreciated", DeprecationWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fxn()
if __name__ == "__main__":
# Captures mouse events and writes into log file (common_script.py). Use win32 py to hook on mouse events
# This trace module will run in a continual loop until application is stopped
sqlite_table('tracemouse.sqlite')
bm = pyHook.HookManager()
bm.MouseAll = OnMouseEvent
bm.HookMouse()
bk = pyHook.HookManager()
bk.KeyDown = OnKeyboardEvent
bk.HookKeyboard()
pythoncom.PumpMessages()
#shell = win32com.client.Dispatch("WScript.Shell")
#shell.AppActivate('Command Prompt') # this sets window to focus
#x1 = win32com.client.DispatchEx("PDFcreator.Application")
| apache-2.0 | -7,871,875,556,618,615,000 | 29.869565 | 283 | 0.651174 | false |
praekelt/jmbo-foundry | foundry/feeds.py | 1 | 2132 | from collections import namedtuple
from PIL import Image
from django.contrib.syndication.views import Feed
from django.shortcuts import get_object_or_404
from django.utils import feedgenerator
from django.contrib.sites.models import get_current_site
from jmbo.models import ModelBase
from foundry.models import Listing, Page
class ListingFeed(Feed):
"""Feed for items in a listing"""
feed_type = feedgenerator.Rss201rev2Feed
description = ""
def get_object(self, request, slug):
self.request = request
return get_object_or_404(Listing, slug=slug)
def title(self, obj):
return obj.title
def link(self, obj):
return obj.get_absolute_url()
def items(self, obj):
if not obj.enable_syndication:
return ModelBase.objects.none()
qs = obj.queryset()
limit = obj.items_per_page or 10
return qs[:limit]
def get_image_info(self, item):
# Cache image attributes on item to avoid multiple calls to load image
cached = getattr(item, '_image_info', None)
if cached is not None:
return cached
info = namedtuple('Info', ['url', 'length', 'mime_type'])('', 0, '')
if item.image:
blob = None
try:
blob = Image.open(item.image.path)
except IOError:
pass
if blob:
info = feedgenerator.Enclosure(
self.request.build_absolute_uri(item.image_detail_url),
len(blob.tobytes()),
'image/%s' % blob.format.lower()
)
setattr(item, '_image_info', info)
return info
def item_title(self, item):
return item.title
def item_description(self, item):
return item.description
def item_enclosure_url(self, item):
return self.get_image_info(item).url
def item_enclosure_length(self, item):
return self.get_image_info(item).length
def item_enclosure_mime_type(self, item):
return self.get_image_info(item).mime_type
listing_feed = ListingFeed()
| bsd-3-clause | -8,969,731,339,729,982,000 | 27.052632 | 78 | 0.609756 | false |
abhishekgahlot/kivy | kivy/adapters/simplelistadapter.py | 3 | 2228 | '''
SimpleListAdapter
=================
.. versionadded:: 1.5
.. warning::
This code is still experimental, and its API is subject to change in a
future version.
The :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is used for
basic lists. For example, it can be used for displaying a list of read-only
strings that do not require user interaction.
'''
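# Example usage (illustrative sketch; assumes the standard Label widget as the
# item view class):
#
#    from kivy.adapters.simplelistadapter import SimpleListAdapter
#    from kivy.uix.label import Label
#
#    simple_list_adapter = SimpleListAdapter(
#        data=["Item #{0}".format(i) for i in range(100)],
#        cls=Label)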
__all__ = ('SimpleListAdapter', )
from kivy.adapters.adapter import Adapter
from kivy.properties import ListProperty
from kivy.lang import Builder
class SimpleListAdapter(Adapter):
'''A :class:`~kivy.adapters.simplelistadapter.SimpleListAdapter` is an
adapter around a Python list.
From :class:`~kivy.adapters.adapter.Adapter`, the
:class:`~kivy.adapters.simplelistadapter.ListAdapter` gets cls, template,
and args_converter properties.
'''
data = ListProperty([])
'''The data list property contains a list of objects (which can be strings)
that will be used directly if no args_converter function is provided. If
there is an args_converter, the data objects will be passed to it for
instantiating the item view class instances.
:data:`data` is a :class:`~kivy.properties.ListProperty` and
defaults to [].
'''
def __init__(self, **kwargs):
if 'data' not in kwargs:
raise Exception('list adapter: input must include data argument')
if not isinstance(kwargs['data'], list) and \
not isinstance(kwargs['data'], tuple):
raise Exception('list adapter: data must be a tuple or list')
super(SimpleListAdapter, self).__init__(**kwargs)
def get_count(self):
return len(self.data)
def get_data_item(self, index):
if index < 0 or index >= len(self.data):
return None
return self.data[index]
# Returns a view instance for an item.
def get_view(self, index):
item = self.get_data_item(index)
if item is None:
return None
item_args = self.args_converter(index, item)
if self.cls:
instance = self.cls(**item_args)
return instance
else:
return Builder.template(self.template, **item_args)
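# Illustrative usage sketch (not part of the original module); Label and the
# lambda args_converter below are assumptions chosen for demonstration only:
#
#     from kivy.uix.label import Label
#     from kivy.adapters.simplelistadapter import SimpleListAdapter
#
#     adapter = SimpleListAdapter(
#         data=["alpha", "beta", "gamma"],
#         cls=Label,
#         args_converter=lambda index, item: {"text": item})
#     adapter.get_count()  # -> 3
#     view = adapter.get_view(0)  # -> Label widget displaying "alpha"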
| mit | 5,756,030,449,973,285,000 | 29.520548 | 79 | 0.65395 | false |
roim/PyTranscribe | plotting/hps.py | 1 | 3155 | # Copyright 2015 Rodrigo Roim Ferreira
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
""" Contains functions to plot graphs related to the Harmonic Product Spectrum. """
import matplotlib.pyplot as _pl
import numpy as _np
import scipy.signal as _sig
import mathhelper as _mh
import mtheory as _mt
import pda.hps as _hps
import soundfiles as _sf
def plothps(audiopath, title="Harmonic Product Spectrum", horizontal_harmonics=7, plotpath=None):
""" Plots a visual representation of the HPS with 3 harmonics. """
samplerate, samples = _sf.readfile(audiopath)
X = _np.fft.fft(samples, samplerate)
# amplitude to decibel
dBX = 20.*_np.log10(_np.abs(X)/10e-6) - 120
    # discard the mirrored (negative-frequency) half of the spectrum
dBX = dBX[0:dBX.size/2]
f, (ax0, ax1, ax2, ax3) = _pl.subplots(4, sharex=True, sharey=True)
axs = (ax0, ax1, ax2, ax3)
sum = _np.zeros_like(dBX)
for i in range(3):
dec = _sig.decimate(dBX, i + 1)
sum[:dec.size] += dec
axs[i].plot(dec, 'b')
sum = _np.divide(sum, 3)
ax3.plot(sum, 'b')
ax0.set_title(title)
reference = _np.argmax(sum)
xlocs = _np.float32([n * reference for n in range(1 + horizontal_harmonics)])
ax3.set_xlabel("Frequency (Hz)")
ax3.set_xlim([0, _np.max(xlocs)])
ax3.set_xticks(xlocs)
ax3.set_xticklabels(["%.0f" % l for l in xlocs])
ax0.set_ylabel("Amplitude (dB)")
ax1.set_ylabel("Decimated by 2")
ax2.set_ylabel("Decimated by 3")
ax3.set_ylabel("Mean")
ax3.set_ylim([40, 1.15*_np.max(sum)])
if plotpath:
_pl.savefig(plotpath, bbox_inches="tight")
else:
_pl.show()
_pl.clf()
def plot_tracking(audiopath, title="", binsize=1470, tune=False, plotpath=None, repetitions=10):
""" Plots the HPS tracking of an audio file. """
samplerate, samples = _sf.readfile(audiopath)
detections = samples.size//binsize
p = _np.zeros(repetitions*detections)
for i in range(detections):
f = _hps.hps(samples[i*binsize:(i+1)*binsize])
if tune:
f = _mh.find_nearest_value(_mt.notes, f)
p = _np.repeat(p, repetitions)
_pl.plot(p)
_pl.title(title)
xlocs = _np.linspace(0, 10*detections, 5)
_pl.xlabel("Time (s)")
_pl.xlim([0, _np.max(xlocs)])
_pl.xticks(xlocs, ["%.2f" % l for l in _np.multiply(xlocs, binsize/(repetitions*samplerate))])
_pl.ylabel("Fundamental Frequency (Hz)")
_pl.ylim((0.9*_np.min(p), 1.05*_np.max(p)))
if plotpath:
_pl.savefig(plotpath, bbox_inches="tight")
else:
_pl.show()
_pl.clf()
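# Illustrative usage sketch (not part of the original module); "recording.wav"
# is a placeholder for any audio file readable by soundfiles.readfile.
if __name__ == "__main__":
    plothps("recording.wav", title="HPS of recording.wav")
    plot_tracking("recording.wav", title="Pitch tracking of recording.wav", tune=True)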
| gpl-3.0 | -2,342,360,364,448,503,000 | 28.764151 | 98 | 0.647861 | false |
RandomWireTechnologies/ctrl-o-node | nfc.py | 1 | 2078 | #!/usr/bin/python
import pn532
import hashlib
import binascii
import time
import config
def init():
pn532.init()
def close():
pn532.close()
try:
log
except:
def log(data):
print time.strftime("%Y-%b-%d:%H:%M:%S"),":",data
def getCard():
card = None
card = pn532.read()
if (card == None or card == 0):
return None
log("Found Serial # %s" % (card[0]))
log(" -> hash = %s" % (binascii.hexlify(card[1])))
return (card[0],binascii.hexlify(card[1]))
def generateNewHash(oldHash):
newhash = hashlib.sha256("%s %s %s" % (oldHash,config.hashSecret,str(time.time()))).digest()
return (newhash,binascii.hexlify(newhash))
def writeHash(hash):
status = pn532.write(hash)
if (status==0):
log(" -> Write successful")
else:
        log(" -> Write failed: err no %s" % status)
return False
card = pn532.read()
if (card == None or card == 0):
log(" -> Failed to read after write")
return False
if (card[1] != hash):
log(" -> Failed to verify hash")
return False
# If all that didn't fail, we must have succeeded!
return True
def initCard():
log("Request to init new card")
card = None
while (card == None or card == 0):
card = pn532.read()
if (pn532.format() != 1):
log("Failed to format card")
return None
newhash = hashlib.sha256("%s %s %s" % (card[0],config.hashSecret,str(time.time()))).digest()
status = pn532.write(newhash)
if (status != 0):
        log("Write failed: %s" % status)
return None
retries = 0
while(True):
card2 = pn532.read()
if ((card2[0] == card[0]) & (card2[1]==newhash)):
break
retries = retries + 1
if (retries > 5):
break
if ((card2[0] != card[0]) | (card2[1]!=newhash)):
log("Verify failed")
return None
log ("New card written serial=%s : hash=%s" % (card[0],binascii.hexlify(newhash)))
return(card[0],binascii.hexlify(newhash))
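# Illustrative hash-rotation sketch (not part of the original module); it
# assumes a PN532 reader is attached and config.hashSecret is configured.
if __name__ == "__main__":
    init()
    try:
        card = getCard()
        if card is not None:
            serial, old_hash_hex = card
            new_hash_raw, new_hash_hex = generateNewHash(old_hash_hex)
            if writeHash(new_hash_raw):
                log("Rotated hash for card %s to %s" % (serial, new_hash_hex))
    finally:
        close()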
| artistic-2.0 | -6,521,574,130,992,980,000 | 26.706667 | 96 | 0.553417 | false |
CarolinaSGomes/jaikuengine | common/management/commands/config.py | 34 | 1239 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from optparse import make_option
from django.core.management.base import BaseCommand
import build
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--write-to-file',
action='store_true',
dest='write_to_file',
default=False,
help='Write output directly to a file'
),
)
help = 'Config helper for installation'
args = ''
requires_model_validation = False
def handle(self, *test_labels, **options):
write_to_file = options.get('write_to_file', False)
build.config(write_to_file=write_to_file)
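# Illustrative invocation sketch (not part of the original module):
#
#     python manage.py config                  # print the generated config
#     python manage.py config --write-to-file  # write it to a file via build.config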
| apache-2.0 | 7,461,110,668,484,676,000 | 30.769231 | 74 | 0.68523 | false |
pavel-odintsov/shogun | examples/undocumented/python_modular/graphical/inverse_covariance_estimation_demo.py | 26 | 2520 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from pylab import show, imshow
def simulate_data (n,p):
from modshogun import SparseInverseCovariance
import numpy as np
#create a random pxp covariance matrix
cov = np.random.normal(size=(p,p))
#generate data set with multivariate Gaussian distribution
mean = [0] * p
data = np.random.multivariate_normal(mean, cov, n)
return data
def inverse_covariance (data,lc):
from modshogun import SparseInverseCovariance
from numpy import dot
sic = SparseInverseCovariance()
#by default cov() expects each row to represent a variable, with observations in the columns
cov = np.cov(data.T)
max_cov = cov.max()
min_cov = cov.min()
    #compute inverse covariance matrix
Si = sic.estimate(cov,lc)
return Si
def draw_graph(sic, subplot):
import numpy as np
import networkx as nx
#create list of edges
    #an edge means there is a dependency between two variables
    #a 0 value in the sic matrix means the variables are independent given all the others
p = sic.shape[0]
X, Y = np.meshgrid(range(p), range(p))
graph = np.array((X[sic != 0], Y[sic != 0])).T
# extract nodes from graph
nodes = set([n1 for n1, n2 in graph] + [n2 for n1, n2 in graph])
# create networkx graph
G=nx.Graph()
# add nodes
for node in nodes:
G.add_node(node)
# add edges
for edge in graph:
G.add_edge(edge[0], edge[1])
# draw graph
nx.draw(G, ax=subplot)
# show graph
return graph
if __name__=='__main__':
#edit here for your own simulation
num_observations = 100
num_variables = 11
penalties = [0.00001, 0.05, 0.1, 0.5, 1, 2]
columns = len(penalties)
#plot the heat map and the graphs of dependency between variables
    #for different penalty values
f, axarr = plt.subplots(2, columns)
f.suptitle('Inverse Covariance Estimation\nfor ' +str(num_variables)+' variables and '+str(num_observations)+' observations', fontsize=20)
data = simulate_data (num_observations, num_variables)
print data.shape
column = -1;
for p in penalties:
column = column + 1
sic = inverse_covariance (data,p)
i = axarr[0, column].imshow(sic, cmap="hot", interpolation='nearest')
axarr[0, column].set_title('penalty='+str(p), fontsize=10)
graph = draw_graph(sic, plt.subplot(2, columns, column + columns + 1))
axarr[1, column].set_title(str((len(graph) - num_variables)/2) + ' depedences', fontsize=10)
f.subplots_adjust(right=0.8)
cbar_ax = f.add_axes([0.85, 0.15, 0.05, 0.7])
f.colorbar(i, cax=cbar_ax)
show();
| gpl-3.0 | -3,327,735,013,342,847,500 | 22.773585 | 139 | 0.697222 | false |
thecumets/cumets-backend | migrations/versions/46e2e82b373_.py | 1 | 1780 | """empty message
Revision ID: 46e2e82b373
Revises: 59cd47c905a
Create Date: 2015-10-10 16:21:15.167968
"""
# revision identifiers, used by Alembic.
revision = '46e2e82b373'
down_revision = '59cd47c905a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('relationships',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('relationship_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['relationship_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.drop_constraint('users_house_id_fkey', 'users', type_='foreignkey')
op.drop_column('users', 'house_id')
op.drop_table('house')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('house_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key('users_house_id_fkey', 'users', 'house', ['house_id'], ['id'], ondelete='SET NULL')
op.create_table('house',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('name', sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column('owner', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('latitude', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=False),
sa.Column('longitude', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(['owner'], ['users.id'], name='house_owner_fkey'),
sa.PrimaryKeyConstraint('id', name='house_pkey')
)
op.drop_table('relationships')
### end Alembic commands ###
| gpl-2.0 | 2,222,994,903,495,138,800 | 37.695652 | 109 | 0.685393 | false |
azumimuo/family-xbmc-addon | plugin.video.bubbles/resources/lib/externals/hachoir/hachoir_parser/parser.py | 1 | 5843 | import resources.lib.externals.hachoir.hachoir_core.config as config
from resources.lib.externals.hachoir.hachoir_core.field import Parser as GenericParser
from resources.lib.externals.hachoir.hachoir_core.error import HACHOIR_ERRORS, HachoirError, error
from resources.lib.externals.hachoir.hachoir_core.tools import makeUnicode
from resources.lib.externals.hachoir.hachoir_core.i18n import _
from inspect import getmro
class ValidateError(HachoirError):
pass
class HachoirParser(object):
"""
    A parser is the root of all other fields. It creates the first level of
    fields and has special attributes and methods:
    - tags: dictionary with keys:
- "file_ext": classical file extensions (string or tuple of strings) ;
- "mime": MIME type(s) (string or tuple of strings) ;
- "description": String describing the parser.
- endian: Byte order (L{BIG_ENDIAN} or L{LITTLE_ENDIAN}) of input data ;
- stream: Data input stream (set in L{__init__()}).
Default values:
- size: Field set size will be size of input stream ;
    - mime_type: First MIME type of tags["mime"] (if it exists,
None otherwise).
"""
_autofix = False
def __init__(self, stream, **args):
validate = args.pop("validate", False)
self._mime_type = None
while validate:
nbits = self.getParserTags()["min_size"]
if stream.sizeGe(nbits):
res = self.validate()
if res is True:
break
res = makeUnicode(res)
else:
res = _("stream is smaller than %s.%s bytes" % divmod(nbits, 8))
raise ValidateError(res or _("no reason given"))
self._autofix = True
#--- Methods that can be overridden -------------------------------------
def createDescription(self):
"""
        Create a Unicode description
"""
return self.PARSER_TAGS["description"]
def createMimeType(self):
"""
        Create MIME type (string), e.g. "image/png"
If it returns None, "application/octet-stream" is used.
"""
if "mime" in self.PARSER_TAGS:
return self.PARSER_TAGS["mime"][0]
return None
def validate(self):
"""
Check that the parser is able to parse the stream. Valid results:
- True: stream looks valid ;
- False: stream is invalid ;
- str: string describing the error.
"""
raise NotImplementedError()
#--- Getter methods -----------------------------------------------------
def _getDescription(self):
if self._description is None:
try:
self._description = self.createDescription()
if isinstance(self._description, str):
self._description = makeUnicode(self._description)
except HACHOIR_ERRORS, err:
error("Error getting description of %s: %s" \
% (self.path, unicode(err)))
self._description = self.PARSER_TAGS["description"]
return self._description
description = property(_getDescription,
doc="Description of the parser")
def _getMimeType(self):
if not self._mime_type:
try:
self._mime_type = self.createMimeType()
except HACHOIR_ERRORS, err:
self.error("Error when creating MIME type: %s" % unicode(err))
if not self._mime_type \
and self.createMimeType != Parser.createMimeType:
self._mime_type = Parser.createMimeType(self)
if not self._mime_type:
self._mime_type = u"application/octet-stream"
return self._mime_type
mime_type = property(_getMimeType)
def createContentSize(self):
return None
def _getContentSize(self):
if not hasattr(self, "_content_size"):
try:
self._content_size = self.createContentSize()
except HACHOIR_ERRORS, err:
error("Unable to compute %s content size: %s" % (self.__class__.__name__, err))
self._content_size = None
return self._content_size
content_size = property(_getContentSize)
def createFilenameSuffix(self):
"""
Create filename suffix: "." + first value of self.PARSER_TAGS["file_ext"],
or None if self.PARSER_TAGS["file_ext"] doesn't exist.
"""
file_ext = self.getParserTags().get("file_ext")
if isinstance(file_ext, (tuple, list)):
file_ext = file_ext[0]
return file_ext and '.' + file_ext
def _getFilenameSuffix(self):
if not hasattr(self, "_filename_suffix"):
self._filename_extension = self.createFilenameSuffix()
return self._filename_extension
filename_suffix = property(_getFilenameSuffix)
@classmethod
def getParserTags(cls):
tags = {}
for cls in reversed(getmro(cls)):
if hasattr(cls, "PARSER_TAGS"):
tags.update(cls.PARSER_TAGS)
return tags
@classmethod
def print_(cls, out, verbose):
tags = cls.getParserTags()
print >>out, "- %s: %s" % (tags["id"], tags["description"])
if verbose:
if "mime" in tags:
print >>out, " MIME type: %s" % (", ".join(tags["mime"]))
if "file_ext" in tags:
file_ext = ", ".join(
".%s" % file_ext for file_ext in tags["file_ext"])
print >>out, " File extension: %s" % file_ext
autofix = property(lambda self: self._autofix and config.autofix)
class Parser(HachoirParser, GenericParser):
def __init__(self, stream, **args):
GenericParser.__init__(self, stream)
HachoirParser.__init__(self, stream, **args)
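# Illustrative sketch of a minimal concrete parser (not part of the original
# module); the magic string, tag values and minimum size are assumptions:
#
#     class ExampleParser(Parser):
#         PARSER_TAGS = {
#             "id": "example",
#             "category": "misc",
#             "file_ext": ("ex",),
#             "mime": (u"application/x-example",),
#             "min_size": 4 * 8,  # minimum stream size, in bits
#             "description": "Example file format",
#         }
#
#         def validate(self):
#             if self.stream.readBytes(0, 4) != "EXMP":
#                 return "invalid magic string"
#             return True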
| gpl-2.0 | -8,077,751,571,801,594,000 | 36.941558 | 98 | 0.578299 | false |
deanishe/alfred-appscripts | src/workflow/workflow3.py | 10 | 21229 | # encoding: utf-8
#
# Copyright (c) 2016 Dean Jackson <[email protected]>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2016-06-25
#
"""An Alfred 3+ version of :class:`~workflow.Workflow`.
:class:`~workflow.Workflow3` supports new features, such as
setting :ref:`workflow-variables` and
:class:`the more advanced modifiers <Modifier>` supported by Alfred 3+.
In order for the feedback mechanism to work correctly, it's important
to create :class:`Item3` and :class:`Modifier` objects via the
:meth:`Workflow3.add_item()` and :meth:`Item3.add_modifier()` methods
respectively. If you instantiate :class:`Item3` or :class:`Modifier`
objects directly, the current :class:`Workflow3` object won't be aware
of them, and they won't be sent to Alfred when you call
:meth:`Workflow3.send_feedback()`.
"""
from __future__ import print_function, unicode_literals, absolute_import
import json
import os
import sys
from .workflow import ICON_WARNING, Workflow
class Variables(dict):
"""Workflow variables for Run Script actions.
.. versionadded: 1.26
This class allows you to set workflow variables from
Run Script actions.
It is a subclass of :class:`dict`.
>>> v = Variables(username='deanishe', password='hunter2')
>>> v.arg = u'output value'
>>> print(v)
See :ref:`variables-run-script` in the User Guide for more
information.
Args:
arg (unicode, optional): Main output/``{query}``.
**variables: Workflow variables to set.
Attributes:
arg (unicode): Output value (``{query}``).
config (dict): Configuration for downstream workflow element.
"""
def __init__(self, arg=None, **variables):
"""Create a new `Variables` object."""
self.arg = arg
self.config = {}
super(Variables, self).__init__(**variables)
@property
def obj(self):
"""Return ``alfredworkflow`` `dict`."""
o = {}
if self:
d2 = {}
for k, v in self.items():
d2[k] = v
o['variables'] = d2
if self.config:
o['config'] = self.config
if self.arg is not None:
o['arg'] = self.arg
return {'alfredworkflow': o}
def __unicode__(self):
"""Convert to ``alfredworkflow`` JSON object.
Returns:
unicode: ``alfredworkflow`` JSON object
"""
if not self and not self.config:
if self.arg:
return self.arg
else:
return u''
return json.dumps(self.obj)
def __str__(self):
"""Convert to ``alfredworkflow`` JSON object.
Returns:
str: UTF-8 encoded ``alfredworkflow`` JSON object
"""
return unicode(self).encode('utf-8')
class Modifier(object):
"""Modify :class:`Item3` arg/icon/variables when modifier key is pressed.
Don't use this class directly (as it won't be associated with any
:class:`Item3`), but rather use :meth:`Item3.add_modifier()`
to add modifiers to results.
>>> it = wf.add_item('Title', 'Subtitle', valid=True)
>>> it.setvar('name', 'default')
>>> m = it.add_modifier('cmd')
>>> m.setvar('name', 'alternate')
See :ref:`workflow-variables` in the User Guide for more information
and :ref:`example usage <example-variables>`.
Args:
key (unicode): Modifier key, e.g. ``"cmd"``, ``"alt"`` etc.
subtitle (unicode, optional): Override default subtitle.
arg (unicode, optional): Argument to pass for this modifier.
valid (bool, optional): Override item's validity.
icon (unicode, optional): Filepath/UTI of icon to use
icontype (unicode, optional): Type of icon. See
:meth:`Workflow.add_item() <workflow.Workflow.add_item>`
for valid values.
Attributes:
arg (unicode): Arg to pass to following action.
config (dict): Configuration for a downstream element, such as
a File Filter.
icon (unicode): Filepath/UTI of icon.
icontype (unicode): Type of icon. See
:meth:`Workflow.add_item() <workflow.Workflow.add_item>`
for valid values.
key (unicode): Modifier key (see above).
subtitle (unicode): Override item subtitle.
valid (bool): Override item validity.
variables (dict): Workflow variables set by this modifier.
"""
def __init__(self, key, subtitle=None, arg=None, valid=None, icon=None,
icontype=None):
"""Create a new :class:`Modifier`.
Don't use this class directly (as it won't be associated with any
:class:`Item3`), but rather use :meth:`Item3.add_modifier()`
to add modifiers to results.
Args:
key (unicode): Modifier key, e.g. ``"cmd"``, ``"alt"`` etc.
subtitle (unicode, optional): Override default subtitle.
arg (unicode, optional): Argument to pass for this modifier.
valid (bool, optional): Override item's validity.
icon (unicode, optional): Filepath/UTI of icon to use
icontype (unicode, optional): Type of icon. See
:meth:`Workflow.add_item() <workflow.Workflow.add_item>`
for valid values.
"""
self.key = key
self.subtitle = subtitle
self.arg = arg
self.valid = valid
self.icon = icon
self.icontype = icontype
self.config = {}
self.variables = {}
def setvar(self, name, value):
"""Set a workflow variable for this Item.
Args:
name (unicode): Name of variable.
value (unicode): Value of variable.
"""
self.variables[name] = value
def getvar(self, name, default=None):
"""Return value of workflow variable for ``name`` or ``default``.
Args:
name (unicode): Variable name.
default (None, optional): Value to return if variable is unset.
Returns:
unicode or ``default``: Value of variable if set or ``default``.
"""
return self.variables.get(name, default)
@property
def obj(self):
"""Modifier formatted for JSON serialization for Alfred 3.
Returns:
dict: Modifier for serializing to JSON.
"""
o = {}
if self.subtitle is not None:
o['subtitle'] = self.subtitle
if self.arg is not None:
o['arg'] = self.arg
if self.valid is not None:
o['valid'] = self.valid
if self.variables:
o['variables'] = self.variables
if self.config:
o['config'] = self.config
icon = self._icon()
if icon:
o['icon'] = icon
return o
def _icon(self):
"""Return `icon` object for item.
Returns:
dict: Mapping for item `icon` (may be empty).
"""
icon = {}
if self.icon is not None:
icon['path'] = self.icon
if self.icontype is not None:
icon['type'] = self.icontype
return icon
class Item3(object):
"""Represents a feedback item for Alfred 3+.
Generates Alfred-compliant JSON for a single item.
Don't use this class directly (as it then won't be associated with
any :class:`Workflow3 <workflow.Workflow3>` object), but rather use
:meth:`Workflow3.add_item() <workflow.Workflow3.add_item>`.
See :meth:`~workflow.Workflow3.add_item` for details of arguments.
"""
def __init__(self, title, subtitle='', arg=None, autocomplete=None,
match=None, valid=False, uid=None, icon=None, icontype=None,
type=None, largetext=None, copytext=None, quicklookurl=None):
"""Create a new :class:`Item3` object.
Use same arguments as for
:class:`Workflow.Item <workflow.Workflow.Item>`.
Argument ``subtitle_modifiers`` is not supported.
"""
self.title = title
self.subtitle = subtitle
self.arg = arg
self.autocomplete = autocomplete
self.match = match
self.valid = valid
self.uid = uid
self.icon = icon
self.icontype = icontype
self.type = type
self.quicklookurl = quicklookurl
self.largetext = largetext
self.copytext = copytext
self.modifiers = {}
self.config = {}
self.variables = {}
def setvar(self, name, value):
"""Set a workflow variable for this Item.
Args:
name (unicode): Name of variable.
value (unicode): Value of variable.
"""
self.variables[name] = value
def getvar(self, name, default=None):
"""Return value of workflow variable for ``name`` or ``default``.
Args:
name (unicode): Variable name.
default (None, optional): Value to return if variable is unset.
Returns:
unicode or ``default``: Value of variable if set or ``default``.
"""
return self.variables.get(name, default)
def add_modifier(self, key, subtitle=None, arg=None, valid=None, icon=None,
icontype=None):
"""Add alternative values for a modifier key.
Args:
key (unicode): Modifier key, e.g. ``"cmd"`` or ``"alt"``
subtitle (unicode, optional): Override item subtitle.
arg (unicode, optional): Input for following action.
valid (bool, optional): Override item validity.
icon (unicode, optional): Filepath/UTI of icon.
icontype (unicode, optional): Type of icon. See
:meth:`Workflow.add_item() <workflow.Workflow.add_item>`
for valid values.
Returns:
Modifier: Configured :class:`Modifier`.
"""
mod = Modifier(key, subtitle, arg, valid, icon, icontype)
# Add Item variables to Modifier
mod.variables.update(self.variables)
self.modifiers[key] = mod
return mod
@property
def obj(self):
"""Item formatted for JSON serialization.
Returns:
dict: Data suitable for Alfred 3 feedback.
"""
# Required values
o = {
'title': self.title,
'subtitle': self.subtitle,
'valid': self.valid,
}
# Optional values
if self.arg is not None:
o['arg'] = self.arg
if self.autocomplete is not None:
o['autocomplete'] = self.autocomplete
if self.match is not None:
o['match'] = self.match
if self.uid is not None:
o['uid'] = self.uid
if self.type is not None:
o['type'] = self.type
if self.quicklookurl is not None:
o['quicklookurl'] = self.quicklookurl
if self.variables:
o['variables'] = self.variables
if self.config:
o['config'] = self.config
# Largetype and copytext
text = self._text()
if text:
o['text'] = text
icon = self._icon()
if icon:
o['icon'] = icon
# Modifiers
mods = self._modifiers()
if mods:
o['mods'] = mods
return o
def _icon(self):
"""Return `icon` object for item.
Returns:
dict: Mapping for item `icon` (may be empty).
"""
icon = {}
if self.icon is not None:
icon['path'] = self.icon
if self.icontype is not None:
icon['type'] = self.icontype
return icon
def _text(self):
"""Return `largetext` and `copytext` object for item.
Returns:
dict: `text` mapping (may be empty)
"""
text = {}
if self.largetext is not None:
text['largetype'] = self.largetext
if self.copytext is not None:
text['copy'] = self.copytext
return text
def _modifiers(self):
"""Build `mods` dictionary for JSON feedback.
Returns:
dict: Modifier mapping or `None`.
"""
if self.modifiers:
mods = {}
for k, mod in self.modifiers.items():
mods[k] = mod.obj
return mods
return None
class Workflow3(Workflow):
"""Workflow class that generates Alfred 3+ feedback.
It is a subclass of :class:`~workflow.Workflow` and most of its
methods are documented there.
Attributes:
item_class (class): Class used to generate feedback items.
variables (dict): Top level workflow variables.
"""
item_class = Item3
def __init__(self, **kwargs):
"""Create a new :class:`Workflow3` object.
See :class:`~workflow.Workflow` for documentation.
"""
Workflow.__init__(self, **kwargs)
self.variables = {}
self._rerun = 0
# Get session ID from environment if present
self._session_id = os.getenv('_WF_SESSION_ID') or None
if self._session_id:
self.setvar('_WF_SESSION_ID', self._session_id)
@property
def _default_cachedir(self):
"""Alfred 4's default cache directory."""
return os.path.join(
os.path.expanduser(
'~/Library/Caches/com.runningwithcrayons.Alfred/'
'Workflow Data/'),
self.bundleid)
@property
def _default_datadir(self):
"""Alfred 4's default data directory."""
return os.path.join(os.path.expanduser(
'~/Library/Application Support/Alfred/Workflow Data/'),
self.bundleid)
@property
def rerun(self):
"""How often (in seconds) Alfred should re-run the Script Filter."""
return self._rerun
@rerun.setter
def rerun(self, seconds):
"""Interval at which Alfred should re-run the Script Filter.
Args:
seconds (int): Interval between runs.
"""
self._rerun = seconds
@property
def session_id(self):
"""A unique session ID every time the user uses the workflow.
.. versionadded:: 1.25
The session ID persists while the user is using this workflow.
It expires when the user runs a different workflow or closes
Alfred.
"""
if not self._session_id:
from uuid import uuid4
self._session_id = uuid4().hex
self.setvar('_WF_SESSION_ID', self._session_id)
return self._session_id
def setvar(self, name, value, persist=False):
"""Set a "global" workflow variable.
.. versionchanged:: 1.33
These variables are always passed to downstream workflow objects.
If you have set :attr:`rerun`, these variables are also passed
back to the script when Alfred runs it again.
Args:
name (unicode): Name of variable.
value (unicode): Value of variable.
persist (bool, optional): Also save variable to ``info.plist``?
"""
self.variables[name] = value
if persist:
from .util import set_config
set_config(name, value, self.bundleid)
self.logger.debug('saved variable %r with value %r to info.plist',
name, value)
def getvar(self, name, default=None):
"""Return value of workflow variable for ``name`` or ``default``.
Args:
name (unicode): Variable name.
default (None, optional): Value to return if variable is unset.
Returns:
unicode or ``default``: Value of variable if set or ``default``.
"""
return self.variables.get(name, default)
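    # Illustrative sketch (not part of the original module): variables set on
    # the workflow are passed to every item added afterwards and, with
    # persist=True, are also written to info.plist ('theme' and 'api_key' are
    # made-up names).
    #
    #     wf = Workflow3()
    #     wf.setvar('theme', 'dark')
    #     wf.setvar('api_key', 'abc123', persist=True)
    #     wf.getvar('theme')  # -> 'dark'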
def add_item(self, title, subtitle='', arg=None, autocomplete=None,
valid=False, uid=None, icon=None, icontype=None, type=None,
largetext=None, copytext=None, quicklookurl=None, match=None):
"""Add an item to be output to Alfred.
Args:
match (unicode, optional): If you have "Alfred filters results"
turned on for your Script Filter, Alfred (version 3.5 and
above) will filter against this field, not ``title``.
See :meth:`Workflow.add_item() <workflow.Workflow.add_item>` for
the main documentation and other parameters.
The key difference is that this method does not support the
``modifier_subtitles`` argument. Use the :meth:`~Item3.add_modifier()`
method instead on the returned item instead.
Returns:
Item3: Alfred feedback item.
"""
item = self.item_class(title, subtitle, arg, autocomplete,
match, valid, uid, icon, icontype, type,
largetext, copytext, quicklookurl)
# Add variables to child item
item.variables.update(self.variables)
self._items.append(item)
return item
@property
def _session_prefix(self):
"""Filename prefix for current session."""
return '_wfsess-{0}-'.format(self.session_id)
def _mk_session_name(self, name):
"""New cache name/key based on session ID."""
return self._session_prefix + name
def cache_data(self, name, data, session=False):
"""Cache API with session-scoped expiry.
.. versionadded:: 1.25
Args:
name (str): Cache key
data (object): Data to cache
session (bool, optional): Whether to scope the cache
to the current session.
``name`` and ``data`` are the same as for the
:meth:`~workflow.Workflow.cache_data` method on
:class:`~workflow.Workflow`.
If ``session`` is ``True``, then ``name`` is prefixed
with :attr:`session_id`.
"""
if session:
name = self._mk_session_name(name)
return super(Workflow3, self).cache_data(name, data)
def cached_data(self, name, data_func=None, max_age=60, session=False):
"""Cache API with session-scoped expiry.
.. versionadded:: 1.25
Args:
name (str): Cache key
data_func (callable): Callable that returns fresh data. It
is called if the cache has expired or doesn't exist.
max_age (int): Maximum allowable age of cache in seconds.
session (bool, optional): Whether to scope the cache
to the current session.
``name``, ``data_func`` and ``max_age`` are the same as for the
:meth:`~workflow.Workflow.cached_data` method on
:class:`~workflow.Workflow`.
If ``session`` is ``True``, then ``name`` is prefixed
with :attr:`session_id`.
"""
if session:
name = self._mk_session_name(name)
return super(Workflow3, self).cached_data(name, data_func, max_age)
def clear_session_cache(self, current=False):
"""Remove session data from the cache.
.. versionadded:: 1.25
.. versionchanged:: 1.27
By default, data belonging to the current session won't be
deleted. Set ``current=True`` to also clear current session.
Args:
current (bool, optional): If ``True``, also remove data for
current session.
"""
def _is_session_file(filename):
if current:
return filename.startswith('_wfsess-')
return filename.startswith('_wfsess-') \
and not filename.startswith(self._session_prefix)
self.clear_cache(_is_session_file)
@property
def obj(self):
"""Feedback formatted for JSON serialization.
Returns:
dict: Data suitable for Alfred 3 feedback.
"""
items = []
for item in self._items:
items.append(item.obj)
o = {'items': items}
if self.variables:
o['variables'] = self.variables
if self.rerun:
o['rerun'] = self.rerun
return o
def warn_empty(self, title, subtitle=u'', icon=None):
"""Add a warning to feedback if there are no items.
.. versionadded:: 1.31
Add a "warning" item to Alfred feedback if no other items
have been added. This is a handy shortcut to prevent Alfred
        from showing its fallback searches, which it does if no
items are returned.
Args:
title (unicode): Title of feedback item.
subtitle (unicode, optional): Subtitle of feedback item.
icon (str, optional): Icon for feedback item. If not
specified, ``ICON_WARNING`` is used.
Returns:
Item3: Newly-created item.
"""
if len(self._items):
return
icon = icon or ICON_WARNING
return self.add_item(title, subtitle, icon=icon)
def send_feedback(self):
"""Print stored items to console/Alfred as JSON."""
json.dump(self.obj, sys.stdout)
sys.stdout.flush()
| mit | 8,208,686,674,380,428,000 | 28.443828 | 79 | 0.570964 | false |
torchbox/wagtail | wagtail/contrib/postgres_search/utils.py | 15 | 3459 | from itertools import zip_longest
from django.apps import apps
from django.db import connections
from wagtail.search.index import Indexed, RelatedFields, SearchField
def get_postgresql_connections():
return [connection for connection in connections.all()
if connection.vendor == 'postgresql']
def get_descendant_models(model):
"""
Returns all descendants of a model, including the model itself.
"""
descendant_models = {other_model for other_model in apps.get_models()
if issubclass(other_model, model)}
descendant_models.add(model)
return descendant_models
def get_content_type_pk(model):
# We import it locally because this file is loaded before apps are ready.
from django.contrib.contenttypes.models import ContentType
return ContentType.objects.get_for_model(model).pk
def get_ancestors_content_types_pks(model):
"""
Returns content types ids for the ancestors of this model, excluding it.
"""
from django.contrib.contenttypes.models import ContentType
return [ct.pk for ct in
ContentType.objects.get_for_models(*model._meta.get_parent_list())
.values()]
def get_descendants_content_types_pks(model):
"""
Returns content types ids for the descendants of this model, including it.
"""
from django.contrib.contenttypes.models import ContentType
return [ct.pk for ct in
ContentType.objects.get_for_models(*get_descendant_models(model))
.values()]
def get_search_fields(search_fields):
for search_field in search_fields:
if isinstance(search_field, SearchField):
yield search_field
elif isinstance(search_field, RelatedFields):
for sub_field in get_search_fields(search_field.fields):
yield sub_field
WEIGHTS = 'ABCD'
WEIGHTS_COUNT = len(WEIGHTS)
# These are filled when apps are ready.
BOOSTS_WEIGHTS = []
WEIGHTS_VALUES = []
def get_boosts():
boosts = set()
for model in apps.get_models():
if issubclass(model, Indexed):
for search_field in get_search_fields(model.get_search_fields()):
boost = search_field.boost
if boost is not None:
boosts.add(boost)
return boosts
def determine_boosts_weights(boosts=()):
if not boosts:
boosts = get_boosts()
boosts = list(sorted(boosts, reverse=True))
min_boost = boosts[-1]
if len(boosts) <= WEIGHTS_COUNT:
return list(zip_longest(boosts, WEIGHTS, fillvalue=min(min_boost, 0)))
max_boost = boosts[0]
boost_step = (max_boost - min_boost) / (WEIGHTS_COUNT - 1)
return [(max_boost - (i * boost_step), weight)
for i, weight in enumerate(WEIGHTS)]
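# Worked example (illustrative, not part of the original module): with three
# distinct boosts there are at most four weights, so each boost maps straight
# onto a PostgreSQL weight label and the spare label receives the fill value:
#
#     determine_boosts_weights([10, 2, 0.5])
#     # -> [(10, 'A'), (2, 'B'), (0.5, 'C'), (0, 'D')]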
def set_weights():
BOOSTS_WEIGHTS.extend(determine_boosts_weights())
weights = [w for w, c in BOOSTS_WEIGHTS]
min_weight = min(weights)
if min_weight <= 0:
if min_weight == 0:
min_weight = -0.1
weights = [w - min_weight for w in weights]
max_weight = max(weights)
WEIGHTS_VALUES.extend([w / max_weight
for w in reversed(weights)])
def get_weight(boost):
if boost is None:
return WEIGHTS[-1]
for max_boost, weight in BOOSTS_WEIGHTS:
if boost >= max_boost:
return weight
return weight
def get_sql_weights():
return '{' + ','.join(map(str, WEIGHTS_VALUES)) + '}'
| bsd-3-clause | 5,584,309,279,480,637,000 | 29.610619 | 78 | 0.645851 | false |
Moguri/BlenderRealtimeEngineAddon | socket_api.py | 2 | 4329 | import enum
import json
import socket
import struct
import sys
import time
class AutoNumber(enum.Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
class MethodIDs(AutoNumber):
__order__ = "add update remove"
add = ()
update = ()
remove = ()
class DataIDs(AutoNumber):
__order__ = "view projection viewport gltf"
view = ()
projection = ()
viewport = ()
gltf = ()
def send_message(_socket, method, data_id, data):
_socket.setblocking(True)
_socket.settimeout(1)
attempts = 3
while attempts > 0:
message = encode_cmd_message(method, data_id)
try:
_socket.send(message)
data_str = json.dumps(data)
_socket.send(struct.pack("I", len(data_str)))
_socket.send(data_str.encode())
except socket.timeout:
attempts -= 1
continue
break
else:
print("Failed to send message (%s, %s)." % (method.name, data_id.name))
_socket.setblocking(False)
def encode_cmd_message(method_id, data_id):
message = data_id.value & 0b00001111
message |= method_id.value << 4
return struct.pack("B", message)
def decode_cmd_message(message):
message = struct.unpack('B', message)[0]
method_id = message >> 4
data_id = message & 0b00001111
return MethodIDs(method_id), DataIDs(data_id)
def decode_size_message(message):
return struct.unpack('I', message)[0]
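# Round-trip sketch (illustrative, not part of the original module): a command
# byte packs the method id into its high nibble and the data id into its low
# nibble, so encoding and decoding are symmetric:
#
#     msg = encode_cmd_message(MethodIDs.update, DataIDs.view)
#     decode_cmd_message(msg)                    # -> (MethodIDs.update, DataIDs.view)
#     decode_size_message(struct.pack("I", 42))  # -> 42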
class SocketClient(object):
def __init__(self, handler):
self.handler = handler
self.socket = socket.socket()
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.connect(("127.0.0.1", 4242))
def run(self):
try:
while True:
try:
self.socket.setblocking(False)
message = self.socket.recv(1)
if message:
self.socket.setblocking(True)
method_id, data_id = decode_cmd_message(message)
size = decode_size_message(self.socket.recv(4))
data = b""
remaining = size
while remaining > 0:
chunk = self.socket.recv(min(1024, remaining))
remaining -= len(chunk)
data += chunk
data = json.loads(data.decode())
if data_id == DataIDs.projection:
if hasattr(self.handler, 'handle_projection'):
self.handler.handle_projection(data['data'])
elif data_id == DataIDs.view:
if hasattr(self.handler, 'handle_view'):
self.handler.handle_view(data['data'])
elif data_id == DataIDs.viewport:
if hasattr(self.handler, 'handle_viewport'):
self.handler.handle_viewport(data['width'], data['height'])
elif data_id == DataIDs.gltf:
if hasattr(self.handler, 'handle_gltf'):
self.handler.handle_gltf(data)
except socket.error:
break
# Return output
img_data, sx, sy = self.handler.get_render_image()
data_size = len(img_data)
self.socket.setblocking(True)
try:
self.socket.send(struct.pack("HH", sx, sy))
start_time = time.clock()
sent_count = 0
while sent_count < data_size:
sent_count += self.socket.send(img_data[sent_count:data_size - sent_count])
etime = time.clock() - start_time
tx = 0 if etime == 0 else sent_count*8/1024/1024/etime
#print("Sent %d bytes in %.2f ms (%.2f Mbps)" % (sent_count, etime*1000, tx))
except socket.timeout:
print("Failed to send result data")
except socket.error as e:
print("Closing")
self.socket.close()
sys.exit()
| mit | -1,389,528,007,131,055,400 | 31.548872 | 95 | 0.505891 | false |
dgarant/pl-markov-network | examples/large_non_gaussian.py | 1 | 2076 | # Demonstrates pseudo-likelihood estimation
# for a large system of continuous variables
from plmrf import *
import numpy as np
import scipy
import time
# Generating a big ring by sampling variables independently,
# then sampling based on each configuration's 'true' potential
nvars = 1000
nsamp = 1000
print("Generating data ...")
indep_data = dict()
for vindex in range(nvars):
samples = np.random.normal(size=nsamp*10)
varname = "x{0}".format(vindex)
indep_data[varname] = samples
# potentials functions are Gaussian kernels
def potential(pindex):
return (1.0/nvars) * np.exp(-np.abs(indep_data["x{0}".format(vindex)], indep_data["x{0}".format((vindex+1) % nvars)]))
unnormalized_density = np.exp(np.sum([potential(p) for p in range(nvars)], axis=0))
relative_density = unnormalized_density / unnormalized_density.sum()
samp_indices = np.random.choice(range(nsamp*10), size=nsamp, p=relative_density)
print("Setting up potentials and variable definitions ...")
data = dict()
var_defs = []
for vindex in range(nvars):
varname = "x{0}".format(vindex)
next_var = "x{0}".format((vindex+1) % nvars)
samples = indep_data[varname][samp_indices]
data[varname] = samples
var_defs.append(VariableDef(varname, samples=samples, num_int_points=10))
potentials = []
tied_params = [[], []]
for vindex in range(nvars):
varname = "x{0}".format(vindex)
next_var = "x{0}".format((vindex+1) % nvars)
potentials.append(GaussianPotential([varname], samples=data, location=0))
potentials.append(GaussianPotential([varname, next_var], samples=data))
tied_params[0].append(len(potentials)-2)
tied_params[1].append(len(potentials)-1)
for p in potentials:
if p.bandwidth < 1e-16:
print(p)
network = LogLinearMarkovNetwork(potentials, var_defs, tied_weights=tied_params)
print("Fitting parameters ...")
start = time.time()
mple_result = network.fit(data, log=True)
end = time.time()
print("Parameter estimation completed in {0} seconds".format(end - start))
print("MPLE optimization result:")
print(mple_result)
| mit | -6,298,216,771,540,330,000 | 30.454545 | 122 | 0.708574 | false |
yongtang/tensorflow | tensorflow/python/keras/layers/preprocessing/text_vectorization.py | 4 | 26014 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras text vectorization preprocessing layer."""
# pylint: disable=g-classes-have-attributes
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.layers.preprocessing import index_lookup
from tensorflow.python.keras.layers.preprocessing import string_lookup
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.util.tf_export import keras_export
LOWER_AND_STRIP_PUNCTUATION = "lower_and_strip_punctuation"
SPLIT_ON_WHITESPACE = "whitespace"
TF_IDF = index_lookup.TF_IDF
INT = index_lookup.INT
MULTI_HOT = index_lookup.MULTI_HOT
COUNT = index_lookup.COUNT
# This is an explicit regex of all the tokens that will be stripped if
# LOWER_AND_STRIP_PUNCTUATION is set. If an application requires other
# stripping, a Callable should be passed into the 'standardize' arg.
DEFAULT_STRIP_REGEX = r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']'
# The string tokens in the extracted vocabulary
_VOCAB_NAME = "vocab"
# The inverse-document-frequency weights
_IDF_NAME = "idf"
# The IDF data for the OOV token
_OOV_IDF_NAME = "oov_idf"
# The string tokens in the full vocabulary
_ACCUMULATOR_VOCAB_NAME = "vocab"
# The total counts of each token in the vocabulary
_ACCUMULATOR_COUNTS_NAME = "counts"
# The number of documents / examples that each token appears in.
_ACCUMULATOR_DOCUMENT_COUNTS = "document_counts"
# The total number of documents / examples in the dataset.
_ACCUMULATOR_NUM_DOCUMENTS = "num_documents"
@keras_export(
"keras.layers.experimental.preprocessing.TextVectorization", v1=[])
class TextVectorization(base_preprocessing_layer.CombinerPreprocessingLayer):
"""Text vectorization layer.
This layer has basic options for managing text in a Keras model. It
transforms a batch of strings (one example = one string) into either a list of
token indices (one example = 1D tensor of integer token indices) or a dense
representation (one example = 1D tensor of float values representing data
about the example's tokens).
If desired, the user can call this layer's adapt() method on a dataset.
When this layer is adapted, it will analyze the dataset, determine the
frequency of individual string values, and create a 'vocabulary' from them.
This vocabulary can have unlimited size or be capped, depending on the
configuration options for this layer; if there are more unique values in the
input than the maximum vocabulary size, the most frequent terms will be used
to create the vocabulary.
The processing of each example contains the following steps:
1. standardize each example (usually lowercasing + punctuation stripping)
2. split each example into substrings (usually words)
3. recombine substrings into tokens (usually ngrams)
4. index tokens (associate a unique int value with each token)
5. transform each example using this index, either into a vector of ints or
a dense float vector.
Some notes on passing Callables to customize splitting and normalization for
this layer:
1. Any callable can be passed to this Layer, but if you want to serialize
this object you should only pass functions that are registered Keras
serializables (see `tf.keras.utils.register_keras_serializable` for more
details).
2. When using a custom callable for `standardize`, the data received
by the callable will be exactly as passed to this layer. The callable
should return a tensor of the same shape as the input.
3. When using a custom callable for `split`, the data received by the
callable will have the 1st dimension squeezed out - instead of
`[["string to split"], ["another string to split"]]`, the Callable will
see `["string to split", "another string to split"]`. The callable should
return a Tensor with the first dimension containing the split tokens -
in this example, we should see something like `[["string", "to",
"split"], ["another", "string", "to", "split"]]`. This makes the callable
site natively compatible with `tf.strings.split()`.
Args:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this vocabulary
contains 1 OOV token, so the effective number of tokens is `(max_tokens -
1 - (1 if output == `"int"` else 0))`.
standardize: Optional specification for standardization to apply to the
input text. Values can be None (no standardization),
`"lower_and_strip_punctuation"` (lowercase and remove punctuation) or a
Callable. Default is `"lower_and_strip_punctuation"`.
split: Optional specification for splitting the input text. Values can be
None (no splitting), `"whitespace"` (split on ASCII whitespace), or a
Callable. The default is `"whitespace"`.
ngrams: Optional specification for ngrams to create from the possibly-split
input text. Values can be None, an integer or tuple of integers; passing
an integer will create ngrams up to that integer, and passing a tuple of
integers will create ngrams for the specified values in the tuple. Passing
None means that no ngrams will be created.
output_mode: Optional specification for the output of the layer. Values can
be `"int"`, `"multi_hot"`, `"count"` or `"tf_idf"`, configuring the layer
as follows:
- `"int"`: Outputs integer indices, one integer index per split string
token. When output == `"int"`, 0 is reserved for masked locations;
this reduces the vocab size to max_tokens-2 instead of max_tokens-1
- `"multi_hot"`: Outputs a single int array per batch, of either
vocab_size or max_tokens size, containing 1s in all elements where the
token mapped to that index exists at least once in the batch item.
- `"count"`: As `"multi_hot"`, but the int array contains a count of the
number of times the token at that index appeared in the batch item.
- `"tf_idf"`: As `"multi_hot"`, but the TF-IDF algorithm is applied to
find the value in each token slot.
output_sequence_length: Only valid in INT mode. If set, the output will have
its time dimension padded or truncated to exactly `output_sequence_length`
values, resulting in a tensor of shape [batch_size,
output_sequence_length] regardless of how many tokens resulted from the
splitting step. Defaults to None.
pad_to_max_tokens: Only valid in `"multi_hot"`, `"count"`, and `"tf_idf"`
modes. If True, the output will have its feature axis padded to
`max_tokens` even if the number of unique tokens in the vocabulary is less
than max_tokens, resulting in a tensor of shape [batch_size, max_tokens]
regardless of vocabulary size. Defaults to False.
vocabulary: An optional list of vocabulary terms, or a path to a text file
containing a vocabulary to load into this layer. The file should contain
one token per line. If the list or file contains the same token multiple
times, an error will be thrown.
Example:
This example instantiates a TextVectorization layer that lowercases text,
splits on whitespace, strips punctuation, and outputs integer vocab indices.
>>> text_dataset = tf.data.Dataset.from_tensor_slices(["foo", "bar", "baz"])
>>> max_features = 5000 # Maximum vocab size.
>>> max_len = 4 # Sequence length to pad the outputs to.
>>>
>>> # Create the layer.
>>> vectorize_layer = TextVectorization(
... max_tokens=max_features,
... output_mode='int',
... output_sequence_length=max_len)
>>>
>>> # Now that the vocab layer has been created, call `adapt` on the text-only
>>> # dataset to create the vocabulary. You don't have to batch, but for large
>>> # datasets this means we're not keeping spare copies of the dataset.
>>> vectorize_layer.adapt(text_dataset.batch(64))
>>>
>>> # Create the model that uses the vectorize text layer
>>> model = tf.keras.models.Sequential()
>>>
>>> # Start by creating an explicit input layer. It needs to have a shape of
>>> # (1,) (because we need to guarantee that there is exactly one string
>>> # input per batch), and the dtype needs to be 'string'.
>>> model.add(tf.keras.Input(shape=(1,), dtype=tf.string))
>>>
>>> # The first layer in our model is the vectorization layer. After this
>>> # layer, we have a tensor of shape (batch_size, max_len) containing vocab
>>> # indices.
>>> model.add(vectorize_layer)
>>>
>>> # Now, the model can map strings to integers, and you can add an embedding
>>> # layer to map these integers to learned embeddings.
>>> input_data = [["foo qux bar"], ["qux baz"]]
>>> model.predict(input_data)
array([[2, 1, 4, 0],
[1, 3, 0, 0]])
Example:
This example instantiates a TextVectorization layer by passing a list
of vocabulary terms to the layer's __init__ method.
>>> vocab_data = ["earth", "wind", "and", "fire"]
>>> max_len = 4 # Sequence length to pad the outputs to.
>>>
>>> # Create the layer, passing the vocab directly. You can also pass the
>>> # vocabulary arg a path to a file containing one vocabulary word per
>>> # line.
>>> vectorize_layer = TextVectorization(
... max_tokens=max_features,
... output_mode='int',
... output_sequence_length=max_len,
... vocabulary=vocab_data)
>>>
>>> # Because we've passed the vocabulary directly, we don't need to adapt
>>> # the layer - the vocabulary is already set. The vocabulary contains the
>>> # padding token ('') and OOV token ('[UNK]') as well as the passed tokens.
>>> vectorize_layer.get_vocabulary()
['', '[UNK]', 'earth', 'wind', 'and', 'fire']
"""
# TODO(momernick): Add an examples section to the docstring.
def __init__(self,
max_tokens=None,
standardize=LOWER_AND_STRIP_PUNCTUATION,
split=SPLIT_ON_WHITESPACE,
ngrams=None,
output_mode=INT,
output_sequence_length=None,
pad_to_max_tokens=False,
vocabulary=None,
**kwargs):
# This layer only applies to string processing, and so should only have
# a dtype of 'string'.
if "dtype" in kwargs and kwargs["dtype"] != dtypes.string:
raise ValueError("TextVectorization may only have a dtype of string.")
elif "dtype" not in kwargs:
kwargs["dtype"] = dtypes.string
# 'standardize' must be one of (None, LOWER_AND_STRIP_PUNCTUATION, callable)
layer_utils.validate_string_arg(
standardize,
allowable_strings=(LOWER_AND_STRIP_PUNCTUATION),
layer_name="TextVectorization",
arg_name="standardize",
allow_none=True,
allow_callables=True)
# 'split' must be one of (None, SPLIT_ON_WHITESPACE, callable)
layer_utils.validate_string_arg(
split,
allowable_strings=(SPLIT_ON_WHITESPACE),
layer_name="TextVectorization",
arg_name="split",
allow_none=True,
allow_callables=True)
# Support deprecated names for output_modes.
if output_mode == "binary":
output_mode = MULTI_HOT
if output_mode == "tf-idf":
output_mode = TF_IDF
# 'output_mode' must be one of (None, INT, COUNT, MULTI_HOT, TF_IDF)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(INT, COUNT, MULTI_HOT, TF_IDF),
layer_name="TextVectorization",
arg_name="output_mode",
allow_none=True)
# 'ngrams' must be one of (None, int, tuple(int))
if not (ngrams is None or
isinstance(ngrams, int) or
isinstance(ngrams, tuple) and
all(isinstance(item, int) for item in ngrams)):
raise ValueError(("`ngrams` must be None, an integer, or a tuple of "
"integers. Got %s") % (ngrams,))
# 'output_sequence_length' must be one of (None, int) and is only
# set if output_mode is INT.
if (output_mode == INT and not (isinstance(output_sequence_length, int) or
(output_sequence_length is None))):
raise ValueError("`output_sequence_length` must be either None or an "
"integer when `output_mode` is 'int'. "
"Got %s" % output_sequence_length)
if output_mode != INT and output_sequence_length is not None:
raise ValueError("`output_sequence_length` must not be set if "
"`output_mode` is not 'int'.")
self._max_tokens = max_tokens
self._standardize = standardize
self._split = split
self._ngrams_arg = ngrams
if isinstance(ngrams, int):
self._ngrams = tuple(range(1, ngrams + 1))
else:
self._ngrams = ngrams
self._output_mode = output_mode
self._output_sequence_length = output_sequence_length
vocabulary_size = 0
# IndexLookup needs to keep track the current vocab size outside of its
# layer weights. We persist it as a hidden part of the config during
# serialization.
if "vocabulary_size" in kwargs:
vocabulary_size = kwargs["vocabulary_size"]
del kwargs["vocabulary_size"]
super(TextVectorization, self).__init__(
combiner=None,
**kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell(
"TextVectorization").set(True)
self._index_lookup_layer = string_lookup.StringLookup(
max_tokens=max_tokens,
vocabulary=vocabulary,
pad_to_max_tokens=pad_to_max_tokens,
mask_token="",
output_mode=output_mode if output_mode is not None else INT,
vocabulary_size=vocabulary_size)
def _assert_same_type(self, expected_type, values, value_name):
if dtypes.as_dtype(expected_type) != dtypes.as_dtype(values.dtype):
raise RuntimeError("Expected %s type %s, got %s" %
(value_name, expected_type, values.dtype))
def compute_output_shape(self, input_shape):
if self._output_mode != INT:
return tensor_shape.TensorShape([input_shape[0], self._max_tokens])
if self._output_mode == INT and self._split is None:
if len(input_shape) <= 1:
input_shape = tuple(input_shape) + (1,)
return tensor_shape.TensorShape(input_shape)
if self._output_mode == INT and self._split is not None:
input_shape = list(input_shape)
if len(input_shape) <= 1:
input_shape = input_shape + [self._output_sequence_length]
else:
input_shape[1] = self._output_sequence_length
return tensor_shape.TensorShape(input_shape)
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = (dtypes.int64 if self._output_mode == INT
else backend.floatx())
return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
def adapt(self, data, reset_state=True):
"""Fits the state of the preprocessing layer to the dataset.
Overrides the default adapt method to apply relevant preprocessing to the
inputs before passing to the combiner.
Args:
data: The data to train on. It can be passed either as a tf.data Dataset,
as a NumPy array, a string tensor, or as a list of texts.
reset_state: Optional argument specifying whether to clear the state of
the layer at the start of the call to `adapt`. This must be True for
this layer, which does not support repeated calls to `adapt`.
"""
if not reset_state:
raise ValueError("TextVectorization does not support streaming adapts.")
# Build the layer explicitly with the original data shape instead of relying
# on an implicit call to `build` in the base layer's `adapt`, since
# preprocessing changes the input shape.
if isinstance(data, (list, tuple, np.ndarray)):
data = ops.convert_to_tensor_v2_with_dispatch(data)
if isinstance(data, ops.Tensor):
if data.shape.rank == 1:
data = array_ops.expand_dims(data, axis=-1)
self.build(data.shape)
preprocessed_inputs = self._preprocess(data)
elif isinstance(data, dataset_ops.DatasetV2):
# TODO(momernick): Replace this with a more V2-friendly API.
shape = dataset_ops.get_legacy_output_shapes(data)
if not isinstance(shape, tensor_shape.TensorShape):
raise ValueError("The dataset passed to 'adapt' must contain a single "
"tensor value.")
if shape.rank == 0:
data = data.map(lambda tensor: array_ops.expand_dims(tensor, 0))
shape = dataset_ops.get_legacy_output_shapes(data)
if shape.rank == 1:
data = data.map(lambda tensor: array_ops.expand_dims(tensor, -1))
self.build(dataset_ops.get_legacy_output_shapes(data))
preprocessed_inputs = data.map(self._preprocess)
else:
raise ValueError(
"adapt() requires a Dataset or an array as input, got {}".format(
type(data)))
self._index_lookup_layer.adapt(preprocessed_inputs)
def get_vocabulary(self, include_special_tokens=True):
"""Returns the current vocabulary of the layer.
Args:
include_special_tokens: If True, the returned vocabulary will include
the padding and OOV tokens, and a term's index in the vocabulary will
equal the term's index when calling the layer. If False, the returned
vocabulary will not include any padding or OOV tokens.
"""
return self._index_lookup_layer.get_vocabulary(include_special_tokens)
def vocabulary_size(self):
"""Gets the current size of the layer's vocabulary.
Returns:
      The integer size of the vocabulary, including optional mask and oov indices.
"""
return self._index_lookup_layer.vocabulary_size()
def get_config(self):
# This does not include the 'vocabulary' arg, since if the vocab was passed
# at init time it's now stored in variable state - we don't need to
# pull it off disk again.
config = {
"max_tokens": self._index_lookup_layer.max_tokens,
"standardize": self._standardize,
"split": self._split,
"ngrams": self._ngrams_arg,
"output_mode": self._output_mode,
"output_sequence_length": self._output_sequence_length,
"pad_to_max_tokens": self._index_lookup_layer.pad_to_max_tokens,
"vocabulary_size": self._index_lookup_layer.vocabulary_size(),
}
base_config = super(TextVectorization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def count_params(self):
# This method counts the number of scalars in the weights of this layer.
# Since this layer doesn't have any /actual/ weights (in that there's
# nothing in this layer that can be trained - we only use the weight
# abstraction for ease of saving!) we return 0.
return 0
def set_vocabulary(self, vocabulary, idf_weights=None):
"""Sets vocabulary (and optionally document frequency) data for this layer.
This method sets the vocabulary and idf weights for this layer directly,
instead of analyzing a dataset through 'adapt'. It should be used whenever
the vocab (and optionally document frequency) information is already known.
If vocabulary data is already present in the layer, this method will replace
it.
Args:
vocabulary: An array of string tokens, or a path to a file containing one
token per line.
idf_weights: An array of document frequency data with equal length to
vocab. Only necessary if the layer output_mode is TF_IDF.
Raises:
ValueError: If there are too many inputs, the inputs do not match, or
input data is missing.
RuntimeError: If the vocabulary cannot be set when this function is
        called. This happens when the layer is used in `"multi_hot"`, `"count"`,
        or `"tf_idf"` mode, `pad_to_max_tokens` is False, and the layer itself
        has already been called.
"""
self._index_lookup_layer.set_vocabulary(vocabulary, idf_weights=idf_weights)
def build(self, input_shape):
    # We have to use 'and not ==' here, because both 'input_shape[-1] != 1' and
    # 'input_shape[-1] == 1' can result in None for undefined shape axes. With
    # 'and !=', that None would make the whole expression evaluate to False when
    # the shape is undefined, but it needs to evaluate to True in that case so
    # the check below still runs.
if self._split is not None:
if input_shape.ndims > 1 and not input_shape[-1] == 1: # pylint: disable=g-comparison-negation
raise RuntimeError(
"When using TextVectorization to tokenize strings, the innermost "
"dimension of the input array must be 1, got shape "
"{}".format(input_shape))
super(TextVectorization, self).build(input_shape)
def _set_state_variables(self, updates):
if not self.built:
raise RuntimeError("_set_state_variables() must be called after build().")
if self._output_mode == TF_IDF:
self.set_vocabulary(updates[_VOCAB_NAME], idf_weights=updates[_IDF_NAME])
else:
self.set_vocabulary(updates[_VOCAB_NAME])
def _preprocess(self, inputs):
if self._standardize == LOWER_AND_STRIP_PUNCTUATION:
if tf_utils.is_ragged(inputs):
lowercase_inputs = ragged_functional_ops.map_flat_values(
gen_string_ops.string_lower, inputs)
# Depending on configuration, we may never touch the non-data tensor
# in the ragged inputs tensor. If that is the case, and this is the
# only layer in the keras model, running it will throw an error.
# To get around this, we wrap the result in an identity.
lowercase_inputs = array_ops.identity(lowercase_inputs)
else:
lowercase_inputs = gen_string_ops.string_lower(inputs)
inputs = string_ops.regex_replace(lowercase_inputs, DEFAULT_STRIP_REGEX,
"")
elif callable(self._standardize):
inputs = self._standardize(inputs)
elif self._standardize is not None:
raise ValueError(("%s is not a supported standardization. "
"TextVectorization supports the following options "
"for `standardize`: None, "
"'lower_and_strip_punctuation', or a "
"Callable.") % self._standardize)
if self._split is not None:
# If we are splitting, we validate that the 1st axis is of dimension 1 and
# so can be squeezed out. We do this here instead of after splitting for
# performance reasons - it's more expensive to squeeze a ragged tensor.
if inputs.shape.ndims > 1:
inputs = array_ops.squeeze(inputs, axis=-1)
if self._split == SPLIT_ON_WHITESPACE:
# This treats multiple whitespaces as one whitespace, and strips leading
# and trailing whitespace.
inputs = ragged_string_ops.string_split_v2(inputs)
elif callable(self._split):
inputs = self._split(inputs)
else:
        raise ValueError(
            ("%s is not a supported splitting. "
"TextVectorization supports the following options "
"for `split`: None, 'whitespace', or a Callable.") % self._split)
# Note that 'inputs' here can be either ragged or dense depending on the
# configuration choices for this Layer. The strings.ngrams op, however, does
# support both ragged and dense inputs.
if self._ngrams is not None:
inputs = ragged_string_ops.ngrams(
inputs, ngram_width=self._ngrams, separator=" ")
return inputs
def call(self, inputs):
if isinstance(inputs, (list, tuple, np.ndarray)):
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)
inputs = self._preprocess(inputs)
# If we're not doing any output processing, return right away.
if self._output_mode is None:
return inputs
lookup_data = self._index_lookup_layer(inputs)
if self._output_mode == INT:
# Maybe trim the output (NOOP if self._output_sequence_length is None).
output_tensor = lookup_data[..., :self._output_sequence_length]
output_shape = output_tensor.shape.as_list()
output_shape[-1] = self._output_sequence_length
# If it is a ragged tensor, convert it to dense with correct shape.
if tf_utils.is_ragged(output_tensor):
return output_tensor.to_tensor(default_value=0, shape=output_shape)
if self._output_sequence_length is None:
return output_tensor
padding, _ = array_ops.required_space_to_batch_paddings(
output_tensor.shape, output_shape)
return array_ops.pad(output_tensor, padding)
return lookup_data
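# Minimal usage sketch (added for illustration, not part of the original
# source; the constructor arguments shown are an assumption): the layer is
# normally adapted on a small corpus and then called on string tensors, e.g.
#
#   layer = TextVectorization(output_mode="int", output_sequence_length=4)
#   layer.adapt(["the quick brown fox", "fox jumped over the lazy dog"])
#   ids = layer(["the lazy fox"])  # -> int64 tensor of token ids, padded to 4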
| apache-2.0 | 7,732,761,738,281,165,000 | 44.320557 | 101 | 0.67579 | false |
rec/DMXIS | Macros/Python/formatter.py | 4 | 15337 | """Generic output formatting.
Formatter objects transform an abstract flow of formatting events into
specific output events on writer objects. Formatters manage several stack
structures to allow various properties of a writer object to be changed and
restored; writers need not be able to handle relative changes nor any sort
of ``change back'' operation. Specific writer properties which may be
controlled via formatter objects are horizontal alignment, font, and left
margin indentations. A mechanism is provided which supports providing
arbitrary, non-exclusive style settings to a writer as well. Additional
interfaces facilitate formatting events which are not reversible, such as
paragraph separation.
Writer objects encapsulate device interfaces. Abstract devices, such as
file formats, are supported as well as physical devices. The provided
implementations all work with abstract devices. The interface makes
available mechanisms for setting the properties which formatter objects
manage and inserting data into the output.
"""
import sys
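# Illustrative sketch (an assumption, not part of the original module): a
# typical pairing of the classes below reflows flowing data to 40 columns on
# stdout, e.g.
#
#   w = DumbWriter(maxcol=40)
#   f = AbstractFormatter(w)
#   f.add_flowing_data("Formatter objects turn abstract formatting events "
#                      "into concrete writer calls.")
#   f.end_paragraph(1)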
AS_IS = None
class NullFormatter:
"""A formatter which does nothing.
If the writer parameter is omitted, a NullWriter instance is created.
No methods of the writer are called by NullFormatter instances.
Implementations should inherit from this class if implementing a writer
interface but don't need to inherit any implementation.
"""
def __init__(self, writer=None):
if writer is None:
writer = NullWriter()
self.writer = writer
def end_paragraph(self, blankline): pass
def add_line_break(self): pass
def add_hor_rule(self, *args, **kw): pass
def add_label_data(self, format, counter, blankline=None): pass
def add_flowing_data(self, data): pass
def add_literal_data(self, data): pass
def flush_softspace(self): pass
def push_alignment(self, align): pass
def pop_alignment(self): pass
def push_font(self, x): pass
def pop_font(self): pass
def push_margin(self, margin): pass
def pop_margin(self): pass
def set_spacing(self, spacing): pass
def push_style(self, *styles): pass
def pop_style(self, n=1): pass
def assert_line_data(self, flag=1): pass
class AbstractFormatter:
"""The standard formatter.
This implementation has demonstrated wide applicability to many writers,
and may be used directly in most circumstances. It has been used to
implement a full-featured World Wide Web browser.
"""
# Space handling policy: blank spaces at the boundary between elements
# are handled by the outermost context. "Literal" data is not checked
# to determine context, so spaces in literal data are handled directly
# in all circumstances.
def __init__(self, writer):
self.writer = writer # Output device
self.align = None # Current alignment
self.align_stack = [] # Alignment stack
self.font_stack = [] # Font state
self.margin_stack = [] # Margin state
self.spacing = None # Vertical spacing state
self.style_stack = [] # Other state, e.g. color
self.nospace = 1 # Should leading space be suppressed
self.softspace = 0 # Should a space be inserted
self.para_end = 1 # Just ended a paragraph
self.parskip = 0 # Skipped space between paragraphs?
self.hard_break = 1 # Have a hard break
self.have_label = 0
def end_paragraph(self, blankline):
if not self.hard_break:
self.writer.send_line_break()
self.have_label = 0
if self.parskip < blankline and not self.have_label:
self.writer.send_paragraph(blankline - self.parskip)
self.parskip = blankline
self.have_label = 0
self.hard_break = self.nospace = self.para_end = 1
self.softspace = 0
def add_line_break(self):
if not (self.hard_break or self.para_end):
self.writer.send_line_break()
self.have_label = self.parskip = 0
self.hard_break = self.nospace = 1
self.softspace = 0
def add_hor_rule(self, *args, **kw):
if not self.hard_break:
self.writer.send_line_break()
self.writer.send_hor_rule(*args, **kw)
self.hard_break = self.nospace = 1
self.have_label = self.para_end = self.softspace = self.parskip = 0
def add_label_data(self, format, counter, blankline = None):
if self.have_label or not self.hard_break:
self.writer.send_line_break()
if not self.para_end:
self.writer.send_paragraph((blankline and 1) or 0)
if isinstance(format, str):
self.writer.send_label_data(self.format_counter(format, counter))
else:
self.writer.send_label_data(format)
self.nospace = self.have_label = self.hard_break = self.para_end = 1
self.softspace = self.parskip = 0
def format_counter(self, format, counter):
label = ''
for c in format:
if c == '1':
label = label + ('%d' % counter)
elif c in 'aA':
if counter > 0:
label = label + self.format_letter(c, counter)
elif c in 'iI':
if counter > 0:
label = label + self.format_roman(c, counter)
else:
label = label + c
return label
def format_letter(self, case, counter):
label = ''
while counter > 0:
counter, x = divmod(counter-1, 26)
# This makes a strong assumption that lowercase letters
# and uppercase letters form two contiguous blocks, with
# letters in order!
s = chr(ord(case) + x)
label = s + label
return label
def format_roman(self, case, counter):
ones = ['i', 'x', 'c', 'm']
fives = ['v', 'l', 'd']
label, index = '', 0
# This will die of IndexError when counter is too big
while counter > 0:
counter, x = divmod(counter, 10)
if x == 9:
label = ones[index] + ones[index+1] + label
elif x == 4:
label = ones[index] + fives[index] + label
else:
if x >= 5:
s = fives[index]
x = x-5
else:
s = ''
s = s + ones[index]*x
label = s + label
index = index + 1
if case == 'I':
return label.upper()
return label
def add_flowing_data(self, data):
if not data: return
prespace = data[:1].isspace()
postspace = data[-1:].isspace()
data = " ".join(data.split())
if self.nospace and not data:
return
elif prespace or self.softspace:
if not data:
if not self.nospace:
self.softspace = 1
self.parskip = 0
return
if not self.nospace:
data = ' ' + data
self.hard_break = self.nospace = self.para_end = \
self.parskip = self.have_label = 0
self.softspace = postspace
self.writer.send_flowing_data(data)
def add_literal_data(self, data):
if not data: return
if self.softspace:
self.writer.send_flowing_data(" ")
self.hard_break = data[-1:] == '\n'
self.nospace = self.para_end = self.softspace = \
self.parskip = self.have_label = 0
self.writer.send_literal_data(data)
def flush_softspace(self):
if self.softspace:
self.hard_break = self.para_end = self.parskip = \
self.have_label = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
def push_alignment(self, align):
if align and align != self.align:
self.writer.new_alignment(align)
self.align = align
self.align_stack.append(align)
else:
self.align_stack.append(self.align)
def pop_alignment(self):
if self.align_stack:
del self.align_stack[-1]
if self.align_stack:
self.align = align = self.align_stack[-1]
self.writer.new_alignment(align)
else:
self.align = None
self.writer.new_alignment(None)
def push_font(self, (size, i, b, tt)):
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
if self.font_stack:
csize, ci, cb, ctt = self.font_stack[-1]
if size is AS_IS: size = csize
if i is AS_IS: i = ci
if b is AS_IS: b = cb
if tt is AS_IS: tt = ctt
font = (size, i, b, tt)
self.font_stack.append(font)
self.writer.new_font(font)
def pop_font(self):
if self.font_stack:
del self.font_stack[-1]
if self.font_stack:
font = self.font_stack[-1]
else:
font = None
self.writer.new_font(font)
def push_margin(self, margin):
self.margin_stack.append(margin)
fstack = filter(None, self.margin_stack)
if not margin and fstack:
margin = fstack[-1]
self.writer.new_margin(margin, len(fstack))
def pop_margin(self):
if self.margin_stack:
del self.margin_stack[-1]
fstack = filter(None, self.margin_stack)
if fstack:
margin = fstack[-1]
else:
margin = None
self.writer.new_margin(margin, len(fstack))
def set_spacing(self, spacing):
self.spacing = spacing
self.writer.new_spacing(spacing)
def push_style(self, *styles):
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
for style in styles:
self.style_stack.append(style)
self.writer.new_styles(tuple(self.style_stack))
def pop_style(self, n=1):
del self.style_stack[-n:]
self.writer.new_styles(tuple(self.style_stack))
def assert_line_data(self, flag=1):
self.nospace = self.hard_break = not flag
self.para_end = self.parskip = self.have_label = 0
class NullWriter:
"""Minimal writer interface to use in testing & inheritance.
A writer which only provides the interface definition; no actions are
taken on any methods. This should be the base class for all writers
which do not need to inherit any implementation methods.
"""
def __init__(self): pass
def flush(self): pass
def new_alignment(self, align): pass
def new_font(self, font): pass
def new_margin(self, margin, level): pass
def new_spacing(self, spacing): pass
def new_styles(self, styles): pass
def send_paragraph(self, blankline): pass
def send_line_break(self): pass
def send_hor_rule(self, *args, **kw): pass
def send_label_data(self, data): pass
def send_flowing_data(self, data): pass
def send_literal_data(self, data): pass
class AbstractWriter(NullWriter):
"""A writer which can be used in debugging formatters, but not much else.
Each method simply announces itself by printing its name and
arguments on standard output.
"""
def new_alignment(self, align):
print "new_alignment(%r)" % (align,)
def new_font(self, font):
print "new_font(%r)" % (font,)
def new_margin(self, margin, level):
print "new_margin(%r, %d)" % (margin, level)
def new_spacing(self, spacing):
print "new_spacing(%r)" % (spacing,)
def new_styles(self, styles):
print "new_styles(%r)" % (styles,)
def send_paragraph(self, blankline):
print "send_paragraph(%r)" % (blankline,)
def send_line_break(self):
print "send_line_break()"
def send_hor_rule(self, *args, **kw):
print "send_hor_rule()"
def send_label_data(self, data):
print "send_label_data(%r)" % (data,)
def send_flowing_data(self, data):
print "send_flowing_data(%r)" % (data,)
def send_literal_data(self, data):
print "send_literal_data(%r)" % (data,)
class DumbWriter(NullWriter):
"""Simple writer class which writes output on the file object passed in
as the file parameter or, if file is omitted, on standard output. The
output is simply word-wrapped to the number of columns specified by
the maxcol parameter. This class is suitable for reflowing a sequence
of paragraphs.
"""
def __init__(self, file=None, maxcol=72):
self.file = file or sys.stdout
self.maxcol = maxcol
NullWriter.__init__(self)
self.reset()
def reset(self):
self.col = 0
self.atbreak = 0
def send_paragraph(self, blankline):
self.file.write('\n'*blankline)
self.col = 0
self.atbreak = 0
def send_line_break(self):
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_hor_rule(self, *args, **kw):
self.file.write('\n')
self.file.write('-'*self.maxcol)
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_literal_data(self, data):
self.file.write(data)
i = data.rfind('\n')
if i >= 0:
self.col = 0
data = data[i+1:]
data = data.expandtabs()
self.col = self.col + len(data)
self.atbreak = 0
def send_flowing_data(self, data):
if not data: return
atbreak = self.atbreak or data[0].isspace()
col = self.col
maxcol = self.maxcol
write = self.file.write
for word in data.split():
if atbreak:
if col + len(word) >= maxcol:
write('\n')
col = 0
else:
write(' ')
col = col + 1
write(word)
col = col + len(word)
atbreak = 1
self.col = col
self.atbreak = data[-1].isspace()
def test(file = None):
w = DumbWriter()
f = AbstractFormatter(w)
if file is not None:
fp = open(file)
elif sys.argv[1:]:
fp = open(sys.argv[1])
else:
fp = sys.stdin
for line in fp:
if line == '\n':
f.end_paragraph(1)
else:
f.add_flowing_data(line)
f.end_paragraph(0)
if __name__ == '__main__':
test()
| artistic-2.0 | -7,549,541,865,177,700,000 | 32.542793 | 77 | 0.558453 | false |
pyq881120/amoco | amoco/system/linux_arm.py | 6 | 1636 | # -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2006-2011 Axel Tillequin ([email protected])
# published under GPLv2 license
from amoco.system.core import *
import amoco.arch.arm.cpu_armv7 as cpu
PAGESIZE = 4096
class ELF(CoreExec):
def __init__(self,p):
CoreExec.__init__(self,p,cpu)
# load the program into virtual memory (populate the mmap dict)
def load_binary(self):
p = self.bin
if p!=None:
# create text and data segments according to elf header:
for s in p.Phdr:
ms = p.loadsegment(s,PAGESIZE)
if ms!=None:
vaddr,data = ms.items()[0]
self.mmap.write(vaddr,data)
# create the dynamic segments:
self.load_shlib()
# create the stack zone:
self.mmap.newzone(cpu.sp)
# call dynamic linker to populate mmap with shared libs:
# for now, the external libs are seen through the elf dynamic section:
def load_shlib(self):
for k,f in self.bin._Elf32__dynamic(None).iteritems():
self.mmap.write(k,cpu.ext(f,size=32))
def initenv(self):
from amoco.cas.mapper import mapper
m = mapper()
for k,v in ((cpu.pc, cpu.cst(self.bin.entrypoints[0],32)),
):
m[k] = v
return m
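    # Descriptive note on the load flow above (added comment, derived from the
    # code itself): load_binary() walks the ELF program headers (p.Phdr), maps
    # every loadable segment into self.mmap at its virtual address, exposes the
    # dynamic symbols as 32-bit external expressions via load_shlib(), and
    # reserves a stack zone around cpu.sp; initenv() then returns a mapper with
    # pc set to the binary's first entry point.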
# LIBC HOOKS DEFINED HERE :
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# SYSCALLS:
#----------------------------------------------------------------------------
| gpl-2.0 | 185,811,454,582,083,680 | 31.078431 | 77 | 0.495721 | false |
joodo/davanoffi | board/migrations/0002_auto_20150930_1549.py | 2 | 1141 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('board', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=140, db_index=True)),
('music', models.FileField(null=True, upload_to=b'board/tag_music')),
('background_color', models.CharField(default=b'101010', max_length=6)),
('text_color', models.CharField(default=b'FFFFFF', max_length=6)),
],
),
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(null=True, upload_to=b'board/post_image'),
),
migrations.AddField(
model_name='tag',
name='posts',
field=models.ManyToManyField(related_name='tags', to='board.Post'),
),
]
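# Illustrative note (an assumption, not part of the generated migration): this
# migration is applied together with its '0001_initial' dependency through
# Django's migrate command, e.g.
#
#   python manage.py migrate board 0002_auto_20150930_1549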
| unlicense | 8,702,097,238,948,286,000 | 32.558824 | 114 | 0.5539 | false |
Tanych/CodeTracking | 32-Longest-Valid-Parentheses/solution.py | 1 | 1031 | class Solution(object):
def longestValidParentheses(self, s):
"""
:type s: str
:rtype: int
"""
n=len(s)
if n<2:
return 0
# to record the valid '()'
stk=[]
maxlen=0
for i in xrange(n):
if s[i]=='(':
stk.append(i)
else:
                # if stk is not empty and its top is '(' we found a match
if stk and s[stk[-1]]=='(':
stk.pop()
else:
stk.append(i)
        # after the scan, stk holds the indices that could not be matched
if not stk:
return len(s)
        # otherwise, the longest valid run lies between consecutive unmatched indices
t1,t2=n,0
while stk:
t2=stk.pop()
maxlen=max(maxlen,t1-t2-1)
t1=t2
        # finally count the prefix before the first unmatched index: the loop
        # above ends once stk is empty, so the stretch of length t1 (indices
        # 0..t1-1) has not been counted yet
maxlen=max(maxlen,t1)
return maxlen
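# Quick sanity check (added for illustration, not part of the original
# solution file); expected results follow the classic examples:
#   "(()"    -> 2   (the inner "()")
#   ")()())" -> 4   (the middle "()()")
if __name__ == '__main__':
    sol = Solution()
    print sol.longestValidParentheses("(()")      # 2
    print sol.longestValidParentheses(")()())")   # 4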
| mit | -301,014,748,272,323,140 | 25.461538 | 62 | 0.417071 | false |
abantam/pmtud | nsc/scons-local-1.2.0.d20090223/SCons/Tool/bcc32.py | 19 | 2994 | """SCons.Tool.bcc32
XXX
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/bcc32.py 4043 2009/02/23 09:06:45 scons"
import os
import os.path
import string
import SCons.Defaults
import SCons.Tool
import SCons.Util
def findIt(program, env):
# First search in the SCons path and then the OS path:
borwin = env.WhereIs(program) or SCons.Util.WhereIs(program)
if borwin:
dir = os.path.dirname(borwin)
env.PrependENVPath('PATH', dir)
return borwin
def generate(env):
findIt('bcc32', env)
"""Add Builders and construction variables for bcc to an
Environment."""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
for suffix in ['.c', '.cpp']:
static_obj.add_action(suffix, SCons.Defaults.CAction)
shared_obj.add_action(suffix, SCons.Defaults.ShCAction)
static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
env['CC'] = 'bcc32'
env['CCFLAGS'] = SCons.Util.CLVar('')
env['CFLAGS'] = SCons.Util.CLVar('')
env['CCCOM'] = '$CC -q $CFLAGS $CCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o$TARGET $SOURCES'
env['SHCC'] = '$CC'
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
env['SHCFLAGS'] = SCons.Util.CLVar('$CFLAGS')
env['SHCCCOM'] = '$SHCC -WD $SHCFLAGS $SHCCFLAGS $CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -c -o$TARGET $SOURCES'
env['CPPDEFPREFIX'] = '-D'
env['CPPDEFSUFFIX'] = ''
env['INCPREFIX'] = '-I'
env['INCSUFFIX'] = ''
env['SHOBJSUFFIX'] = '.dll'
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0
env['CFILESUFFIX'] = '.cpp'
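# Hedged usage note (an assumption, not part of the original tool module): a
# SConstruct would typically request this tool explicitly, e.g.
#
#   env = Environment(tools=['bcc32'])
#   env.Program('hello', ['hello.c'])
#
# which relies on generate() above and exists() below being picked up by SCons.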
def exists(env):
return findIt('bcc32', env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 | 8,319,580,775,648,894,000 | 35.512195 | 115 | 0.694723 | false |
rspavel/spack | var/spack/repos/builtin/packages/libharu/package.py | 5 | 1645 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class Libharu(AutotoolsPackage):
"""libharu - free PDF library.
Haru is a free, cross platform, open-sourced software library for
generating PDF."""
homepage = "http://libharu.org"
url = "https://github.com/libharu/libharu/archive/RELEASE_2_3_0.tar.gz"
git = "https://github.com/libharu/libharu.git"
version('master', branch='master')
version('2.3.0', sha256='8f9e68cc5d5f7d53d1bc61a1ed876add1faf4f91070dbc360d8b259f46d9a4d2')
version('2.2.0', sha256='5e63246d2da0272a9dbe5963fd827c7efa6e29d97a2d047c0d4c5f0b780f10b5')
depends_on('libtool', type=('build'))
depends_on('autoconf', type=('build'))
depends_on('automake', type=('build'))
depends_on('libpng')
depends_on('zlib')
def autoreconf(self, spec, prefix):
"""execute their autotools wrapper script"""
if os.path.exists('./buildconf.sh'):
bash = which('bash')
bash('./buildconf.sh', '--force')
def configure_args(self):
"""Point to spack-installed zlib and libpng"""
spec = self.spec
args = []
args.append('--with-zlib={0}'.format(spec['zlib'].prefix))
args.append('--with-png={0}'.format(spec['libpng'].prefix))
return args
def url_for_version(self, version):
url = 'https://github.com/libharu/libharu/archive/RELEASE_{0}.tar.gz'
return url.format(version.underscored)
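# Illustrative note (an assumption, not part of the original recipe): once this
# file is available in a Spack repository, the library is typically built with
# a plain spec such as
#
#   spack install libharu@2.3.0
#
# with autoreconf() supplying the buildconf.sh call and configure_args()
# pointing configure at the Spack-provided zlib and libpng.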
| lgpl-2.1 | -2,960,561,503,628,441,600 | 33.270833 | 95 | 0.656535 | false |
eternalthinker/nasa-spaceapps-syd17-aviato | ML/latlongv3.py | 1 | 5675 | # to build dataset master sheet
# writes into a cumulative CSV ('cummulative.csv')
import os
import pandas as pd
import sys
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import multiprocessing
import csv
import time
def get_sample(df):
df2 = df.sample(frac=0.01)
#print df2[location-long],df2[location-lat]
df2.to_csv('Continental black-tailed godwits.csv')
return df2
def writetocsv(listofdicts,fieldnames):
with open ('cummulative.csv','w') as csvfd:
wr = csv.DictWriter(csvfd,fieldnames=fieldnames)
wr.writeheader()
for row in listofdicts:
wr.writerow(row)
def writefromcsv(csvfile,fieldnames):
listofrows = []
with open('cummulative.csv') as csvfd:
reader = csv.DictReader(csvfd)
for row in reader:
#print row
listofrows.append(row)
return listofrows
def gencords(lonrange,latrange): #generate 1000 * 1000 lon,lat rect patch
lons = np.arange(lonrange[0],lonrange[1],0.01).tolist() #converts array to list
lats = np.arange(latrange[0],latrange[1],0.01).tolist()
cords={}
cordslist = []
for lon in lons:
for lat in lats:
#cords[(lon,lat)]={'visible':0} # lon,lat and default visibility, this needs to be extended to show
cordslist.append({'location-long':lon,'location-lat':lat,'visible':0,'timestamp':0,'htype':0})
#return cords # return as np matrix
writetocsv(cordslist,['location-long','location-lat','visible','timestamp','htype'])
#return cordslist
def fromcsv(filepath, colnames): #returns the lat and long from the file. for example migration file
df = pd.read_csv(filepath)
#df = get_sample(df)
return df.as_matrix(columns =colnames).tolist() #df ->np array ->list
'''
def nearestnei(cords,arr2): # this is where interpolation can be made.
# will multithreading improvise this?
cordlist = []
for cord2 in arr2:
nearnei = [10,[0,0]] # neinei[0] = distance , nearnei[1] = actual [lon,lat]
for cord1 in cords.keys():
dist = math.sqrt(math.pow((cord2[0]-cord1[0]),2) + math.pow((cord2[1]-cord1[1]),2)) # euclidean distance lon2-lon1, lat1-lat1
#print dist,cord2,cord1
if dist < nearnei[0] :
nearnei[0] = dist
nearnei[1][0],nearnei[1][1] = cord1[0],cord1[1]
cords[(nearnei[1][0],nearnei[1][1])]['visible'] = 1 # to mark app location where the bird was sighted.
#print cords[(nearnei[1][0],nearnei[1][1])],(nearnei[1][0],nearnei[1][1])
cordlist.append((nearnei[1][0],nearnei[1][1]))
return cordlist
#cumcsv(cords)
#print cords
'''
def nearestnei(cordslist,arr2): # this is where interpolation can be made.
	# will multithreading improve this?
cordlist = []
idx = 0
for cord2 in arr2:
nearnei = [10,[0,0],idx] # neinei[0] = distance , nearnei[1] = actual [lon,lat]
for i in range(len(cordslist)):
dist = math.sqrt(math.pow((cord2[0]-float(cordslist[i]['location-long'])),2) + math.pow((cord2[1]-float(cordslist[i]['location-lat'])),2)) # euclidean distance lon2-lon1, lat1-lat1
#print dist,cord2,cord1
if dist < nearnei[0] :
nearnei[0] = dist
nearnei[1][0],nearnei[1][1] = cordslist[i]['location-long'],cordslist[i]['location-lat']
idx = i
cordslist[idx]['visible'] = 1 # to mark app location where the bird was sighted.
cordslist[idx]['timestamp'] = time.strptime(cord2[2][:-4],"%Y-%m-%d %H:%M:%S").tm_mon
#print cords[(nearnei[1][0],nearnei[1][1])],(nearnei[1][0],nearnei[1][1])
cordlist.append((cordslist[idx]['location-long'],cordslist[idx]['location-lat']))
writetocsv(cordslist,['location-long','location-lat','visible','timestamp','htype'])
return cordlist
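# Hedged optimisation sketch (an assumption, not in the original script): the
# brute-force scan in nearestnei() is O(len(arr2) * len(cordslist)). A KD-tree
# over the grid answers the same nearest-neighbour queries in roughly O(log n)
# each, e.g. with SciPy:
#
#   from scipy.spatial import cKDTree
#   grid = [(float(c['location-long']), float(c['location-lat'])) for c in cordslist]
#   tree = cKDTree(grid)
#   dists, idxs = tree.query([(lon, lat) for lon, lat, _ in arr2])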
def nearesthab(cordslist,arr3):
cordlist = []
idx = 0
for cord2 in arr3:
nearhab = [100,[0,0],idx]
for i in range(len(cordslist)):
dist = math.sqrt(math.pow((cord2[0]-float(cordslist[i]['location-long'])),2) + math.pow((cord2[1]-float(cordslist[i]['location-lat'])),2))
if dist < nearhab[0]:
nearhab[0] = dist
nearhab[1][0],nearhab[1][1] = cordslist[i]['location-long'],cordslist[i]['location-lat']
idx = i
cordslist[idx]['htype']=cord2[2]
writetocsv(cordslist,['location-long','location-lat','visible','timestamp','htype'])
def plot_map(arr2): # projecting on a base map (each datapoint is lon,lat)
numar = np.array(arr2)
bmap = Basemap(lat_0=0,lon_0=0) #central latitute,central longitude
bmap.drawcoastlines() #to draw coastlines
bmap.drawmapboundary() #draw a line around the map region
blon,blat=bmap(numar[:,0],numar[:,1]) # before using tthe values. they need to be transformed #ref1
	#numar[:,0] is the longitude column, numar[:,1] is the latitude column of the numpy array
bmap.plot(blon,blat,marker='o',color='k')
plt.show()
def main():
filepath2 = os.path.join('~','Desktop','everything nice','Programming','hackathon','SpaceApps2017','submission','samplesubs','blacktailgodwit_short.csv') # species tracks #continentail black-tailed godwits
filepath1 = os.path.join('~','Desktop','everything nice','Programming','hackathon','SpaceApps2017','submission','samplesubs','cummulative.csv') # habitats
filepath3 = os.path.join('~','Desktop','everything nice','Programming','hackathon','SpaceApps2017','submission','samplesubs','habitcsv.csv')
gencords([-6.022,-5.628],[38.946,39.085])
arr1 = writefromcsv(filepath1,['location-long','location-lat','visible','timestamp','htype'])
arr2 = fromcsv(filepath2,['location-long','location-lat','timestamp'])
#print arr2
arr3 = fromcsv(filepath3,['location-long','location-lat','htype'])
print "approximating..."
cordl = nearestnei(arr1,arr2)
print "habitat..."
nearesthab(arr1,arr3)
plot_map(arr2)
#plot_map(cordl)
main()
| mit | 4,205,732,828,279,096,300 | 38.964789 | 206 | 0.680176 | false |
rolandmansilla/microblog | flask/lib/python2.7/site-packages/pbr/tests/test_setup.py | 22 | 17125 | # Copyright (c) 2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import tempfile
try:
import cStringIO as io
BytesIO = io.StringIO
except ImportError:
import io
BytesIO = io.BytesIO
import fixtures
from pbr import git
from pbr import options
from pbr import packaging
from pbr.tests import base
class SkipFileWrites(base.BaseTestCase):
scenarios = [
('changelog_option_true',
dict(option_key='skip_changelog', option_value='True',
env_key='SKIP_WRITE_GIT_CHANGELOG', env_value=None,
pkg_func=git.write_git_changelog, filename='ChangeLog')),
('changelog_option_false',
dict(option_key='skip_changelog', option_value='False',
env_key='SKIP_WRITE_GIT_CHANGELOG', env_value=None,
pkg_func=git.write_git_changelog, filename='ChangeLog')),
('changelog_env_true',
dict(option_key='skip_changelog', option_value='False',
env_key='SKIP_WRITE_GIT_CHANGELOG', env_value='True',
pkg_func=git.write_git_changelog, filename='ChangeLog')),
('changelog_both_true',
dict(option_key='skip_changelog', option_value='True',
env_key='SKIP_WRITE_GIT_CHANGELOG', env_value='True',
pkg_func=git.write_git_changelog, filename='ChangeLog')),
('authors_option_true',
dict(option_key='skip_authors', option_value='True',
env_key='SKIP_GENERATE_AUTHORS', env_value=None,
pkg_func=git.generate_authors, filename='AUTHORS')),
('authors_option_false',
dict(option_key='skip_authors', option_value='False',
env_key='SKIP_GENERATE_AUTHORS', env_value=None,
pkg_func=git.generate_authors, filename='AUTHORS')),
('authors_env_true',
dict(option_key='skip_authors', option_value='False',
env_key='SKIP_GENERATE_AUTHORS', env_value='True',
pkg_func=git.generate_authors, filename='AUTHORS')),
('authors_both_true',
dict(option_key='skip_authors', option_value='True',
env_key='SKIP_GENERATE_AUTHORS', env_value='True',
pkg_func=git.generate_authors, filename='AUTHORS')),
]
def setUp(self):
super(SkipFileWrites, self).setUp()
self.temp_path = self.useFixture(fixtures.TempDir()).path
self.root_dir = os.path.abspath(os.path.curdir)
self.git_dir = os.path.join(self.root_dir, ".git")
if not os.path.exists(self.git_dir):
self.skipTest("%s is missing; skipping git-related checks"
% self.git_dir)
return
self.filename = os.path.join(self.temp_path, self.filename)
self.option_dict = dict()
if self.option_key is not None:
self.option_dict[self.option_key] = ('setup.cfg',
self.option_value)
self.useFixture(
fixtures.EnvironmentVariable(self.env_key, self.env_value))
def test_skip(self):
self.pkg_func(git_dir=self.git_dir,
dest_dir=self.temp_path,
option_dict=self.option_dict)
self.assertEqual(
not os.path.exists(self.filename),
(self.option_value.lower() in options.TRUE_VALUES
or self.env_value is not None))
_changelog_content = """04316fe (review/monty_taylor/27519) Make python
378261a Add an integration test script.
3c373ac (HEAD, tag: 2013.2.rc2, tag: 2013.2, milestone-proposed) Merge "Lib
182feb3 (tag: 0.5.17) Fix pip invocation for old versions of pip.
fa4f46e (tag: 0.5.16) Remove explicit depend on distribute.
d1c53dd Use pip instead of easy_install for installation.
a793ea1 Merge "Skip git-checkout related tests when .git is missing"
6c27ce7 Skip git-checkout related tests when .git is missing
04984a5 Refactor hooks file.
a65e8ee (tag: 0.5.14, tag: 0.5.13) Remove jinja pin.
"""
class GitLogsTest(base.BaseTestCase):
def setUp(self):
super(GitLogsTest, self).setUp()
self.temp_path = self.useFixture(fixtures.TempDir()).path
self.root_dir = os.path.abspath(os.path.curdir)
self.git_dir = os.path.join(self.root_dir, ".git")
self.useFixture(
fixtures.EnvironmentVariable('SKIP_GENERATE_AUTHORS'))
self.useFixture(
fixtures.EnvironmentVariable('SKIP_WRITE_GIT_CHANGELOG'))
def test_write_git_changelog(self):
self.useFixture(fixtures.FakePopen(lambda _: {
"stdout": BytesIO(_changelog_content.encode('utf-8'))
}))
git.write_git_changelog(git_dir=self.git_dir,
dest_dir=self.temp_path)
with open(os.path.join(self.temp_path, "ChangeLog"), "r") as ch_fh:
changelog_contents = ch_fh.read()
self.assertIn("2013.2", changelog_contents)
self.assertIn("0.5.17", changelog_contents)
self.assertIn("------", changelog_contents)
self.assertIn("Refactor hooks file", changelog_contents)
self.assertNotIn("Refactor hooks file.", changelog_contents)
self.assertNotIn("182feb3", changelog_contents)
self.assertNotIn("review/monty_taylor/27519", changelog_contents)
self.assertNotIn("0.5.13", changelog_contents)
self.assertNotIn('Merge "', changelog_contents)
def test_generate_authors(self):
author_old = u"Foo Foo <[email protected]>"
author_new = u"Bar Bar <[email protected]>"
co_author = u"Foo Bar <[email protected]>"
co_author_by = u"Co-authored-by: " + co_author
git_log_cmd = (
"git --git-dir=%s log --format=%%aN <%%aE>"
% self.git_dir)
git_co_log_cmd = ("git --git-dir=%s log" % self.git_dir)
git_top_level = "git rev-parse --show-toplevel"
cmd_map = {
git_log_cmd: author_new,
git_co_log_cmd: co_author_by,
git_top_level: self.root_dir,
}
exist_files = [self.git_dir,
os.path.join(self.temp_path, "AUTHORS.in")]
self.useFixture(fixtures.MonkeyPatch(
"os.path.exists",
lambda path: os.path.abspath(path) in exist_files))
def _fake_run_shell_command(cmd, **kwargs):
return cmd_map[" ".join(cmd)]
self.useFixture(fixtures.MonkeyPatch(
"pbr.git._run_shell_command",
_fake_run_shell_command))
with open(os.path.join(self.temp_path, "AUTHORS.in"), "w") as auth_fh:
auth_fh.write("%s\n" % author_old)
git.generate_authors(git_dir=self.git_dir,
dest_dir=self.temp_path)
with open(os.path.join(self.temp_path, "AUTHORS"), "r") as auth_fh:
authors = auth_fh.read()
self.assertTrue(author_old in authors)
self.assertTrue(author_new in authors)
self.assertTrue(co_author in authors)
class BuildSphinxTest(base.BaseTestCase):
scenarios = [
('true_autodoc_caps',
dict(has_opt=True, autodoc='True', has_autodoc=True)),
('true_autodoc_caps_with_excludes',
dict(has_opt=True, autodoc='True', has_autodoc=True,
excludes="fake_package.fake_private_module\n"
"fake_package.another_fake_*\n"
"fake_package.unknown_module")),
('true_autodoc_lower',
dict(has_opt=True, autodoc='true', has_autodoc=True)),
('false_autodoc',
dict(has_opt=True, autodoc='False', has_autodoc=False)),
('no_autodoc',
dict(has_opt=False, autodoc='False', has_autodoc=False)),
]
def setUp(self):
super(BuildSphinxTest, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
"sphinx.setup_command.BuildDoc.run", lambda self: None))
from distutils import dist
self.distr = dist.Distribution()
self.distr.packages = ("fake_package",)
self.distr.command_options["build_sphinx"] = {
"source_dir": ["a", "."]}
pkg_fixture = fixtures.PythonPackage(
"fake_package", [("fake_module.py", b""),
("another_fake_module_for_testing.py", b""),
("fake_private_module.py", b"")])
self.useFixture(pkg_fixture)
self.useFixture(base.DiveDir(pkg_fixture.base))
self.distr.command_options["pbr"] = {}
if hasattr(self, "excludes"):
self.distr.command_options["pbr"]["autodoc_exclude_modules"] = (
'setup.cfg',
"fake_package.fake_private_module\n"
"fake_package.another_fake_*\n"
"fake_package.unknown_module")
if self.has_opt:
options = self.distr.command_options["pbr"]
options["autodoc_index_modules"] = ('setup.cfg', self.autodoc)
def test_build_doc(self):
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.run()
self.assertTrue(
os.path.exists("api/autoindex.rst") == self.has_autodoc)
self.assertTrue(
os.path.exists(
"api/fake_package.fake_module.rst") == self.has_autodoc)
if not self.has_autodoc or hasattr(self, "excludes"):
assertion = self.assertFalse
else:
assertion = self.assertTrue
assertion(
os.path.exists(
"api/fake_package.fake_private_module.rst"))
assertion(
os.path.exists(
"api/fake_package.another_fake_module_for_testing.rst"))
def test_builders_config(self):
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.finalize_options()
self.assertEqual(2, len(build_doc.builders))
self.assertIn('html', build_doc.builders)
self.assertIn('man', build_doc.builders)
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.builders = ''
build_doc.finalize_options()
self.assertEqual('', build_doc.builders)
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.builders = 'man'
build_doc.finalize_options()
self.assertEqual(1, len(build_doc.builders))
self.assertIn('man', build_doc.builders)
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.builders = 'html,man,doctest'
build_doc.finalize_options()
self.assertIn('html', build_doc.builders)
self.assertIn('man', build_doc.builders)
self.assertIn('doctest', build_doc.builders)
def test_cmd_builder_override(self):
if self.has_opt:
self.distr.command_options["pbr"] = {
"autodoc_index_modules": ('setup.cfg', self.autodoc)
}
self.distr.command_options["build_sphinx"]["builder"] = (
"command line", "non-existing-builder")
build_doc = packaging.LocalBuildDoc(self.distr)
self.assertNotIn('non-existing-builder', build_doc.builders)
self.assertIn('html', build_doc.builders)
# process command line options which should override config
build_doc.finalize_options()
self.assertIn('non-existing-builder', build_doc.builders)
self.assertNotIn('html', build_doc.builders)
def test_cmd_builder_override_multiple_builders(self):
if self.has_opt:
self.distr.command_options["pbr"] = {
"autodoc_index_modules": ('setup.cfg', self.autodoc)
}
self.distr.command_options["build_sphinx"]["builder"] = (
"command line", "builder1,builder2")
build_doc = packaging.LocalBuildDoc(self.distr)
build_doc.finalize_options()
self.assertEqual(["builder1", "builder2"], build_doc.builders)
class ParseRequirementsTest(base.BaseTestCase):
def setUp(self):
super(ParseRequirementsTest, self).setUp()
(fd, self.tmp_file) = tempfile.mkstemp(prefix='openstack',
suffix='.setup')
def test_parse_requirements_normal(self):
with open(self.tmp_file, 'w') as fh:
fh.write("foo\nbar")
self.assertEqual(['foo', 'bar'],
packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_with_git_egg_url(self):
with open(self.tmp_file, 'w') as fh:
fh.write("-e git://foo.com/zipball#egg=bar")
self.assertEqual(['bar'],
packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_with_versioned_git_egg_url(self):
with open(self.tmp_file, 'w') as fh:
fh.write("-e git://foo.com/zipball#egg=bar-1.2.4")
self.assertEqual(['bar>=1.2.4'],
packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_with_http_egg_url(self):
with open(self.tmp_file, 'w') as fh:
fh.write("https://foo.com/zipball#egg=bar")
self.assertEqual(['bar'],
packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_with_versioned_http_egg_url(self):
with open(self.tmp_file, 'w') as fh:
fh.write("https://foo.com/zipball#egg=bar-4.2.1")
self.assertEqual(['bar>=4.2.1'],
packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_removes_index_lines(self):
with open(self.tmp_file, 'w') as fh:
fh.write("-f foobar")
self.assertEqual([], packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_override_with_env(self):
with open(self.tmp_file, 'w') as fh:
fh.write("foo\nbar")
self.useFixture(
fixtures.EnvironmentVariable('PBR_REQUIREMENTS_FILES',
self.tmp_file))
self.assertEqual(['foo', 'bar'],
packaging.parse_requirements())
def test_parse_requirements_override_with_env_multiple_files(self):
with open(self.tmp_file, 'w') as fh:
fh.write("foo\nbar")
self.useFixture(
fixtures.EnvironmentVariable('PBR_REQUIREMENTS_FILES',
"no-such-file," + self.tmp_file))
self.assertEqual(['foo', 'bar'],
packaging.parse_requirements())
def test_get_requirement_from_file_empty(self):
actual = packaging.get_reqs_from_files([])
self.assertEqual([], actual)
def test_parse_requirements_with_comments(self):
with open(self.tmp_file, 'w') as fh:
fh.write("# this is a comment\nfoobar\n# and another one\nfoobaz")
self.assertEqual(['foobar', 'foobaz'],
packaging.parse_requirements([self.tmp_file]))
def test_parse_requirements_python_version(self):
with open("requirements-py%d.txt" % sys.version_info[0],
"w") as fh:
fh.write("# this is a comment\nfoobar\n# and another one\nfoobaz")
self.assertEqual(['foobar', 'foobaz'],
packaging.parse_requirements())
def test_parse_requirements_right_python_version(self):
with open("requirements-py1.txt", "w") as fh:
fh.write("thisisatrap")
with open("requirements-py%d.txt" % sys.version_info[0],
"w") as fh:
fh.write("# this is a comment\nfoobar\n# and another one\nfoobaz")
self.assertEqual(['foobar', 'foobaz'],
packaging.parse_requirements())
class ParseDependencyLinksTest(base.BaseTestCase):
def setUp(self):
super(ParseDependencyLinksTest, self).setUp()
(fd, self.tmp_file) = tempfile.mkstemp(prefix="openstack",
suffix=".setup")
def test_parse_dependency_normal(self):
with open(self.tmp_file, "w") as fh:
fh.write("http://test.com\n")
self.assertEqual(
["http://test.com"],
packaging.parse_dependency_links([self.tmp_file]))
def test_parse_dependency_with_git_egg_url(self):
with open(self.tmp_file, "w") as fh:
fh.write("-e git://foo.com/zipball#egg=bar")
self.assertEqual(
["git://foo.com/zipball#egg=bar"],
packaging.parse_dependency_links([self.tmp_file]))
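# Hedged note (an assumption, not part of the original module): in the pbr
# source tree these scenario-based tests are normally driven by a
# scenario-aware runner through tox, with the fixtures above stubbing out git,
# environment variables and the filesystem.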
| bsd-3-clause | -516,611,274,390,199,550 | 39.294118 | 78 | 0.595912 | false |
jumpstarter-io/cinder | cinder/volume/drivers/hds/hnas_backend.py | 5 | 24386 | # Copyright (c) 2014 Hitachi Data Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Hitachi Unified Storage (HUS-HNAS) platform. Backend operations.
"""
import re
from cinder.openstack.common import log as logging
from cinder.openstack.common import units
from cinder import utils
LOG = logging.getLogger("cinder.volume.driver")
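# Descriptive note (added comment, derived from the methods below): every
# operation shells out to the HNAS command-line utility given in 'cmd' via
# utils.execute(), authenticating with user/password against the controller at
# ip0, and parses the plain-text output into the strings the driver expects.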
class HnasBackend():
"""Back end. Talks to HUS-HNAS."""
def get_version(self, cmd, ver, ip0, user, pw):
"""Gets version information from the storage unit
:param ver: string driver version
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
        :returns: formatted string with version information
"""
out, err = utils.execute(cmd,
"-version",
check_exit_code=True)
util = out.split()[1]
out, err = utils.execute(cmd,
'-u', user, '-p', pw, ip0,
"cluster-getmac",
check_exit_code=True)
hardware = out.split()[2]
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, 'ver',
check_exit_code=True)
lines = out.split('\n')
model = ""
for line in lines:
if 'Model:' in line:
model = line.split()[1]
if 'Software:' in line:
ver = line.split()[1]
out = "Array_ID: %s (%s) version: %s LU: 256 RG: 0 RG_LU: 0 \
Utility_version: %s" % (hardware, model, ver, util)
LOG.debug('get_version: ' + out + ' -- ' + err)
return out
    def get_iscsi_info(self, cmd, ip0, user, pw):
        """Gets IP addresses for EVSs, using the EVS ID as the controller ID.
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
        :returns: formatted string with iSCSI information
"""
out, err = utils.execute(cmd,
'-u', user, '-p', pw, ip0,
'evsipaddr', '-l',
check_exit_code=True)
lines = out.split('\n')
newout = ""
for line in lines:
if 'evs' in line and 'admin' not in line:
inf = line.split()
(evsnum, ip) = (inf[1], inf[3])
newout += "CTL: %s Port: 0 IP: %s Port: 3260 Link: Up\n" \
% (evsnum, ip)
LOG.debug('get_iscsi_info: ' + out + ' -- ' + err)
return newout
def get_hdp_info(self, cmd, ip0, user, pw):
"""Gets the list of filesystems and fsids.
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
        :returns: formatted string with filesystems and fsids
"""
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, 'df', '-a',
check_exit_code=True)
lines = out.split('\n')
newout = ""
for line in lines:
if 'Not mounted' in line:
continue
if 'GB' in line or 'TB' in line:
inf = line.split()
(fsid, fslabel, capacity, used, perstr) = \
(inf[0], inf[1], inf[3], inf[5], inf[7])
(availunit, usedunit) = (inf[4], inf[6])
if usedunit == 'GB':
usedmultiplier = units.Ki
else:
usedmultiplier = units.Mi
if availunit == 'GB':
availmultiplier = units.Ki
else:
availmultiplier = units.Mi
m = re.match("\((\d+)\%\)", perstr)
if m:
percent = m.group(1)
else:
percent = 0
newout += "HDP: %s %d MB %d MB %d %% LUs: 256 Normal %s\n" \
% (fsid, int(float(capacity) * availmultiplier),
int(float(used) * usedmultiplier),
int(percent), fslabel)
LOG.debug('get_hdp_info: ' + newout + ' -- ' + err)
return newout
def _get_evs(self, cmd, ip0, user, pw, fsid):
"""Gets the EVSID for the named filesystem."""
out, err = utils.execute(cmd,
'-u', user, '-p', pw, ip0,
"evsfs", "list",
check_exit_code=True)
LOG.debug('get_evs: out ' + out)
lines = out.split('\n')
for line in lines:
inf = line.split()
if fsid in line and (fsid == inf[0] or fsid == inf[1]):
return inf[3]
LOG.warn('get_evs: ' + out + ' -- ' + 'No info for ' + fsid)
return 0
def _get_evsips(self, cmd, ip0, user, pw, evsid):
"""Gets the EVS IPs for the named filesystem."""
out, err = utils.execute(cmd,
'-u', user, '-p', pw, ip0,
'evsipaddr', '-e', evsid,
check_exit_code=True)
iplist = ""
lines = out.split('\n')
for line in lines:
inf = line.split()
if 'evs' in line:
iplist += inf[3] + ' '
LOG.debug('get_evsips: ' + iplist)
return iplist
def _get_fsid(self, cmd, ip0, user, pw, fslabel):
"""Gets the FSID for the named filesystem."""
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, 'evsfs', 'list',
check_exit_code=True)
LOG.debug('get_fsid: out ' + out)
lines = out.split('\n')
for line in lines:
inf = line.split()
if fslabel in line and fslabel == inf[1]:
LOG.debug('get_fsid: ' + line)
return inf[0]
        LOG.warn('get_fsid: ' + out + ' -- ' + 'No info for ' + fslabel)
return 0
def get_nfs_info(self, cmd, ip0, user, pw):
"""Gets information on each NFS export.
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
        :returns: formatted string
"""
out, err = utils.execute(cmd,
'-u', user, '-p', pw, ip0,
'for-each-evs', '-q',
'nfs-export', 'list',
check_exit_code=True)
lines = out.split('\n')
newout = ""
export = ""
path = ""
for line in lines:
inf = line.split()
if 'Export name' in line:
export = inf[2]
if 'Export path' in line:
path = inf[2]
if 'File system info' in line:
fs = ""
if 'File system label' in line:
fs = inf[3]
if 'Transfer setting' in line and fs != "":
fsid = self._get_fsid(cmd, ip0, user, pw, fs)
evsid = self._get_evs(cmd, ip0, user, pw, fsid)
ips = self._get_evsips(cmd, ip0, user, pw, evsid)
newout += "Export: %s Path: %s HDP: %s FSID: %s \
EVS: %s IPS: %s\n" \
% (export, path, fs, fsid, evsid, ips)
fs = ""
LOG.debug('get_nfs_info: ' + newout + ' -- ' + err)
return newout
def create_lu(self, cmd, ip0, user, pw, hdp, size, name):
"""Creates a new Logical Unit.
If the operation can not be performed for some reason, utils.execute()
throws an error and aborts the operation. Used for iSCSI only
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
:param hdp: data Pool the logical unit will be created
:param size: Size (Mb) of the new logical unit
:param name: name of the logical unit
        :returns: formatted string with 'LUN %d HDP: %d size: %s MB, is
successfully created'
"""
_evsid = self._get_evs(cmd, ip0, user, pw, hdp)
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'iscsi-lu', 'add', "-e",
name, hdp,
'/.cinder/' + name + '.iscsi',
size + 'M',
check_exit_code=True)
out = "LUN %s HDP: %s size: %s MB, is successfully created" \
% (name, hdp, size)
LOG.debug('create_lu: ' + out)
return out
    def delete_lu(self, cmd, ip0, user, pw, hdp, lun):
        """Delete a logical unit. Used for iSCSI only
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
:param hdp: data Pool of the logical unit
:param lun: id of the logical unit being deleted
        :returns: formatted string 'Logical unit deleted successfully.'
"""
_evsid = self._get_evs(cmd, ip0, user, pw, hdp)
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'iscsi-lu', 'del', '-d',
'-f', lun,
check_exit_code=True)
LOG.debug('delete_lu: ' + out + ' -- ' + err)
return out
def create_dup(self, cmd, ip0, user, pw, src_lun, hdp, size, name):
"""Clones a volume
Clone primitive used to support all iSCSI snapshot/cloning functions.
Used for iSCSI only.
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
:param hdp: data Pool of the logical unit
        :param src_lun: id of the logical unit being cloned
        :param size: size of the LU being cloned. Only for logging purposes
        :returns: formatted string
"""
_evsid = self._get_evs(cmd, ip0, user, pw, hdp)
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'iscsi-lu', 'clone', '-e',
src_lun, name,
'/.cinder/' + name + '.iscsi',
check_exit_code=True)
out = "LUN %s HDP: %s size: %s MB, is successfully created" \
% (name, hdp, size)
LOG.debug('create_dup: ' + out + ' -- ' + err)
return out
def file_clone(self, cmd, ip0, user, pw, fslabel, src, name):
"""Clones NFS files to a new one named 'name'
Clone primitive used to support all NFS snapshot/cloning functions.
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
:param fslabel: file system label of the new file
:param src: source file
:param name: target path of the new created file
        :returns: formatted string
"""
_fsid = self._get_fsid(cmd, ip0, user, pw, fslabel)
_evsid = self._get_evs(cmd, ip0, user, pw, _fsid)
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'file-clone-create', '-f', fslabel,
src, name,
check_exit_code=True)
out = "LUN %s HDP: %s Clone: %s -> %s" % (name, _fsid, src, name)
LOG.debug('file_clone: ' + out + ' -- ' + err)
return out
    def extend_vol(self, cmd, ip0, user, pw, hdp, lun, new_size, name):
        """Extend an iSCSI volume.
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
:param hdp: data Pool of the logical unit
:param lun: id of the logical unit being extended
:param new_size: new size of the LU
        :param name: name of the logical unit being extended
"""
_evsid = self._get_evs(cmd, ip0, user, pw, hdp)
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'iscsi-lu', 'expand',
name, new_size + 'M',
check_exit_code=True)
out = ("LUN: %s successfully extended to %s MB" % (name, new_size))
LOG.debug('extend_vol: ' + out)
return out
def add_iscsi_conn(self, cmd, ip0, user, pw, lun, hdp,
port, iqn, initiator):
"""Setup the lun on on the specified target port
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
        :param lun: id of the logical unit being mapped
:param hdp: data pool of the logical unit
:param port: iSCSI port
:param iqn: iSCSI qualified name
:param initiator: initiator address
"""
_evsid = self._get_evs(cmd, ip0, user, pw, hdp)
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'iscsi-target', 'list', iqn,
check_exit_code=True)
# even though ssc uses the target alias, need to return the full iqn
fulliqn = ""
lines = out.split('\n')
for line in lines:
if 'Globally unique name' in line:
fulliqn = line.split()[3]
# find first free hlun
hlun = 0
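        # Walk the target's existing mappings in the order they are listed:
        # advance `hlun` past every H-LUN number that is already taken so it
        # lands on the first free slot, return early if this LUN is already
        # mapped, and stop as soon as a gap (free slot) is found.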
for line in lines:
if line.startswith(' '):
lunline = line.split()[0]
vol = line.split()[1]
if lunline[0].isdigit():
# see if already mounted
if vol[:29] == lun[:29]:
LOG.info('lun: %s already mounted %s' % (lun, lunline))
conn = (int(lunline), lun, initiator, hlun, fulliqn,
hlun, hdp, port)
out = "H-LUN: %d alreadymapped LUN: %s, iSCSI \
Initiator: %s @ index: %d, and Target: %s \
@ index %d is successfully paired @ CTL: \
%s, Port: %s" % conn
LOG.debug('add_iscsi_conn: returns ' + out)
return out
if int(lunline) == hlun:
hlun += 1
if int(lunline) > hlun:
# found a hole
break
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'iscsi-target', 'addlu',
iqn, lun, hlun,
check_exit_code=True)
conn = (int(hlun), lun, initiator, int(hlun), fulliqn, int(hlun),
hdp, port)
out = "H-LUN: %d mapped LUN: %s, iSCSI Initiator: %s \
@ index: %d, and Target: %s @ index %d is \
successfully paired @ CTL: %s, Port: %s" % conn
LOG.debug('add_iscsi_conn: returns ' + out)
return out
def del_iscsi_conn(self, cmd, ip0, user, pw, evsid, iqn, hlun):
"""Remove the lun on on the specified target port
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
:param evsid: EVSID for the file system
:param iqn: iSCSI qualified name
:param hlun: logical unit id
        :return: formatted string
"""
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", evsid,
'iscsi-target', 'list', iqn,
check_exit_code=True)
lines = out.split('\n')
out = ("H-LUN: %d already deleted from target %s" % (int(hlun), iqn))
# see if lun is already detached
for line in lines:
if line.startswith(' '):
lunline = line.split()[0]
if lunline[0].isdigit() and lunline == hlun:
out = ""
break
if out != "":
# hlun wasn't found
            LOG.info('del_iscsi_conn: hlun not found ' + out)
return out
# remove the LU from the target
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", evsid,
'iscsi-target', 'dellu',
'-f', iqn, hlun,
check_exit_code=True)
out = "H-LUN: %d successfully deleted from target %s" \
% (int(hlun), iqn)
LOG.debug('del_iscsi_conn: ' + out + ' -- ')
return out
def get_targetiqn(self, cmd, ip0, user, pw, targetalias, hdp, secret):
"""Obtain the targets full iqn
Return the target's full iqn rather than its alias.
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
:param targetalias: alias of the target
:param hdp: data pool of the logical unit
:param secret: CHAP secret of the target
:return: string with full IQN
"""
_evsid = self._get_evs(cmd, ip0, user, pw, hdp)
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'iscsi-target', 'list', targetalias,
check_exit_code=True)
if "does not exist" in out:
if secret == "":
secret = '""'
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'iscsi-target', 'add',
targetalias, secret,
check_exit_code=True)
else:
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'iscsi-target', 'add',
targetalias, secret,
check_exit_code=True)
lines = out.split('\n')
# returns the first iqn
for line in lines:
if 'Alias' in line:
fulliqn = line.split()[2]
return fulliqn
def set_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp, secret):
"""Sets the chap secret for the specified target.
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
:param targetalias: alias of the target
:param hdp: data pool of the logical unit
:param secret: CHAP secret of the target
"""
_evsid = self._get_evs(cmd, ip0, user, pw, hdp)
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'iscsi-target', 'list',
targetalias,
check_exit_code=False)
if "does not exist" in out:
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'iscsi-target', 'add',
targetalias, secret,
check_exit_code=True)
else:
LOG.info('targetlist: ' + targetalias + ' -- ' + out)
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'iscsi-target', 'mod',
'-s', secret, '-a', 'enable',
targetalias,
check_exit_code=True)
def get_targetsecret(self, cmd, ip0, user, pw, targetalias, hdp):
"""Returns the chap secret for the specified target.
:param ip0: string IP address of controller
:param user: string user authentication for array
:param pw: string password authentication for array
:param targetalias: alias of the target
:param hdp: data pool of the logical unit
:return secret: CHAP secret of the target
"""
_evsid = self._get_evs(cmd, ip0, user, pw, hdp)
out, err = utils.execute(cmd,
'-u', user, '-p', pw,
ip0, "console-context",
"--evs", _evsid,
'iscsi-target', 'list', targetalias,
check_exit_code=True)
enabled = ""
secret = ""
lines = out.split('\n')
for line in lines:
if 'Secret' in line:
secret = line.split()[2]
if 'Authentication' in line:
enabled = line.split()[2]
if enabled == 'Enabled':
return secret
| apache-2.0 | -4,481,986,955,292,050,400 | 38.846405 | 79 | 0.451407 | false |
Barmaley-exe/scikit-learn | sklearn/covariance/tests/test_covariance.py | 5 | 10604 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
"""Tests Covariance module on a simple dataset.
"""
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
"""Tests ShrunkCovariance module on a simple dataset.
"""
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
"""Tests LedoitWolf module on a simple dataset.
"""
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# (too) large data set
X_large = np.ones((20, 200))
assert_raises(MemoryError, ledoit_wolf, X_large, block_size=100)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_oas():
"""Tests OAS module on a simple dataset.
"""
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
### Same tests without assuming centered data
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause | -7,593,635,443,028,954,000 | 38.274074 | 79 | 0.659751 | false |
funginstitute/patentprocessor | test/test_parse_file.py | 6 | 3865 | #!/usr/bin/env python
import os
import sys
import unittest
import logging
import re
from collections import Iterable
sys.path.append('../')
import parse
import lib.handlers.grant_handler_v42 as grant_handler_v42
basedir = os.path.dirname(__file__)
testdir = os.path.join(basedir, './fixtures/xml/')
testfileone = 'ipg120327.one.xml'
testfiletwo = 'ipg120327.two.xml'
regex = re.compile(r"""([<][?]xml version.*?[>]\s*[<][!]DOCTYPE\s+([A-Za-z-]+)\s+.*?/\2[>])""", re.S+re.I)
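# The pattern captures one whole patent document at a time: an XML declaration
# followed by a DOCTYPE whose root element name (group 2) must reappear in the
# closing tag, e.g. <?xml ...?> <!DOCTYPE us-patent-grant ... </us-patent-grant>.
# re.S (DOTALL) lets the match span multiple lines.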
class TestParseFile(unittest.TestCase):
def setUp(self):
pass
def test_extract_xml_strings_one(self):
parsed_output = parse.extract_xml_strings(testdir+testfileone)
self.assertTrue(isinstance(parsed_output, list))
self.assertTrue(len(parsed_output) == 1)
self.assertTrue(isinstance(parsed_output[0], tuple))
self.assertTrue(isinstance(parsed_output[0][1], str))
self.assertTrue(regex.match(parsed_output[0][1]))
def test_parse_files_one(self):
filelist = [testdir+testfileone]
parsed_output = parse.parse_files(filelist)
self.assertTrue(isinstance(parsed_output,Iterable))
parsed_output = list(parsed_output)
self.assertTrue(len(parsed_output) == 1)
self.assertTrue(isinstance(parsed_output[0], tuple))
self.assertTrue(isinstance(parsed_output[0][1], str))
self.assertTrue(regex.match(parsed_output[0][1]))
def test_extract_xml_strings_two(self):
parsed_output = parse.extract_xml_strings(testdir+testfiletwo)
self.assertTrue(isinstance(parsed_output, Iterable))
parsed_output = list(parsed_output)
self.assertTrue(len(parsed_output) == 2)
self.assertTrue(isinstance(parsed_output[0], tuple))
self.assertTrue(isinstance(parsed_output[0][1], str))
self.assertTrue(isinstance(parsed_output[1], tuple))
self.assertTrue(isinstance(parsed_output[1][1], str))
self.assertTrue(regex.match(parsed_output[0][1]))
self.assertTrue(regex.match(parsed_output[1][1]))
def test_parse_files_two(self):
filelist = [testdir+testfiletwo]
parsed_output = parse.parse_files(filelist)
self.assertTrue(isinstance(parsed_output,Iterable))
parsed_output = list(parsed_output)
self.assertTrue(len(parsed_output) == 2)
self.assertTrue(isinstance(parsed_output[0], tuple))
self.assertTrue(isinstance(parsed_output[0][1], str))
self.assertTrue(isinstance(parsed_output[1], tuple))
self.assertTrue(isinstance(parsed_output[1][1], str))
self.assertTrue(regex.match(parsed_output[0][1]))
self.assertTrue(regex.match(parsed_output[1][1]))
def test_use_parse_files_one(self):
filelist = [testdir+testfileone]
parsed_output = list(parse.parse_files(filelist))
patobj = grant_handler_v42.PatentGrant(parsed_output[0][1], True)
self.assertTrue(patobj)
def test_use_parse_files_two(self):
filelist = [testdir+testfiletwo]
parsed_output = parse.parse_files(filelist)
parsed_xml = []
for us_patent_grant in parsed_output:
self.assertTrue(isinstance(us_patent_grant, tuple))
self.assertTrue(isinstance(us_patent_grant[1], str))
patobj = grant_handler_v42.PatentGrant(us_patent_grant[1], True)
self.assertTrue(patobj)
def test_list_files(self):
testdir = os.path.join(basedir, './fixtures/xml')
xmlregex = r'ipg120327.one.xml'
files = parse.list_files(testdir, xmlregex)
self.assertTrue(isinstance(files, list))
self.assertTrue(len(files) == 1)
self.assertTrue(all(filter(lambda x: isinstance(x, str), files)))
self.assertTrue(all(map(lambda x: os.path.exists(x), files)))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | 4,166,907,937,054,603,000 | 40.117021 | 106 | 0.657439 | false |
xuweiliang/Codelibrary | nova/virt/xenapi/client/session.py | 1 | 13700 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import contextlib
try:
import cPickle as pickle
except ImportError:
import pickle
import errno
import socket
import time
from eventlet import queue
from eventlet import timeout
from oslo_log import log as logging
from oslo_utils import versionutils
from six.moves import http_client
from six.moves import range
from six.moves import urllib
try:
import xmlrpclib
except ImportError:
import six.moves.xmlrpc_client as xmlrpclib
import nova.conf
from nova import context
from nova import exception
from nova.i18n import _, _LE, _LW
from nova import objects
from nova import version
from nova.virt.xenapi.client import objects as cli_objects
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
def apply_session_helpers(session):
session.VM = cli_objects.VM(session)
session.SR = cli_objects.SR(session)
session.VDI = cli_objects.VDI(session)
session.VIF = cli_objects.VIF(session)
session.VBD = cli_objects.VBD(session)
session.PBD = cli_objects.PBD(session)
session.PIF = cli_objects.PIF(session)
session.VLAN = cli_objects.VLAN(session)
session.host = cli_objects.Host(session)
session.network = cli_objects.Network(session)
session.pool = cli_objects.Pool(session)
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls."""
# This is not a config option as it should only ever be
# changed in development environments.
# MAJOR VERSION: Incompatible changes with the plugins
    # MINOR VERSION: Compatible changes, new plugins, etc
PLUGIN_REQUIRED_VERSION = '1.7'
def __init__(self, url, user, pw):
version_string = version.version_string_with_package()
self.nova_version = ('%(vendor)s %(product)s %(version)s' %
{'vendor': version.vendor_string(),
'product': version.product_string(),
'version': version_string})
import XenAPI
self.XenAPI = XenAPI
self._sessions = queue.Queue()
self.is_slave = False
self.host_checked = False
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
self.url = self._create_first_session(url, user, pw, exception)
self._populate_session_pool(url, user, pw, exception)
self.host_uuid = self._get_host_uuid()
self.host_ref = self._get_host_ref()
self.product_version, self.product_brand = \
self._get_product_version_and_brand()
self._verify_plugin_version()
apply_session_helpers(self)
def _login_with_password(self, user, pw, session, exception):
with timeout.Timeout(CONF.xenserver.login_timeout, exception):
session.login_with_password(user, pw,
self.nova_version, 'OpenStack')
def _verify_plugin_version(self):
requested_version = self.PLUGIN_REQUIRED_VERSION
current_version = self.call_plugin_serialized(
'nova_plugin_version', 'get_version')
if not versionutils.is_compatible(requested_version, current_version):
raise self.XenAPI.Failure(
_("Plugin version mismatch (Expected %(exp)s, got %(got)s)") %
{'exp': requested_version, 'got': current_version})
def _create_first_session(self, url, user, pw, exception):
try:
session = self._create_session_and_login(url, user, pw, exception)
except self.XenAPI.Failure as e:
# if user and pw of the master are different, we're doomed!
if e.details[0] == 'HOST_IS_SLAVE':
master = e.details[1]
url = pool.swap_xapi_host(url, master)
session = self._create_session_and_login(url, user, pw,
exception)
self.is_slave = True
else:
raise
self._sessions.put(session)
return url
def _populate_session_pool(self, url, user, pw, exception):
for i in range(CONF.xenserver.connection_concurrent - 1):
session = self._create_session_and_login(url, user, pw, exception)
self._sessions.put(session)
def _get_host_uuid(self):
if self.is_slave:
aggr = objects.AggregateList.get_by_host(
context.get_admin_context(),
CONF.host, key=pool_states.POOL_FLAG)[0]
if not aggr:
LOG.error(_LE('Host is member of a pool, but DB '
'says otherwise'))
raise exception.AggregateHostNotFound()
return aggr.metadata[CONF.host]
else:
with self._get_session() as session:
host_ref = session.xenapi.session.get_this_host(session.handle)
return session.xenapi.host.get_uuid(host_ref)
def _get_product_version_and_brand(self):
"""Return a tuple of (major, minor, rev) for the host version and
a string of the product brand.
"""
software_version = self._get_software_version()
product_version_str = software_version.get('product_version')
# Product version is only set in some cases (e.g. XCP, XenServer) and
# not in others (e.g. xenserver-core, XAPI-XCP).
# In these cases, the platform version is the best number to use.
if product_version_str is None:
product_version_str = software_version.get('platform_version',
'0.0.0')
product_brand = software_version.get('product_brand')
product_version = versionutils.convert_version_to_tuple(
product_version_str)
return product_version, product_brand
def _get_software_version(self):
return self.call_xenapi('host.get_software_version', self.host_ref)
def get_session_id(self):
"""Return a string session_id. Used for vnc consoles."""
with self._get_session() as session:
return str(session._session)
@contextlib.contextmanager
def _get_session(self):
"""Return exclusive session for scope of with statement."""
session = self._sessions.get()
try:
yield session
finally:
self._sessions.put(session)
def _get_host_ref(self):
"""Return the xenapi host on which nova-compute runs on."""
with self._get_session() as session:
return session.xenapi.host.get_by_uuid(self.host_uuid)
def call_xenapi(self, method, *args):
"""Call the specified XenAPI method on a background thread."""
with self._get_session() as session:
return session.xenapi_request(method, args)
def call_plugin(self, plugin, fn, args):
"""Call host.call_plugin on a background thread."""
# NOTE(armando): pass the host uuid along with the args so that
# the plugin gets executed on the right host when using XS pools
args['host_uuid'] = self.host_uuid
with self._get_session() as session:
return self._unwrap_plugin_exceptions(
session.xenapi.host.call_plugin,
self.host_ref, plugin, fn, args)
def call_plugin_serialized(self, plugin, fn, *args, **kwargs):
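        # XenAPI's host.call_plugin only accepts a flat dict of strings, so
        # the positional and keyword arguments are pickled into a single
        # 'params' entry; the plugin's pickled return value is decoded below.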
params = {'params': pickle.dumps(dict(args=args, kwargs=kwargs))}
rv = self.call_plugin(plugin, fn, params)
return pickle.loads(rv)
def call_plugin_serialized_with_retry(self, plugin, fn, num_retries,
callback, retry_cb=None, *args,
**kwargs):
"""Allows a plugin to raise RetryableError so we can try again."""
attempts = num_retries + 1
sleep_time = 0.5
for attempt in range(1, attempts + 1):
try:
if attempt > 1:
time.sleep(sleep_time)
sleep_time = min(2 * sleep_time, 15)
callback_result = None
if callback:
callback_result = callback(kwargs)
msg = ('%(plugin)s.%(fn)s attempt %(attempt)d/%(attempts)d, '
'callback_result: %(callback_result)s')
LOG.debug(msg,
{'plugin': plugin, 'fn': fn, 'attempt': attempt,
'attempts': attempts,
'callback_result': callback_result})
return self.call_plugin_serialized(plugin, fn, *args, **kwargs)
except self.XenAPI.Failure as exc:
if self._is_retryable_exception(exc, fn):
LOG.warning(_LW('%(plugin)s.%(fn)s failed. '
'Retrying call.'),
{'plugin': plugin, 'fn': fn})
if retry_cb:
retry_cb(exc=exc)
else:
raise
except socket.error as exc:
if exc.errno == errno.ECONNRESET:
LOG.warning(_LW('Lost connection to XenAPI during call to '
'%(plugin)s.%(fn)s. Retrying call.'),
{'plugin': plugin, 'fn': fn})
if retry_cb:
retry_cb(exc=exc)
else:
raise
raise exception.PluginRetriesExceeded(num_retries=num_retries)
def _is_retryable_exception(self, exc, fn):
_type, method, error = exc.details[:3]
if error == 'RetryableError':
LOG.debug("RetryableError, so retrying %(fn)s", {'fn': fn},
exc_info=True)
return True
elif "signal" in method:
LOG.debug("Error due to a signal, retrying %(fn)s", {'fn': fn},
exc_info=True)
return True
else:
return False
def _create_session(self, url):
"""Stubout point. This can be replaced with a mock session."""
self.is_local_connection = url == "unix://local"
if self.is_local_connection:
return self.XenAPI.xapi_local()
return self.XenAPI.Session(url)
def _create_session_and_login(self, url, user, pw, exception):
session = self._create_session(url)
self._login_with_password(user, pw, session, exception)
return session
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
"""Parse exception details."""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure as exc:
LOG.debug("Got exception: %s", exc)
if (len(exc.details) == 4 and
exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and
exc.details[2] == 'Failure'):
params = None
try:
params = ast.literal_eval(exc.details[3])
except Exception:
raise exc
raise self.XenAPI.Failure(params)
else:
raise
except xmlrpclib.ProtocolError as exc:
LOG.debug("Got exception: %s", exc)
raise
def get_rec(self, record_type, ref):
try:
return self.call_xenapi('%s.get_record' % record_type, ref)
except self.XenAPI.Failure as e:
if e.details[0] != 'HANDLE_INVALID':
raise
return None
def get_all_refs_and_recs(self, record_type):
"""Retrieve all refs and recs for a Xen record type.
Handles race-conditions where the record may be deleted between
the `get_all` call and the `get_record` call.
"""
return self.call_xenapi('%s.get_all_records' % record_type).items()
@contextlib.contextmanager
def custom_task(self, label, desc=''):
"""Return exclusive session for scope of with statement."""
name = 'nova-%s' % (label)
task_ref = self.call_xenapi("task.create", name,
desc)
try:
LOG.debug('Created task %s with ref %s' % (name, task_ref))
yield task_ref
finally:
self.call_xenapi("task.destroy", task_ref)
LOG.debug('Destroyed task ref %s' % (task_ref))
@contextlib.contextmanager
def http_connection(session):
conn = None
xs_url = urllib.parse.urlparse(session.url)
LOG.debug("Creating http(s) connection to %s" % session.url)
if xs_url.scheme == 'http':
conn = http_client.HTTPConnection(xs_url.netloc)
elif xs_url.scheme == 'https':
conn = http_client.HTTPSConnection(xs_url.netloc)
conn.connect()
try:
yield conn
finally:
conn.close()
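# Illustrative usage (the path and query string are made up):
#   with http_connection(session) as conn:
#       conn.request('GET', '/export?session_id=' + session.get_session_id())
#       resp = conn.getresponse()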
| apache-2.0 | -5,587,690,191,793,458,000 | 38.031339 | 79 | 0.571241 | false |
alanjw/GreenOpenERP-Win-X86 | python/Lib/test/test_socket.py | 2 | 58111 | #!/usr/bin/env python
import unittest
from test import test_support
import errno
import socket
import select
import time
import traceback
import Queue
import sys
import os
import array
import contextlib
from weakref import proxy
import signal
import math
def try_address(host, port=0, family=socket.AF_INET):
"""Try to bind a socket on the given host:port and return True
if that has been possible."""
try:
sock = socket.socket(family, socket.SOCK_STREAM)
sock.bind((host, port))
except (socket.error, socket.gaierror):
return False
else:
sock.close()
return True
HOST = test_support.HOST
MSG = b'Michael Gilfix was here\n'
SUPPORTS_IPV6 = socket.has_ipv6 and try_address('::1', family=socket.AF_INET6)
try:
import thread
import threading
except ImportError:
thread = None
threading = None
HOST = test_support.HOST
MSG = 'Michael Gilfix was here\n'
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = test_support.bind_port(self.serv)
self.serv.listen(1)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = test_support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = Queue.Queue(1)
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
self.__setUp()
if not self.server_ready.is_set():
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if not self.queue.empty():
msg = self.queue.get()
self.fail(msg)
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if not callable(test_func):
raise TypeError("test_func must be a callable function.")
try:
test_func()
except Exception, strerror:
self.queue.put(strerror)
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
def raise_error(*args, **kwargs):
raise socket.error
def raise_herror(*args, **kwargs):
raise socket.herror
def raise_gaierror(*args, **kwargs):
raise socket.gaierror
self.assertRaises(socket.error, raise_error,
"Error raising socket exception.")
self.assertRaises(socket.error, raise_herror,
"Error raising socket exception.")
self.assertRaises(socket.error, raise_gaierror,
"Error raising socket exception.")
def testSendtoErrors(self):
        # Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(UnicodeEncodeError):
s.sendto(u'\u2620', sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertIn('not complex', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', None)
self.assertIn('not NoneType', str(cm.exception))
# 3 args
with self.assertRaises(UnicodeEncodeError):
s.sendto(u'\u2620', 0, sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertIn('not complex', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto('foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto('foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except socket.error:
# Probably name lookup wasn't set up right; skip this test
return
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except socket.error:
# Probably a similar problem as above; skip this test
return
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
if hasattr(sys, "getrefcount"):
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
self.assertEqual(sys.getrefcount(__name__), orig,
"socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except socket.error:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1L<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1L<<34)
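    # For example, on a little-endian host socket.htons(0x1234) == 0x3412, and
    # applying htons (or ntohs) to the result restores 0x1234 -- each function
    # is its own inverse on the low 16/32 bits, which is what the loop checks.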
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1L, 2L, 3L ]
bad_values = [ -1, -2, -3, -1L, -2L, -3L ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith('linux') or
sys.platform.startswith('freebsd') or
sys.platform.startswith('netbsd') or
sys.platform == 'darwin'):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except socket.error:
pass
else:
raise socket.error
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
        # Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except socket.error:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
def testIPv4_inet_aton_fourbytes(self):
if not hasattr(socket, 'inet_aton'):
return # No inet_aton, nothing to check
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual('\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual('\xff'*4, socket.inet_aton('255.255.255.255'))
def testIPv4toString(self):
if not hasattr(socket, 'inet_pton'):
return # No inet_pton() on this platform
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
self.assertEqual('\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual('\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual('\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual('\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual('\xff\xff\xff\xff', f('255.255.255.255'))
self.assertEqual('\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual('\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual('\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual('\xff\xff\xff\xff', g('255.255.255.255'))
def testIPv6toString(self):
if not hasattr(socket, 'inet_pton'):
return # No inet_pton() on this platform
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
return
except ImportError:
return
f = lambda a: inet_pton(AF_INET6, a)
self.assertEqual('\x00' * 16, f('::'))
self.assertEqual('\x00' * 16, f('0::0'))
self.assertEqual('\x00\x01' + '\x00' * 14, f('1::'))
self.assertEqual(
'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
def testStringToIPv4(self):
if not hasattr(socket, 'inet_ntop'):
return # No inet_ntop() on this platform
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
self.assertEqual('1.0.1.0', f('\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f('\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f('\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f('\x01\x02\x03\x04'))
self.assertEqual('1.0.1.0', g('\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g('\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g('\xff\xff\xff\xff'))
def testStringToIPv6(self):
if not hasattr(socket, 'inet_ntop'):
return # No inet_ntop() on this platform
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
return
except ImportError:
return
f = lambda a: inet_ntop(AF_INET6, a)
self.assertEqual('::', f('\x00' * 16))
self.assertEqual('::1', f('\x00' * 15 + '\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f('\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
# XXX The following don't test module-level functionality...
def _get_unused_port(self, bind_address='0.0.0.0'):
"""Use a temporary socket to elicit an unused ephemeral port.
Args:
bind_address: Hostname or IP address to search for a port on.
Returns: A most likely to be unused port.
"""
tempsock = socket.socket()
tempsock.bind((bind_address, 0))
host, port = tempsock.getsockname()
tempsock.close()
return port
def testSockName(self):
# Testing getsockname()
port = self._get_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except socket.error:
# Probably name lookup wasn't set up right; skip this test
return
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(socket.error, sock.send, "spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
host = '0.0.0.0'
port = self._get_unused_port(bind_address=host)
big_port = port + 65536
neg_port = port - 65536
sock = socket.socket()
try:
self.assertRaises(OverflowError, sock.bind, (host, big_port))
self.assertRaises(OverflowError, sock.bind, (host, neg_port))
sock.bind((host, port))
finally:
sock.close()
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if SUPPORTS_IPV6:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, None, socket.AF_INET)
for family, _, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
def check_sendall_interrupted(self, with_timeout):
        # socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * (1024**2))
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall, b"x" * (1024**2))
finally:
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def testListenBacklog0(self):
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
# backlog = 0
srv.listen(0)
srv.close()
@unittest.skipUnless(SUPPORTS_IPV6, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
('::1',0, 0xffffffff), 0)
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
try:
self.assertRaises(OverflowError, s.bind, ('::1', 0, -10))
finally:
s.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = ''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, 'f' * 2048)
def _testSendAll(self):
big_chunk = 'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
if not hasattr(socket, "fromfd"):
return # On Windows, this doesn't exist
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and Recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), '')
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(0)
start = time.time()
try:
self.serv.accept()
except socket.error:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except socket.error:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
bufsize = -1 # Use default buffer size
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
SocketConnectedTest.setUp(self)
self.serv_file = self.cli_conn.makefile('rb', self.bufsize)
def tearDown(self):
self.serv_file.close()
self.assertTrue(self.serv_file.closed)
self.serv_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.cli_file = self.serv_conn.makefile('wb')
def clientTearDown(self):
self.cli_file.close()
self.assertTrue(self.cli_file.closed)
self.cli_file = None
SocketConnectedTest.clientTearDown(self)
def testSmallRead(self):
# Performing small file read test
first_seg = self.serv_file.read(len(MSG)-3)
second_seg = self.serv_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, MSG)
def _testSmallRead(self):
self.cli_file.write(MSG)
self.cli_file.flush()
def testFullRead(self):
# read until EOF
msg = self.serv_file.read()
self.assertEqual(msg, MSG)
def _testFullRead(self):
self.cli_file.write(MSG)
self.cli_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = ''
while 1:
char = self.serv_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, MSG)
def _testUnbufferedRead(self):
self.cli_file.write(MSG)
self.cli_file.flush()
def testReadline(self):
# Performing file readline test
line = self.serv_file.readline()
self.assertEqual(line, MSG)
def _testReadline(self):
self.cli_file.write(MSG)
self.cli_file.flush()
def testReadlineAfterRead(self):
a_baloo_is = self.serv_file.read(len("A baloo is"))
self.assertEqual("A baloo is", a_baloo_is)
_a_bear = self.serv_file.read(len(" a bear"))
self.assertEqual(" a bear", _a_bear)
line = self.serv_file.readline()
self.assertEqual("\n", line)
line = self.serv_file.readline()
self.assertEqual("A BALOO IS A BEAR.\n", line)
line = self.serv_file.readline()
self.assertEqual(MSG, line)
def _testReadlineAfterRead(self):
self.cli_file.write("A baloo is a bear\n")
self.cli_file.write("A BALOO IS A BEAR.\n")
self.cli_file.write(MSG)
self.cli_file.flush()
def testReadlineAfterReadNoNewline(self):
end_of_ = self.serv_file.read(len("End Of "))
self.assertEqual("End Of ", end_of_)
line = self.serv_file.readline()
self.assertEqual("Line", line)
def _testReadlineAfterReadNoNewline(self):
self.cli_file.write("End Of Line")
def testClosedAttr(self):
self.assertTrue(not self.serv_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.cli_file.closed)
class FileObjectInterruptedTestCase(unittest.TestCase):
"""Test that the file object correctly handles EINTR internally."""
class MockSocket(object):
def __init__(self, recv_funcs=()):
            # An iterator of callables; the next one is invoked on each
            # call to recv().
self._recv_step = iter(recv_funcs)
def recv(self, size):
return self._recv_step.next()()
@staticmethod
def _raise_eintr():
raise socket.error(errno.EINTR)
def _test_readline(self, size=-1, **kwargs):
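        # recv() raises EINTR partway through the second line; the file object must
        # retry transparently and still return both lines intact.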
mock_sock = self.MockSocket(recv_funcs=[
lambda : "This is the first line\nAnd the sec",
self._raise_eintr,
lambda : "ond line is here\n",
lambda : "",
])
fo = socket._fileobject(mock_sock, **kwargs)
self.assertEqual(fo.readline(size), "This is the first line\n")
self.assertEqual(fo.readline(size), "And the second line is here\n")
def _test_read(self, size=-1, **kwargs):
mock_sock = self.MockSocket(recv_funcs=[
lambda : "This is the first line\nAnd the sec",
self._raise_eintr,
lambda : "ond line is here\n",
lambda : "",
])
fo = socket._fileobject(mock_sock, **kwargs)
self.assertEqual(fo.read(size), "This is the first line\n"
"And the second line is here\n")
def test_default(self):
self._test_readline()
self._test_readline(size=100)
self._test_read()
self._test_read(size=100)
def test_with_1k_buffer(self):
self._test_readline(bufsize=1024)
self._test_readline(size=100, bufsize=1024)
self._test_read(bufsize=1024)
self._test_read(size=100, bufsize=1024)
def _test_readline_no_buffer(self, size=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : "aa",
lambda : "\n",
lambda : "BB",
self._raise_eintr,
lambda : "bb",
lambda : "",
])
fo = socket._fileobject(mock_sock, bufsize=0)
self.assertEqual(fo.readline(size), "aa\n")
self.assertEqual(fo.readline(size), "BBbb")
def test_no_buffer(self):
self._test_readline_no_buffer()
self._test_readline_no_buffer(size=4)
self._test_read(bufsize=0)
self._test_read(size=100, bufsize=0)
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that httplib relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.serv_file.readline() # first line
self.assertEqual(line, "A. " + MSG) # first line
self.serv_file = self.cli_conn.makefile('rb', 0)
line = self.serv_file.readline() # second line
self.assertEqual(line, "B. " + MSG) # second line
def _testUnbufferedReadline(self):
self.cli_file.write("A. " + MSG)
self.cli_file.write("B. " + MSG)
self.cli_file.flush()
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = test_support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(socket.error) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = test_support.find_unused_port()
with self.assertRaises(socket.error) as cm:
socket.create_connection((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = test_support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
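    # Every server-side test below is just _justAccept; the real assertions run in
    # the matching _test* method on the client thread.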
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send("done!")
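    # The server behaviour is identical for both cases: sleep past the short
    # timeout, then send; only the client-side timeout differs.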
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, "done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class Urllib2FileobjectTest(unittest.TestCase):
# urllib2.HTTPHandler has "borrowed" socket._fileobject, and requires that
# it close the socket if the close c'tor argument is true
def testClose(self):
class MockSocket:
closed = False
def flush(self): pass
def close(self): self.closed = True
# must not close unless we request it: the original use of _fileobject
# by module socket requires that the underlying socket not be closed until
# the _socketobject that created the _fileobject is closed
s = MockSocket()
f = socket._fileobject(s)
f.close()
self.assertTrue(not s.closed)
s = MockSocket()
f = socket._fileobject(s, close=True)
f.close()
self.assertTrue(s.closed)
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
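        # A timeout of 0.0 puts the socket in non-blocking mode, so accept() should
        # fail with a plain socket.error, never socket.timeout.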
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
if not hasattr(signal, "alarm"):
return # can only test on *nix
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except socket.error:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(socket.error, Exception))
self.assertTrue(issubclass(socket.herror, socket.error))
self.assertTrue(issubclass(socket.gaierror, socket.error))
self.assertTrue(issubclass(socket.timeout, socket.error))
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = "\x00python-test-hello\x00\xff"
s1 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s1.bind(address)
s1.listen(1)
s2 = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s2.connect(s1.getsockname())
s1.accept()
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = "\x00" + "h" * (self.UNIX_PATH_MAX - 1)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.assertRaises(socket.error, s.bind, address)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array('c', ' '*1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf.tostring()[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
with test_support.check_py3k_warnings():
buf = buffer(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
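    # The client side always just sends MSG, so the same sender is reused for the
    # bytearray and memoryview variants below.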
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array('c', ' '*1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf.tostring()[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
with test_support.check_py3k_warnings():
buf = buffer(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
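# TIPC test parameters: an arbitrary service type plus the lower and upper bounds
# of the name sequence the server binds to.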
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
if test_support.verbose:
print "TIPC module is not loaded, please 'sudo modprobe tipc'"
return False
class TIPCTest (unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + (TIPC_UPPER - TIPC_LOWER) / 2, 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
class TIPCThreadableTest (unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen(5)
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + (TIPC_UPPER - TIPC_LOWER) / 2, 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
FileObjectInterruptedTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
Urllib2FileobjectTest,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
])
if hasattr(socket, "socketpair"):
tests.append(BasicSocketPairTest)
if sys.platform == 'linux2':
tests.append(TestLinuxAbstractNamespace)
if isTipcAvailable():
tests.append(TIPCTest)
tests.append(TIPCThreadableTest)
thread_info = test_support.threading_setup()
test_support.run_unittest(*tests)
test_support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
| agpl-3.0 | -8,929,866,341,141,276,000 | 33.716656 | 117 | 0.59345 | false |
brianmay/python-tldap-debian | tldap/schemas/ds389.py | 1 | 1387 | # Copyright 2012-2014 Brian May
#
# This file is part of python-tldap.
#
# python-tldap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-tldap is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-tldap If not, see <http://www.gnu.org/licenses/>.
import tldap
import tldap.base
import tldap.fields
# Directory Server
class passwordObject(tldap.base.LDAPobject):
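    # Operational password-policy and role attributes maintained by 389 Directory Server.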
pwdpolicysubentry = tldap.fields.CharField()
    passwordExpirationTime = tldap.fields.CharField()
    passwordExpWarned = tldap.fields.CharField()
    passwordRetryCount = tldap.fields.CharField()
retryCountResetTime = tldap.fields.CharField()
accountUnlockTime = tldap.fields.CharField()
passwordHistory = tldap.fields.CharField()
passwordAllowChangeTime = tldap.fields.CharField()
passwordGraceUserTime = tldap.fields.CharField()
nsRoleDN = tldap.fields.CharField()
nsRole = tldap.fields.CharField(max_instances=None) # Readonly
| gpl-3.0 | 4,000,363,407,715,313,700 | 37.527778 | 70 | 0.762797 | false |
nachandr/cfme_tests | cfme/markers/polarion.py | 2 | 1406 | """polarion(*tcid): Marker for marking tests as automation for polarion test cases."""
import attr
import pytest
from cfme.fixtures.pytest_store import store
def pytest_configure(config):
config.addinivalue_line("markers", __doc__.splitlines()[0])
def extract_polarion_ids(item):
    """Extracts Polarion TC IDs from the test item. Returns an empty list if no marker is present."""
polarion = item.get_closest_marker('polarion')
return list(map(str, getattr(polarion, 'args', [])))
@pytest.hookimpl(tryfirst=True)
def pytest_collection_modifyitems(session, config, items):
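    # Register the reporter only when the junitxml plugin is active and this
    # process is the parallelizer master.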
xml = getattr(config, '_xml', None)
if xml is None:
return
if store.parallelizer_role != 'master':
return
config.pluginmanager.register(ReportPolarionToJunitPlugin(
xml=xml,
node_map={item.nodeid: extract_polarion_ids(item) for item in items},
))
@attr.s(hash=False)
class ReportPolarionToJunitPlugin:
xml = attr.ib()
node_map = attr.ib()
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_logreport(self, report):
"""Adds the supplied test case id to the xunit file as a property"""
if report.when != 'setup':
return
reporter = self.xml.node_reporter(report)
polarion_ids = self.node_map.get(report.nodeid, [])
for polarion_id in polarion_ids:
reporter.add_property('test_id', polarion_id)
| gpl-2.0 | 8,446,228,546,752,307,000 | 30.954545 | 89 | 0.672831 | false |