repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (string, 991 distinct values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (string, 15 distinct values)
---|---|---|---|---|---|
vdloo/jobrunner | flows/builtin/tests/unit/helpers/capabilities/test_port_is_free.py | 1 | 3668 | from uuid import uuid4
from mock import Mock, call
from tests.testcase import TestCase
from flows.builtin.helpers.capabilities import port_is_free, set_cached_port_is_free, \
reset_cached_port_is_free_timestamp
class TestPortIsFree(TestCase):
def setUp(self):
self.time = self.set_up_patch(
'flows.builtin.helpers.capabilities.time'
)
self.time.side_effect = (
# Save time in global
1497192855.084605,
# Check time for the first time after setting globals
1497192856.064605,
# The second time is not saved in the global because
# it happens inside the 10-second window
)
self.flow_uuid = str(uuid4())
self.job = Mock(details={'flow_uuid': self.flow_uuid})
self.check_nonzero_exit = self.set_up_patch(
'flows.builtin.helpers.capabilities.check_nonzero_exit'
)
self.check_nonzero_exit.return_value = True
self.get_flow_details_by_uuid = self.set_up_patch(
'flows.builtin.helpers.capabilities.get_flow_details_by_uuid'
)
self.get_flow_details_by_uuid.return_value = Mock(
meta={'store': {'port': 1234}}
)
# Reset the global memoized result every test method
set_cached_port_is_free(None)
reset_cached_port_is_free_timestamp()
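# A rough sketch of the caching contract exercised below (inferred from the
# patched helpers, not from the implementation itself): port_is_free() shells
# out once via check_nonzero_exit, memoizes the boolean result together with a
# timestamp, and only re-checks when the cached result said the port was free
# or when more than ten seconds have elapsed. The time.side_effect tuples in
# these tests simulate those clock readings.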
def test_port_is_free_gets_flow_details_for_job_uuid(self):
port_is_free(self.job)
self.get_flow_details_by_uuid.assert_called_once_with(
self.flow_uuid
)
def test_port_is_free_checks_port_is_free(self):
port_is_free(self.job)
self.check_nonzero_exit.assert_called_once_with(
'netstat -tuna | grep -q 1234'
)
def test_port_is_free_uses_cached_result_when_checking_twice(self):
port_is_free(self.job)
port_is_free(self.job)
# Only once
self.check_nonzero_exit.assert_called_once_with(
'netstat -tuna | grep -q 1234'
)
def test_port_is_free_checks_again_if_port_was_free(self):
set_cached_port_is_free(True)
self.time.side_effect = (
# Save time in global
1497192855.084605,
# Check time for the first time after setting globals
1497192856.064605 + 10,
# Save time in global, updating the first timestamp
1497192856.084605 + 10,
)
port_is_free(self.job)
port_is_free(self.job)
expected_calls = [
call('netstat -tuna | grep -q 1234')
] * 2
self.assertCountEqual(
expected_calls, self.check_nonzero_exit.mock_calls
)
def test_port_is_free_checks_again_after_ten_seconds(self):
self.time.side_effect = (
# Save time in global
1497192855.084605,
# Check time for the first time after setting globals
1497192856.064605 + 10,
# Save time in global, updating the first timestamp
1497192856.084605 + 10,
)
port_is_free(self.job)
port_is_free(self.job)
expected_calls = [
call('netstat -tuna | grep -q 1234')
] * 2
self.assertCountEqual(
expected_calls, self.check_nonzero_exit.mock_calls
)
def test_port_is_free_returns_true_if_port_is_free(self):
self.check_nonzero_exit.return_value = False
ret = port_is_free(self.job)
self.assertTrue(ret)
def test_port_is_free_returns_false_if_port_is_already_bound(self):
ret = port_is_free(self.job)
self.assertFalse(ret)
| apache-2.0 |
caisq/tensorflow | tensorflow/python/util/tf_inspect.py | 9 | 11486 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFDecorator-aware replacements for the inspect module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
import inspect as _inspect
import six
from tensorflow.python.util import tf_decorator
ArgSpec = _inspect.ArgSpec
if hasattr(_inspect, 'FullArgSpec'):
FullArgSpec = _inspect.FullArgSpec # pylint: disable=invalid-name
else:
FullArgSpec = namedtuple('FullArgSpec', [
'args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults',
'annotations'
])
def currentframe():
"""TFDecorator-aware replacement for inspect.currentframe."""
return _inspect.stack()[1][0]
def getargspec(obj):
"""TFDecorator-aware replacement for inspect.getargspec.
Args:
obj: A function, partial function, or callable object, possibly
decorated.
Returns:
The `ArgSpec` that describes the signature of the outermost decorator that
changes the callable's signature. If the callable is not decorated,
`inspect.getargspec()` will be called directly on the object.
Raises:
ValueError: When callable's signature can not be expressed with
ArgSpec.
TypeError: For objects of unsupported types.
"""
if isinstance(obj, functools.partial):
return _get_argspec_for_partial(obj)
decorators, target = tf_decorator.unwrap(obj)
spec = next((d.decorator_argspec
for d in decorators
if d.decorator_argspec is not None), None)
if spec:
return spec
try:
# Python3 will handle most callables here (not partial).
return _inspect.getargspec(target)
except TypeError:
pass
if isinstance(target, type):
try:
return _inspect.getargspec(target.__init__)
except TypeError:
pass
try:
return _inspect.getargspec(target.__new__)
except TypeError:
pass
# The `type(target)` ensures that if a class is received we don't return
# the signature of its __call__ method.
return _inspect.getargspec(type(target).__call__)
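# A minimal usage sketch (hypothetical function, not part of this module): for
# an undecorated callable the result is simply what inspect.getargspec would
# return, while functools.partial objects are routed through
# _get_argspec_for_partial below.
#
#   def scale(x, factor=2.0):
#     return x * factor
#
#   getargspec(scale)
#   # -> ArgSpec(args=['x', 'factor'], varargs=None, keywords=None,
#   #            defaults=(2.0,))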
def _get_argspec_for_partial(obj):
"""Implements `getargspec` for `functools.partial` objects.
Args:
obj: The `functools.partial` object
Returns:
An `inspect.ArgSpec`
Raises:
ValueError: When callable's signature can not be expressed with
ArgSpec.
"""
# When the callable is a functools.partial object, we construct its ArgSpec with
# the following strategy:
# - If the partial binds positional arguments (i.e. object.args), then the
# final ArgSpec doesn't contain those positional arguments.
# - If the partial supplies default values for keyword arguments (i.e.
# object.keywords), then we merge them with those of the wrapped target. Default
# values from the partial take precedence over those from the wrapped target.
#
# However, there is a case where it is impossible to construct a valid
# ArgSpec. Python requires that arguments without default values be defined
# before those with default values. The ArgSpec structure is only valid when
# this presumption holds, because default values are expressed as a tuple of
# values without keywords and are always assumed to belong to the last K
# arguments, where K is the number of default values present.
#
# Since functools.partial can give default value to any argument, this
# presumption may no longer hold in some cases. For example:
#
# def func(m, n):
# return 2 * m + n
# partialed = functools.partial(func, m=1)
#
# This example will result in m having a default value but n not having one.
# This is not normally allowed in Python and cannot be expressed in ArgSpec
# correctly.
#
# Thus, we must detect cases like this by finding the first argument with a
# default value and ensuring that all following arguments also have default
# values. When this is not true, a ValueError is raised.
n_prune_args = len(obj.args)
partial_keywords = obj.keywords or {}
args, varargs, keywords, defaults = getargspec(obj.func)
# Pruning first n_prune_args arguments.
args = args[n_prune_args:]
# Partial function may give default value to any argument, therefore length
# of default value list must be len(args) to allow each argument to
# potentially be given a default value.
all_defaults = [None] * len(args)
if defaults:
all_defaults[-len(defaults):] = defaults
# Fill in default values provided by partial function in all_defaults.
for kw, default in six.iteritems(partial_keywords):
idx = args.index(kw)
all_defaults[idx] = default
# Find first argument with default value set.
first_default = next((idx for idx, x in enumerate(all_defaults) if x), None)
# If no default values are found, return ArgSpec with defaults=None.
if first_default is None:
return ArgSpec(args, varargs, keywords, None)
# Checks if all arguments have default value set after first one.
invalid_default_values = [
args[i] for i, j in enumerate(all_defaults) if not j and i > first_default
]
if invalid_default_values:
raise ValueError('Some arguments %s do not have default value, but they '
'are positioned after those with default values. This can '
'not be expressed with ArgSpec.' % invalid_default_values)
return ArgSpec(args, varargs, keywords, tuple(all_defaults[first_default:]))
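# A short illustration of the two outcomes above (assumed behaviour, reusing
# the hypothetical `func(m, n)` from the comment block): binding the last
# positional argument by keyword yields a valid ArgSpec, while binding only an
# earlier one raises ValueError.
#
#   partialed_ok = functools.partial(func, n=1)
#   getargspec(partialed_ok)
#   # -> ArgSpec(args=['m', 'n'], varargs=None, keywords=None, defaults=(1,))
#
#   partialed_bad = functools.partial(func, m=1)
#   getargspec(partialed_bad)  # raises ValueError: 'n' has no default value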
if hasattr(_inspect, 'getfullargspec'):
_getfullargspec = _inspect.getfullargspec
else:
def _getfullargspec(target):
"""A python2 version of getfullargspec.
Args:
target: the target object to inspect.
Returns:
A FullArgSpec with empty kwonlyargs, kwonlydefaults and annotations.
"""
argspecs = _inspect.getargspec(target)
fullargspecs = FullArgSpec(
args=argspecs.args,
varargs=argspecs.varargs,
varkw=argspecs.keywords,
defaults=argspecs.defaults,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
return fullargspecs
def getfullargspec(obj):
"""TFDecorator-aware replacement for `inspect.getfullargspec`.
This wrapper emulates `inspect.getfullargspec` in Python 2.
Args:
obj: A callable, possibly decorated.
Returns:
The `FullArgSpec` that describes the signature of
the outermost decorator that changes the callable's signature. If the
callable is not decorated, `inspect.getfullargspec()` will be called
directly on the callable.
"""
decorators, target = tf_decorator.unwrap(obj)
return next((d.decorator_argspec
for d in decorators
if d.decorator_argspec is not None), _getfullargspec(target))
def getcallargs(func, *positional, **named):
"""TFDecorator-aware replacement for inspect.getcallargs.
Args:
func: A callable, possibly decorated
*positional: The positional arguments that would be passed to `func`.
**named: The named argument dictionary that would be passed to `func`.
Returns:
A dictionary mapping `func`'s named arguments to the values they would
receive if `func(*positional, **named)` were called.
`getcallargs` will use the argspec from the outermost decorator that provides
it. If no attached decorators modify argspec, the final unwrapped target's
argspec will be used.
"""
argspec = getfullargspec(func)
call_args = named.copy()
this = getattr(func, 'im_self', None) or getattr(func, '__self__', None)
if ismethod(func) and this:
positional = (this,) + positional
remaining_positionals = [arg for arg in argspec.args if arg not in call_args]
call_args.update(dict(zip(remaining_positionals, positional)))
default_count = 0 if not argspec.defaults else len(argspec.defaults)
if default_count:
for arg, value in zip(argspec.args[-default_count:], argspec.defaults):
if arg not in call_args:
call_args[arg] = value
return call_args
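# A minimal sketch of the mapping this produces (hypothetical function, not
# part of this module):
#
#   def add(a, b=2):
#     return a + b
#
#   getcallargs(add, 1)        # -> {'a': 1, 'b': 2}
#   getcallargs(add, 1, b=5)   # -> {'a': 1, 'b': 5}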
def getframeinfo(*args, **kwargs):
return _inspect.getframeinfo(*args, **kwargs)
def getdoc(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getdoc.
Args:
object: An object, possibly decorated.
Returns:
The docstring associated with the object.
The outermost-decorated object is intended to have the most complete
documentation, so the decorated parameter is not unwrapped.
"""
return _inspect.getdoc(object)
def getfile(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getfile."""
unwrapped_object = tf_decorator.unwrap(object)[1]
# Workaround for the case when object is a stack frame
# and only .pyc files are used. In this case, getfile
# might return an incorrect path, so we get the path from f_globals
# instead.
if (hasattr(unwrapped_object, 'f_globals') and
'__file__' in unwrapped_object.f_globals):
return unwrapped_object.f_globals['__file__']
return _inspect.getfile(unwrapped_object)
def getmembers(object, predicate=None): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getmembers."""
return _inspect.getmembers(object, predicate)
def getmodule(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getmodule."""
return _inspect.getmodule(object)
def getmro(cls):
"""TFDecorator-aware replacement for inspect.getmro."""
return _inspect.getmro(cls)
def getsource(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getsource."""
return _inspect.getsource(tf_decorator.unwrap(object)[1])
def isbuiltin(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isbuiltin."""
return _inspect.isbuiltin(tf_decorator.unwrap(object)[1])
def isclass(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isclass."""
return _inspect.isclass(tf_decorator.unwrap(object)[1])
def isfunction(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isfunction."""
return _inspect.isfunction(tf_decorator.unwrap(object)[1])
def ismethod(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.ismethod."""
return _inspect.ismethod(tf_decorator.unwrap(object)[1])
def ismodule(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.ismodule."""
return _inspect.ismodule(tf_decorator.unwrap(object)[1])
def isroutine(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isroutine."""
return _inspect.isroutine(tf_decorator.unwrap(object)[1])
def stack(context=1):
"""TFDecorator-aware replacement for inspect.stack."""
return _inspect.stack(context)[1:]
| apache-2.0 |
QISKit/qiskit-sdk-py | test/python/transpiler/test_passmanager.py | 1 | 4748 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the passmanager logic"""
import copy
import numpy as np
from qiskit import QuantumRegister, QuantumCircuit
from qiskit.transpiler import PassManager
from qiskit.transpiler import PropertySet
from qiskit.compiler import transpile
from qiskit.converters import circuit_to_dag
from qiskit.transpiler.passes import CommutativeCancellation
from qiskit.transpiler.passes import Optimize1qGates, Unroller
from qiskit.test.mock import FakeRueschlikon
from qiskit.test import QiskitTestCase
class TestPassManager(QiskitTestCase):
"""Test Pass maanger logic."""
def test_callback(self):
"""Test the callback parameter."""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr, name='MyCircuit')
circuit.h(qr[0])
circuit.h(qr[0])
circuit.h(qr[0])
expected_start = QuantumCircuit(qr)
expected_start.u2(0, np.pi, qr[0])
expected_start.u2(0, np.pi, qr[0])
expected_start.u2(0, np.pi, qr[0])
expected_start_dag = circuit_to_dag(expected_start)
expected_end = QuantumCircuit(qr)
expected_end.u2(0, np.pi, qr[0])
expected_end_dag = circuit_to_dag(expected_end)
calls = []
def callback(**kwargs):
out_dict = kwargs
out_dict['dag'] = copy.deepcopy(kwargs['dag'])
calls.append(out_dict)
passmanager = PassManager(callback=callback)
passmanager.append(Unroller(['u2']))
passmanager.append(Optimize1qGates())
transpile(circuit, FakeRueschlikon(), pass_manager=passmanager)
self.assertEqual(len(calls), 2)
self.assertEqual(len(calls[0]), 5)
self.assertEqual(calls[0]['count'], 0)
self.assertEqual(calls[0]['pass_'].name(), 'Unroller')
self.assertEqual(expected_start_dag, calls[0]['dag'])
self.assertIsInstance(calls[0]['time'], float)
self.assertEqual(calls[0]['property_set'], PropertySet())
self.assertEqual('MyCircuit', calls[0]['dag'].name)
self.assertEqual(len(calls[1]), 5)
self.assertEqual(calls[1]['count'], 1)
self.assertEqual(calls[1]['pass_'].name(), 'Optimize1qGates')
self.assertEqual(expected_end_dag, calls[1]['dag'])
self.assertIsInstance(calls[0]['time'], float)
self.assertEqual(calls[0]['property_set'], PropertySet())
self.assertEqual('MyCircuit', calls[1]['dag'].name)
def test_callback_with_pass_requires(self):
"""Test the callback with a pass with another pass requirement."""
qr = QuantumRegister(3, 'qr')
circuit = QuantumCircuit(qr, name='MyCircuit')
circuit.z(qr[0])
circuit.cx(qr[0], qr[2])
circuit.z(qr[0])
expected_start = QuantumCircuit(qr)
expected_start.z(qr[0])
expected_start.cx(qr[0], qr[2])
expected_start.z(qr[0])
expected_start_dag = circuit_to_dag(expected_start)
expected_end = QuantumCircuit(qr)
expected_end.cx(qr[0], qr[2])
expected_end_dag = circuit_to_dag(expected_end)
calls = []
def callback(**kwargs):
out_dict = kwargs
out_dict['dag'] = copy.deepcopy(kwargs['dag'])
calls.append(out_dict)
passmanager = PassManager(callback=callback)
passmanager.append(CommutativeCancellation())
passmanager.run(circuit)
self.assertEqual(len(calls), 2)
self.assertEqual(len(calls[0]), 5)
self.assertEqual(calls[0]['count'], 0)
self.assertEqual(calls[0]['pass_'].name(), 'CommutationAnalysis')
self.assertEqual(expected_start_dag, calls[0]['dag'])
self.assertIsInstance(calls[0]['time'], float)
self.assertIsInstance(calls[0]['property_set'], PropertySet)
self.assertEqual('MyCircuit', calls[0]['dag'].name)
self.assertEqual(len(calls[1]), 5)
self.assertEqual(calls[1]['count'], 1)
self.assertEqual(calls[1]['pass_'].name(), 'CommutativeCancellation')
self.assertEqual(expected_end_dag, calls[1]['dag'])
self.assertIsInstance(calls[0]['time'], float)
self.assertIsInstance(calls[0]['property_set'], PropertySet)
self.assertEqual('MyCircuit', calls[1]['dag'].name)
| apache-2.0 |
nagyistoce/edx-platform | common/djangoapps/student/migrations/0039_auto__del_courseregistrationcode.py | 114 | 13511 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'CourseRegistrationCode'
db.delete_table('student_courseregistrationcode')
def backwards(self, orm):
# Adding model 'CourseRegistrationCode'
db.create_table('student_courseregistrationcode', (
('code', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('transaction_group_name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=255, null=True, db_index=True)),
('redeemed_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='redeemed_by_user', null=True, to=orm['auth.User'])),
('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
('redeemed_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 6, 25, 0, 0), null=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 6, 25, 0, 0))),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_by_user', to=orm['auth.User'])),
))
db.send_create_signal('student', ['CourseRegistrationCode'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'student.anonymoususerid': {
'Meta': {'object_name': 'AnonymousUserId'},
'anonymous_user_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseaccessrole': {
'Meta': {'unique_together': "(('user', 'org', 'course_id', 'role'),)", 'object_name': 'CourseAccessRole'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.courseenrollmentallowed': {
'Meta': {'unique_together': "(('email', 'course_id'),)", 'object_name': 'CourseEnrollmentAllowed'},
'auto_enroll': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.loginfailures': {
'Meta': {'object_name': 'LoginFailures'},
'failure_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lockout_until': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.passwordhistory': {
'Meta': {'object_name': 'PasswordHistory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'time_set': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.pendingemailchange': {
'Meta': {'object_name': 'PendingEmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.pendingnamechange': {
'Meta': {'object_name': 'PendingNameChange'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'rationale': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.registration': {
'Meta': {'object_name': 'Registration', 'db_table': "'auth_registration'"},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'student.userprofile': {
'Meta': {'object_name': 'UserProfile', 'db_table': "'auth_userprofile'"},
'allow_certificate': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'city': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'courseware': ('django.db.models.fields.CharField', [], {'default': "'course.xml'", 'max_length': '255', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'goals': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'mailing_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'meta': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'year_of_birth': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
},
'student.usersignupsource': {
'Meta': {'object_name': 'UserSignupSource'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user_id': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'student.userstanding': {
'Meta': {'object_name': 'UserStanding'},
'account_status': ('django.db.models.fields.CharField', [], {'max_length': '31', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'standing_last_changed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'standing'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'student.usertestgroup': {
'Meta': {'object_name': 'UserTestGroup'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'db_index': 'True', 'symmetrical': 'False'})
}
}
complete_apps = ['student']
| agpl-3.0 |
jf-parent/brome | brome/runner/virtualbox_instance.py | 2 | 8797 | from time import sleep
import subprocess
import paramiko
import netifaces as ni
from brome.runner.base_instance import BaseInstance
class VirtualboxInstance(BaseInstance):
"""Virtual box instance
Attributes:
runner (object)
browser_config (object)
index (int)
Kwargs:
vbox (object)
"""
def __init__(self, runner, browser_config, index, **kwargs):
self.runner = runner
self.browser_config = browser_config
self.index = index
self.vbox = kwargs.get('vbox')
def get_ip(self):
"""Return the ip address of the node
"""
return self.browser_config.get('ip')
def execute_command(self, command, **kwargs):
"""Execute a command on the node
Args:
command (str)
Kwargs:
username (str)
"""
self.info_log("executing command: %s" % command)
try:
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
username = kwargs.get(
'username',
self.browser_config.get('username')
)
password = self.browser_config.get('password')
ssh.connect(self.get_ip(), username=username, password=password)
stdin, stdout, stderr = ssh.exec_command(command)
ssh.close()
return (stdout, stderr)
except Exception as e:
msg = "Execute_command exception: %s" % str(e)
self.error_log(msg)
raise Exception(msg)
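# Illustrative call (hypothetical `instance` variable), assuming the browser
# config carries valid SSH credentials for the node. Note that the client is
# closed before the stream pair is returned, so callers in this module treat
# the output as best-effort:
#
#   stdout, stderr = instance.execute_command('uname -a')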
def scp_file_remote_to_local(self, remote_path, local_path):
"""Scp a remote file to local
Args:
remote_path (str)
local_path (str)
"""
sshadd_command = [
'ssh-add',
'/Users/pyrat/.ssh/ubuntuNode'
]
self.info_log(
"executing command: %s" %
' '.join(sshadd_command)
)
p = subprocess.Popen(sshadd_command)
p.wait()
scp_command = [
'scp',
'-o',
'StrictHostKeyChecking=no',
'%s@%s:"%s"' %
(
self.browser_config.get('username'),
self.get_ip(),
remote_path
),
local_path
]
self.info_log(
"executing command: %s" %
' '.join(scp_command)
)
p = subprocess.Popen(scp_command)
p.wait()
def startup(self):
"""This will launch and configure the virtual box machine
"""
# Do not launch the virtual machine
if not self.browser_config.get('launch', False):
return True
self.info_log("Starting up...")
try:
vm_already_running_cmd = [
"VBoxManage",
"showvminfo",
self.browser_config.get('vbname'),
"--machinereadable",
"|",
"grep",
"VMState=",
"|",
"cut",
"-d'='",
"-f2"
]
output = subprocess.check_output(
' '.join(vm_already_running_cmd),
stderr=subprocess.STDOUT,
shell=True
).decode('utf').strip()
print(
"Is vm already running output: {output}"
.format(output=output)
)
if output.find('running') != -1:
return True
# Cleanup the vbox guestproperty variable
subprocess.call([
'VBoxManage',
'guestproperty',
'delete',
self.browser_config.get('vbname'),
'wait_until_ready'
])
subprocess.call([
'VBoxManage',
'guestproperty',
'delete',
self.browser_config.get('vbname'),
'hub_ip'
])
startvm = [
"VBoxManage",
"startvm",
"'{vbname}'"
.format(
vbname=self.browser_config.get('vbname')
),
"--type",
self.browser_config.get('vbox_type', 'gui')
]
out = subprocess.check_output(
' '.join(startvm),
stderr=subprocess.STDOUT,
shell=True
)
self.info_log('VBoxManage output: {out}'.format(out=out))
instance_ready = False
# TODO should be configurable
timeout = 60
self.info_log('Waiting for instance to start...')
for i in range(timeout):
getproperty = [
'VBoxManage',
'guestproperty',
'get',
self.browser_config.get('vbname'),
'wait_until_ready'
]
output = subprocess.check_output(
' '.join(getproperty),
stderr=subprocess.STDOUT,
shell=True
).decode('utf').strip()
self.info_log(
'VBoxManage guestproperty output: {output}'
.format(output=output)
)
if output.find('ready') != -1:
instance_ready = True
break
sleep(1)
sleep(3)
if instance_ready:
self.info_log('[Done] Instance ready...')
else:
raise Exception("Timeout error: the virtualbox machine is still not ready.") # noqa
# HUB IP
hub_ip = ni.ifaddresses('en0')[2][0]['addr']
self.info_log("Hub ip: %s" % hub_ip)
# Start selenium on the node
# LINUX
if self.browser_config.get('platform').lower() == "linux":
self.info_log('Starting the selenium node server')
# Update the hub_ip browser config
self.browser_config.config['hub_ip'] = hub_ip
command = self.browser_config.get(
"selenium_command"
).format(**self.browser_config.config)
self.execute_command(command)
# WINDOWS
elif self.browser_config.get('platform').lower() == "windows":
self.info_log("Setting the guest property in Windows")
# user_session.machine.set_guest_property(
# "hub_ip", "%s:%s" % (hub_ip, '4444'), ''
# )
return True
except Exception as e:
self.error_log('Exception: %s' % e)
raise
def tear_down(self):
"""Tear down the virtual box machine
"""
if not self.browser_config.get('terminate'):
self.warning_log("Skipping terminate")
return
self.info_log("Tearing down")
if self.browser_config.get('platform').lower() == 'linux':
self.execute_command("shutdown -h now", username='root')
elif self.browser_config.get('platform').lower() == 'windows':
self.session.console.power_down()
def start_video_recording(self, local_video_file_path, video_filename):
"""Start the video recording
"""
self.runner.info_log("Starting video recording...")
self.local_video_recording_file_path = local_video_file_path
self.remote_video_recording_file_path = video_filename
self.execute_command(
"./start_recording.sh '%s'" % self.remote_video_recording_file_path
)
def stop_video_recording(self):
"""Stop the video recording
"""
self.runner.info_log("Stopping video recording...")
self.execute_command("./stop_recording.sh")
# self.runner.info_log("output: %s"%output)
sleep(5)
self.scp_file_remote_to_local(
self.remote_video_recording_file_path,
self.local_video_recording_file_path
)
def get_id(self):
return '%s - %s' % (self.browser_config.browser_id, self.index)
def debug_log(self, msg):
self.runner.debug_log("[%s]%s" % (self.get_id(), msg))
def info_log(self, msg):
self.runner.info_log("[%s]%s" % (self.get_id(), msg))
def warning_log(self, msg):
self.runner.warning_log("[%s]%s" % (self.get_id(), msg))
def error_log(self, msg):
self.runner.error_log("[%s]%s" % (self.get_id(), msg))
def critial_log(self, msg):
self.runner.critial_log("[%s]%s" % (self.get_id(), msg))
| mit |
izapolsk/integration_tests | cfme/tests/containers/test_cockpit.py | 1 | 2437 | import pytest
from cfme import test_requirements
from cfme.containers.provider import ContainersProvider
from cfme.markers.env_markers.provider import providers
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.providers import ProviderFilter
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.usefixtures('setup_provider'),
pytest.mark.tier(1),
pytest.mark.provider(gen_func=providers,
filters=[ProviderFilter(classes=[ContainersProvider],
required_flags=['cockpit'])],
scope='function'),
test_requirements.containers
]
@pytest.mark.parametrize('cockpit', [False, True], ids=['disabled', 'enabled'])
def test_cockpit_button_access(appliance, provider, cockpit, request):
""" The test verifies the existence of cockpit "Web Console"
button on each node, click the button if enabled, verify no errors are displayed.
Polarion:
assignee: juwatts
caseimportance: high
casecomponent: Containers
initialEstimate: 1/6h
"""
request.addfinalizer(lambda: appliance.server.settings.disable_server_roles('cockpit_ws'))
if cockpit:
appliance.server.settings.enable_server_roles('cockpit_ws')
wait_for(lambda: appliance.server_roles['cockpit_ws'] is True, delay=10, timeout=300)
elif not cockpit:
appliance.server.settings.disable_server_roles('cockpit_ws')
wait_for(lambda: appliance.server_roles['cockpit_ws'] is False, delay=10, timeout=300)
else:
pytest.skip("Cockpit should be either enabled or disabled.")
collection = appliance.collections.container_nodes
nodes = collection.all()
for node in nodes:
view = (navigate_to(node, 'Details', force=True) if node else
pytest.skip("Could not determine node of {}".format(provider.name)))
if cockpit:
assert not view.toolbar.web_console.disabled
view.toolbar.web_console.click()
webconsole = node.vm_console
webconsole.switch_to_console()
assert not view.is_displayed
assert node.name in appliance.server.browser.url
webconsole.close_console_window()
assert view.is_displayed
view.flash.assert_no_error()
else:
assert view.toolbar.web_console.disabled
| gpl-2.0 |
alazyer/oscar | frobshop/oscar/apps/customer/migrations/0003_auto__add_productalert.py | 16 | 19453 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
depends_on = (
('catalogue', '0001_initial'),
)
def forwards(self, orm):
# Adding model 'ProductAlert'
db.create_table('customer_productalert', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.Product'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='alerts', null=True, to=orm[AUTH_USER_MODEL])),
('email', self.gf('django.db.models.fields.EmailField')(db_index=True, max_length=75, null=True, blank=True)),
('key', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, db_index=True)),
('status', self.gf('django.db.models.fields.CharField')(default='Active', max_length=20)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_confirmed', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('date_cancelled', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('date_closed', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('customer', ['ProductAlert'])
def backwards(self, orm):
# Deleting model 'ProductAlert'
db.delete_table('customer_productalert')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 26, 13, 49, 39, 401244)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 26, 13, 49, 39, 401151)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1024', 'db_index': 'True'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'customer.communicationeventtype': {
'Meta': {'object_name': 'CommunicationEventType'},
'category': ('django.db.models.fields.CharField', [], {'default': "u'Order related'", 'max_length': '255'}),
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email_body_html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_body_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_subject_template': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sms_template': ('django.db.models.fields.CharField', [], {'max_length': '170', 'blank': 'True'})
},
'customer.email': {
'Meta': {'object_name': 'Email'},
'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'body_text': ('django.db.models.fields.TextField', [], {}),
'date_sent': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subject': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emails'", 'to': "orm['{0}']".format(AUTH_USER_MODEL)})
},
'customer.notification': {
'Meta': {'ordering': "('-date_sent',)", 'object_name': 'Notification'},
'body': ('django.db.models.fields.TextField', [], {}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'date_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_sent': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'Inbox'", 'max_length': '32'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notifications'", 'to': "orm['{0}']".format(AUTH_USER_MODEL)}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(AUTH_USER_MODEL), 'null': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'customer.productalert': {
'Meta': {'object_name': 'ProductAlert'},
'date_cancelled': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_confirmed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Active'", 'max_length': '20'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'alerts'", 'null': 'True', 'to': "orm['{0}']".format(AUTH_USER_MODEL)})
}
}
complete_apps = ['customer']
| gpl-2.0 |
earshel/PokeyPySnipe | POGOProtos/Settings/MapSettings_pb2.py | 16 | 4773 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Settings/MapSettings.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Settings/MapSettings.proto',
package='POGOProtos.Settings',
syntax='proto3',
serialized_pb=_b('\n%POGOProtos/Settings/MapSettings.proto\x12\x13POGOProtos.Settings\"\x8f\x02\n\x0bMapSettings\x12\x1d\n\x15pokemon_visible_range\x18\x01 \x01(\x01\x12\x1d\n\x15poke_nav_range_meters\x18\x02 \x01(\x01\x12\x1e\n\x16\x65ncounter_range_meters\x18\x03 \x01(\x01\x12+\n#get_map_objects_min_refresh_seconds\x18\x04 \x01(\x02\x12+\n#get_map_objects_max_refresh_seconds\x18\x05 \x01(\x02\x12+\n#get_map_objects_min_distance_meters\x18\x06 \x01(\x02\x12\x1b\n\x13google_maps_api_key\x18\x07 \x01(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_MAPSETTINGS = _descriptor.Descriptor(
name='MapSettings',
full_name='POGOProtos.Settings.MapSettings',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pokemon_visible_range', full_name='POGOProtos.Settings.MapSettings.pokemon_visible_range', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='poke_nav_range_meters', full_name='POGOProtos.Settings.MapSettings.poke_nav_range_meters', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='encounter_range_meters', full_name='POGOProtos.Settings.MapSettings.encounter_range_meters', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='get_map_objects_min_refresh_seconds', full_name='POGOProtos.Settings.MapSettings.get_map_objects_min_refresh_seconds', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='get_map_objects_max_refresh_seconds', full_name='POGOProtos.Settings.MapSettings.get_map_objects_max_refresh_seconds', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='get_map_objects_min_distance_meters', full_name='POGOProtos.Settings.MapSettings.get_map_objects_min_distance_meters', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='google_maps_api_key', full_name='POGOProtos.Settings.MapSettings.google_maps_api_key', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=63,
serialized_end=334,
)
DESCRIPTOR.message_types_by_name['MapSettings'] = _MAPSETTINGS
MapSettings = _reflection.GeneratedProtocolMessageType('MapSettings', (_message.Message,), dict(
DESCRIPTOR = _MAPSETTINGS,
__module__ = 'POGOProtos.Settings.MapSettings_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Settings.MapSettings)
))
_sym_db.RegisterMessage(MapSettings)
# @@protoc_insertion_point(module_scope)
| mit |
gushedaoren/django-rest-auth | rest_auth/django_test_urls.py | 35 | 4671 | # Moved in Django 1.8 from django to tests/auth_tests/urls.py
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.urls import urlpatterns
from django.contrib.messages.api import info
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.template import RequestContext, Template
from django.views.decorators.cache import never_cache
class CustomRequestAuthenticationForm(AuthenticationForm):
def __init__(self, request, *args, **kwargs):
assert isinstance(request, HttpRequest)
super(CustomRequestAuthenticationForm, self).__init__(request, *args, **kwargs)
@never_cache
def remote_user_auth_view(request):
"""
Dummy view for remote user tests
"""
t = Template("Username is {{ user }}.")
c = RequestContext(request, {})
return HttpResponse(t.render(c))
def auth_processor_no_attr_access(request):
render(request, 'context_processors/auth_attrs_no_access.html')
# *After* rendering, we check whether the session was accessed
return render(request,
'context_processors/auth_attrs_test_access.html',
{'session_accessed': request.session.accessed})
def auth_processor_attr_access(request):
render(request, 'context_processors/auth_attrs_access.html')
return render(request,
'context_processors/auth_attrs_test_access.html',
{'session_accessed': request.session.accessed})
def auth_processor_user(request):
return render(request, 'context_processors/auth_attrs_user.html')
def auth_processor_perms(request):
return render(request, 'context_processors/auth_attrs_perms.html')
def auth_processor_perm_in_perms(request):
return render(request, 'context_processors/auth_attrs_perm_in_perms.html')
def auth_processor_messages(request):
info(request, "Message 1")
return render(request, 'context_processors/auth_attrs_messages.html')
def userpage(request):
pass
def custom_request_auth_login(request):
return views.login(request, authentication_form=CustomRequestAuthenticationForm)
# special urls for auth test cases
urlpatterns += [
url(r'^logout/custom_query/$', views.logout, dict(redirect_field_name='follow')),
url(r'^logout/next_page/$', views.logout, dict(next_page='/somewhere/')),
url(r'^logout/next_page/named/$', views.logout, dict(next_page='password_reset')),
url(r'^remote_user/$', remote_user_auth_view),
url(r'^password_reset_from_email/$', views.password_reset, dict(from_email='[email protected]')),
url(r'^password_reset/custom_redirect/$', views.password_reset, dict(post_reset_redirect='/custom/')),
url(r'^password_reset/custom_redirect/named/$', views.password_reset, dict(post_reset_redirect='password_reset')),
url(r'^password_reset/html_email_template/$', views.password_reset,
dict(html_email_template_name='registration/html_password_reset_email.html')),
url(r'^reset/custom/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.password_reset_confirm,
dict(post_reset_redirect='/custom/')),
url(r'^reset/custom/named/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.password_reset_confirm,
dict(post_reset_redirect='password_reset')),
url(r'^password_change/custom/$', views.password_change, dict(post_change_redirect='/custom/')),
url(r'^password_change/custom/named/$', views.password_change, dict(post_change_redirect='password_reset')),
url(r'^admin_password_reset/$', views.password_reset, dict(is_admin_site=True)),
url(r'^login_required/$', login_required(views.password_reset)),
url(r'^login_required_login_url/$', login_required(views.password_reset, login_url='/somewhere/')),
url(r'^auth_processor_no_attr_access/$', auth_processor_no_attr_access),
url(r'^auth_processor_attr_access/$', auth_processor_attr_access),
url(r'^auth_processor_user/$', auth_processor_user),
url(r'^auth_processor_perms/$', auth_processor_perms),
url(r'^auth_processor_perm_in_perms/$', auth_processor_perm_in_perms),
url(r'^auth_processor_messages/$', auth_processor_messages),
url(r'^custom_request_auth_login/$', custom_request_auth_login),
url(r'^userpage/(.+)/$', userpage, name="userpage"),
# This line is only required to render the password reset with is_admin=True
url(r'^admin/', include(admin.site.urls)),
]
| mit |
lyndonChen/eonboard | eoncloud_web/cloud/network_task.py | 1 | 18827 | #-*-coding=utf-8-*-
import datetime
import logging
import time
from django.conf import settings
from celery import app
from cloud_utils import create_rc_by_network,\
create_rc_by_subnet, create_rc_by_router,\
create_rc_by_floating, create_rc_by_security, \
create_rc_by_udc
from biz.network.models import Network, Subnet, Router, RouterInterface
from biz.firewall.models import Firewall, FirewallRules
from biz.floating.settings import FLOATING_AVAILABLE, FLOATING_RELEASED, \
FLOATING_BINDED, FLOATING_ERROR, RESOURCE_TYPE
from biz.network.settings import NETWORK_STATE_ACTIVE,\
NETWORK_STATE_ERROR, NETWORK_STATE_UPDATING
from biz.instance.models import Instance
from biz.lbaas.models import BalancerPool
from biz.lbaas.models import BalancerVIP
from biz.floating.models import Floating
from api import neutron
from api import network
LOG = logging.getLogger("cloud.tasks")
ACTIVE = 1
DELETED = 2
ERROR = 3
def create_default_private_network(instance):
# create network
try:
network = Network.objects.get(pk=instance.network_id)
return network
except Network.DoesNotExist:
pass
network = Network.objects.filter(is_default=True,
status__in=[0,1],
user=instance.user,
user_data_center=instance.user_data_center)
if len(network) > 0:
return network[0]
network = Network.objects.create(name=settings.DEFAULT_NETWORK_NAME,
status=0,
is_default=True,
user=instance.user,
user_data_center=instance.user_data_center)
subnet = Subnet.objects.create(name=settings.DEFAULT_SUBNET_NAME,
network=network,
address="172.31.0.0/24",
ip_version=4,
status=0,
user=instance.user,
user_data_center=instance.user_data_center)
router = Router.objects.create(name=settings.DEFAULT_ROUTER_NAME,
status=0,
is_default=True,
is_gateway=True,
user=instance.user,
user_data_center=instance.user_data_center)
router_interface = RouterInterface.objects.create(network_id=network.id,
router=router,
subnet=subnet,
user=instance.user,
user_data_center=instance.user_data_center,
deleted=False)
nt = network_create_task(network)
sub = subnet_create_task(subnet)
rt = router_create_task(router)
# set external gateway
router_add_gateway_task(router)
# add network to router
router_add_interface_task(router, subnet, router_interface)
# default security group
return network
@app.task
def network_create_task(network):
rc = create_rc_by_network(network)
network_params = {'name': "network-%s" % network.id, "admin_state_up": True}
LOG.info("start create network,id:[%s],name[%s]" % (network.id, network.name))
try:
net = neutron.network_create(rc, **network_params)
network.network_id = net.id
network.status = NETWORK_STATE_ACTIVE
network.save()
except Exception as ex:
network.status = NETWORK_STATE_ERROR
network.save()
LOG.info("create network error,id:[%s],name[%s],msg:[%s]" % (network.id, network.name, ex))
raise ex
return network
@app.task
def network_delete_task(network):
rc = create_rc_by_network(network)
LOG.info("delete network,id:[%s],name[%s]" % (network.id, network.name))
try:
# delete all subnet
LOG.info("delete all subnet, network id [%s] name[%s]" % (network.id, network.name))
subnet_set = Subnet.objects.filter(network_id=network.id, deleted=False)
for subnet in subnet_set:
subnet_delete_task(subnet)
# delete network
net = neutron.network_delete(rc, network.network_id)
network.network_id = None
network.deleted = True
network.save()
except Exception as ex:
network.status = NETWORK_STATE_ERROR
network.save()
LOG.info("delete network error,id:[%s],name[%s],msg:[%s]" % (network.id, network.name, ex))
raise ex
return network
@app.task
def subnet_create_task(subnet=None):
rc = create_rc_by_subnet(subnet)
subnet_params = {"network_id": subnet.network.network_id,
"name": "subnet-%s" % subnet.id,
"cidr": subnet.address,
"ip_version": subnet.ip_version,
"enable_dhcp": True}
try:
sub = neutron.subnet_create(rc, **subnet_params)
subnet.subnet_id = sub.id
subnet.status = NETWORK_STATE_ACTIVE
subnet.save()
except Exception as ex:
subnet.status = NETWORK_STATE_ERROR
subnet.save()
LOG.info("create subnet error,id:[%s], msg:[%s]" % (subnet.id, ex))
raise ex
return subnet
@app.task
def subnet_delete_task(subnet):
rc = create_rc_by_subnet(subnet)
try:
sub = neutron.subnet_delete(rc, subnet.subnet_id)
subnet.deleted = True
subnet.save()
except Exception as ex:
subnet.status = NETWORK_STATE_ERROR
subnet.save()
LOG.info("delete subnet error,id:[%s], msg:[%s]" % (subnet.id, ex))
raise ex
return subnet
@app.task
def router_create_task(router=None):
rc = create_rc_by_router(router)
router_params = {"name": "router-%s" % router.id,
"distributed": False,
"ha": False}
try:
rot = neutron.router_create(rc, **router_params)
router.router_id = rot.id
if router.is_gateway:
router_add_gateway_task(router)
router.status = NETWORK_STATE_ACTIVE
router.save()
except Exception as ex:
router.status = NETWORK_STATE_ERROR
router.save()
        LOG.info("create router error,id:[%s], msg:[%s]" % (router.id, ex))
raise ex
return router
@app.task
def router_delete_task(router=None):
rc = create_rc_by_router(router)
LOG.info("delete router,id:[%s],name[%s]" % (router.id, router.name))
try:
ro = neutron.router_delete(rc, router.router_id)
router.router_id = None
router.deleted = True
router.save()
except Exception as ex:
router.status = NETWORK_STATE_ERROR
router.save()
        LOG.info("delete router error,id:[%s],name[%s],msg:[%s]" % (router.id, router.name, ex))
        raise ex
    return router
@app.task
def router_add_gateway_task(router=None):
rc = create_rc_by_router(router)
# find external network
search_opts = {'router:external': True}
networks = neutron.network_list(rc, **search_opts)
ext_net = filter(lambda n: n.name.lower() == router.user_data_center.data_center.ext_net, networks)
ext_net_id = None
if ext_net and len(ext_net) > 0:
ext_net_id = ext_net[0].id
# set external gateway
neutron.router_add_gateway(rc, router.router_id, ext_net_id)
time.sleep(5)
# update cloud db router gateway info
os_router = neutron.router_get(rc, router.router_id)
ext_fixed_ips = os_router["external_gateway_info"].get("external_fixed_ips", [])
router.gateway = ext_fixed_ips[0].get("ip_address") if ext_fixed_ips else "---"
router.status = NETWORK_STATE_ACTIVE
router.is_gateway = True
router.save()
return True
@app.task
def router_remove_gateway_task(router=None):
if not router:
return
rc = create_rc_by_router(router)
neutron.router_remove_gateway(rc, router.router_id)
router.gateway = ''
router.status = NETWORK_STATE_ACTIVE
router.is_gateway = False
router.save()
@app.task
def router_add_interface_task(router=None, subnet=None, router_interface=None):
rc = create_rc_by_router(router)
router_inf = neutron.router_add_interface(rc, router.router_id,
subnet_id=subnet.subnet_id)
router_interface.os_port_id = router_inf['port_id']
router_interface.save()
router.status = NETWORK_STATE_ACTIVE
router.save()
@app.task
def network_and_subnet_create_task(network, subnet):
rc = create_rc_by_network(network)
LOG.info("Begin create network,id[%s], name[%s]" % (network.id, network.name))
try:
net = network_create_task(network)
LOG.info("Begin create subnet,id[%s], name[%s]" % (subnet.id, subnet.name))
subnet_create_task(subnet)
except Exception as e:
raise e
@app.task
def router_remove_interface_task(router=None, subnet=None, router_interface=None):
rc = create_rc_by_router(router)
try:
neutron.router_remove_interface(rc, router.router_id, subnet.subnet_id, router_interface.os_port_id)
router.status = NETWORK_STATE_ACTIVE
router.save()
except Exception as e:
router_interface.deleted = False
router_interface.save()
        LOG.error("detach network from router error, msg:[%s]" % e)
router.status = NETWORK_STATE_ACTIVE
router.save()
@app.task
def network_link_router_task(router=None, subnet=None, router_interface=None):
router_add_interface_task(router=router, subnet=subnet, router_interface=router_interface)
@app.task
def allocate_floating_task(floating=None):
rc = create_rc_by_floating(floating)
    LOG.info("Begin to allocate floating, [%s]" % floating.id)
search_opts = {'router:external': True}
networks = neutron.network_list(rc, **search_opts)
ext_net = filter(lambda n: n.name.lower() == floating.user_data_center.data_center.ext_net, networks)
ext_net_id = None
if ext_net and len(ext_net) > 0:
ext_net_id = ext_net[0].id
if ext_net_id:
try:
fip = network.tenant_floating_ip_allocate(rc, pool=ext_net_id)
floating.ip = fip.ip
floating.status = FLOATING_AVAILABLE
floating.uuid = fip.id
            floating.save()
            LOG.info("End to allocate floating, [%s][%s]" % (floating.id, fip.ip))
        except Exception as e:
            floating.status = FLOATING_ERROR
            floating.save()
            LOG.exception(e)
            LOG.info("End to allocate floating, [%s][exception]" % floating.id)
else:
        LOG.info("End to allocate floating, [%s][---]" % floating.id)
def floating_release(floating, **kwargs):
rc = create_rc_by_floating(floating)
result = True
if floating.uuid:
result = network.tenant_floating_ip_release(rc, floating.uuid)
floating.status = FLOATING_RELEASED
floating.deleted = 1
floating.delete_date = datetime.datetime.now()
floating.save()
    LOG.info("floating action, [%s][release][%s]" % (floating.id, result))
def floating_associate(floating, **kwargs):
resource_type_dict = dict(RESOURCE_TYPE)
resource_type = kwargs.get('resource_type')[0]
resource = kwargs.get('resource')[0]
if resource:
rc = create_rc_by_floating(floating)
ports = None
resource_obj = None
if resource_type_dict[str(resource_type)] == 'INSTANCE':
ins = Instance.objects.get(pk=resource)
resource_obj = ins
ports = network.floating_ip_target_get_by_instance(rc, ins.uuid)
elif resource_type_dict[resource_type] == 'LOADBALANCER':
pool = BalancerPool.objects.get(pk=resource)
if not pool or not pool.vip:
floating.status = FLOATING_AVAILABLE
floating.save()
return None
resource_obj = pool
ports = pool.vip.port_id+"_"+pool.vip.address
if not ports:
            LOG.info("floating action, resourceType[%s],[%s][associate][ins:%s] ports is None" % (resource_type_dict[resource_type], floating.id, resource))
floating.status = FLOATING_AVAILABLE
floating.save()
return
LOG.info("floating action, [%s][associate][ins:%s][ports:%s]" % (
floating.id, resource, ports))
try:
network.floating_ip_associate(rc, floating.uuid, ports)
port, fixed_ip = ports.split('_')
floating.resource = resource
floating.resource_type = resource_type
floating.status = FLOATING_BINDED
floating.fixed_ip = fixed_ip
floating.port_id = port
floating.save()
if resource_type_dict[str(resource_type)] == 'INSTANCE':
resource_obj.public_ip = floating.ip
resource_obj.save()
elif resource_type_dict[resource_type] == 'LOADBALANCER':
vip = BalancerVIP.objects.get(pk=resource_obj.vip.id)
vip.public_address = floating.ip
vip.save()
except Exception as e:
LOG.error(e)
floating.status = FLOATING_AVAILABLE
floating.save()
else:
        LOG.info("floating action, [%s][associate] no ins_id" % floating.id)
def floating_disassociate(floating, **kwargs):
rc = create_rc_by_floating(floating)
    LOG.info("floating action, [%s][disassociate][port:%s]" % (floating.id, floating.port_id))
try:
if floating.uuid and floating.port_id:
network.floating_ip_disassociate(rc, floating.uuid, floating.port_id)
if floating.resource_type == 'INSTANCE':
ins = Instance.objects.get(pk=floating.resource)
ins.public_ip = None
ins.save()
elif floating.resource_type == 'LOADBALANCER':
pool = BalancerPool.objects.get(pk=floating.resource)
vip = BalancerVIP.objects.get(pk=pool.vip.id)
vip.public_address = None
vip.save()
#floating.instance = None
floating.resource = None
floating.resource_type = None
floating.status = FLOATING_AVAILABLE
floating.fixed_ip = None
floating.port_id = None
floating.save()
except Exception as e:
return False
@app.task
def floating_action_task(floating=None, act=None, **kwargs):
    LOG.info("Begin floating action, [%s][%s]" % (floating.id, act))
try:
globals()["floating_%s" % act](floating, **kwargs)
except Exception as e:
LOG.exception(e)
    LOG.info("End floating action, [%s][%s]" % (floating.id, act))
@app.task
def security_group_create_task(firewall=None):
if not firewall:
return
rc = create_rc_by_security(firewall)
security_group = network.security_group_create(rc, firewall.name, firewall.desc)
firewall.firewall_id = security_group.id
firewall.save()
@app.task
def security_group_delete_task(firewall=None):
if not firewall:
return
rc = create_rc_by_security(firewall)
try:
security_group = network.security_group_delete(rc, firewall.firewall_id)
firewall.firewall_id = ""
firewall.deleted = True
firewall.save()
firewall_rule_set = FirewallRules.objects.filter(firewall=firewall.id)
if not firewall_rule_set:
return
for rule in firewall_rule_set:
rule.firewall_rules_id = ''
rule.deleted = True
rule.save()
except Exception as e:
LOG.error("Firewall delete error, msg: %s" % e)
raise e
@app.task
def security_group_rule_create_task(firewall_rule=None):
if not firewall_rule:
return
rc = create_rc_by_security(firewall_rule)
try:
rule = network.security_group_rule_create(rc, parent_group_id=firewall_rule.firewall.firewall_id,
direction=firewall_rule.direction,
ethertype=firewall_rule.ether_type,
ip_protocol=firewall_rule.protocol,
from_port=firewall_rule.port_range_min,
to_port=firewall_rule.port_range_max,
cidr=firewall_rule.remote_ip_prefix,
group_id=firewall_rule.remote_group_id)
firewall_rule.firewall_rules_id = rule.id
firewall_rule.save()
except Exception as e:
firewall_rule.delete()
raise e
@app.task
def security_group_rule_delete_task(firewall_rule=None):
if not firewall_rule:
return
rc = create_rc_by_security(firewall_rule)
try:
network.security_group_rule_delete(rc, firewall_rule.firewall_rules_id)
firewall_rule.firewall_rules_id = ''
firewall_rule.deleted = True
firewall_rule.save()
except Exception as e:
LOG.info("Delete firewall rule error %s" % e)
raise e
@app.task
def server_update_security_groups_task(instance, firewall=None):
if not firewall:
return
rc = create_rc_by_security(firewall)
try:
LOG.info("Update server security group ,server_id[%s],security_group[%s]" % (instance.uuid, firewall.firewall_id))
network.server_update_security_groups(rc, instance.uuid, [firewall.firewall_id])
except Exception as e:
LOG.error("Update server security group error, msg: %s" % e)
raise e
def edit_default_security_group(user, udc):
rc = create_rc_by_udc(udc)
sec_group_list = network.security_group_list(rc)
default_sec_group = None
for sec_group in sec_group_list:
if sec_group.name == "default":
default_sec_group = sec_group
break
if default_sec_group is None:
        LOG.error("default security group not found. user:[%s], project:[%s]"
                  % (user.id, udc.tenant_name))
return
firewall = Firewall.objects.create(name=settings.DEFAULT_FIREWALL_NAME,
desc=settings.DEFAULT_FIREWALL_NAME,
is_default=True,
firewall_id=default_sec_group.id,
user=user,
user_data_center=udc,
deleted=False)
| apache-2.0 |
NeCTAR-RC/nova | nova/db/sqlalchemy/api.py | 1 | 252255 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import collections
import copy
import datetime
import functools
import inspect
import sys
import uuid
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_db import options as oslo_db_options
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import update_match
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import noload
from sqlalchemy.orm import undefer
from sqlalchemy.schema import Table
from sqlalchemy import sql
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from sqlalchemy.sql import false
from sqlalchemy.sql import func
from sqlalchemy.sql import null
from sqlalchemy.sql import true
from nova import block_device
from nova.compute import task_states
from nova.compute import vm_states
import nova.context
from nova.db.sqlalchemy import models
from nova import exception
from nova.i18n import _, _LI, _LE, _LW
from nova.objects import fields
from nova import quota
from nova import safe_utils
db_opts = [
cfg.StrOpt('osapi_compute_unique_server_name_scope',
default='',
help='When set, compute API will consider duplicate hostnames '
'invalid within the specified scope, regardless of case. '
'Should be empty, "project" or "global".'),
]
api_db_opts = [
cfg.StrOpt('connection',
help='The SQLAlchemy connection string to use to connect to '
'the Nova API database.',
secret=True),
cfg.BoolOpt('sqlite_synchronous',
default=True,
help='If True, SQLite uses synchronous mode.'),
cfg.StrOpt('slave_connection',
secret=True,
help='The SQLAlchemy connection string to use to connect to the'
' slave database.'),
cfg.StrOpt('mysql_sql_mode',
default='TRADITIONAL',
help='The SQL mode to be used for MySQL sessions. '
'This option, including the default, overrides any '
'server-set SQL mode. To use whatever SQL mode '
'is set by the server configuration, '
'set this to no value. Example: mysql_sql_mode='),
cfg.IntOpt('idle_timeout',
default=3600,
help='Timeout before idle SQL connections are reaped.'),
cfg.IntOpt('max_pool_size',
help='Maximum number of SQL connections to keep open in a '
'pool.'),
cfg.IntOpt('max_retries',
default=10,
help='Maximum number of database connection retries '
'during startup. Set to -1 to specify an infinite '
'retry count.'),
cfg.IntOpt('retry_interval',
default=10,
help='Interval between retries of opening a SQL connection.'),
cfg.IntOpt('max_overflow',
help='If set, use this value for max_overflow with '
'SQLAlchemy.'),
cfg.IntOpt('connection_debug',
default=0,
help='Verbosity of SQL debugging information: 0=None, '
'100=Everything.'),
cfg.BoolOpt('connection_trace',
default=False,
help='Add Python stack traces to SQL as comment strings.'),
cfg.IntOpt('pool_timeout',
help='If set, use this value for pool_timeout with '
'SQLAlchemy.'),
]
CONF = cfg.CONF
CONF.register_opts(db_opts)
CONF.register_opts(oslo_db_options.database_opts, 'database')
CONF.register_opts(api_db_opts, group='api_database')
CONF.import_opt('until_refresh', 'nova.quota')
LOG = logging.getLogger(__name__)
main_context_manager = enginefacade.transaction_context()
api_context_manager = enginefacade.transaction_context()
def _get_db_conf(conf_group, connection=None):
kw = dict(
connection=connection or conf_group.connection,
slave_connection=conf_group.slave_connection,
sqlite_fk=False,
__autocommit=True,
expire_on_commit=False,
mysql_sql_mode=conf_group.mysql_sql_mode,
idle_timeout=conf_group.idle_timeout,
connection_debug=conf_group.connection_debug,
max_pool_size=conf_group.max_pool_size,
max_overflow=conf_group.max_overflow,
pool_timeout=conf_group.pool_timeout,
sqlite_synchronous=conf_group.sqlite_synchronous,
connection_trace=conf_group.connection_trace,
max_retries=conf_group.max_retries,
retry_interval=conf_group.retry_interval)
return kw
def _context_manager_from_context(context):
if context:
try:
return context.db_connection
except AttributeError:
pass
def configure(conf):
main_context_manager.configure(**_get_db_conf(conf.database))
api_context_manager.configure(**_get_db_conf(conf.api_database))
def create_context_manager(connection=None):
"""Create a database context manager object.
    :param connection: The database connection string
"""
ctxt_mgr = enginefacade.transaction_context()
ctxt_mgr.configure(**_get_db_conf(CONF.database, connection=connection))
return ctxt_mgr
def get_context_manager(context):
"""Get a database context manager object.
:param context: The request context that can contain a context manager
"""
return _context_manager_from_context(context) or main_context_manager
def get_engine(use_slave=False, context=None):
"""Get a database engine object.
:param use_slave: Whether to use the slave connection
:param context: The request context that can contain a context manager
"""
ctxt_mgr = _context_manager_from_context(context) or main_context_manager
return ctxt_mgr.get_legacy_facade().get_engine(use_slave=use_slave)
def get_api_engine():
return api_context_manager.get_legacy_facade().get_engine()
_SHADOW_TABLE_PREFIX = 'shadow_'
_DEFAULT_QUOTA_NAME = 'default'
PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`nova.context.authorize_project_context` and
:py:func:`nova.context.authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
nova.context.require_context(args[0])
return f(*args, **kwargs)
return wrapper
def require_instance_exists_using_uuid(f):
"""Decorator to require the specified instance to exist.
Requires the wrapped function to use context and instance_uuid as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, instance_uuid, *args, **kwargs):
instance_get_by_uuid(context, instance_uuid)
return f(context, instance_uuid, *args, **kwargs)
return wrapper
def require_aggregate_exists(f):
"""Decorator to require the specified aggregate to exist.
Requires the wrapped function to use context and aggregate_id as
their first two arguments.
"""
@functools.wraps(f)
def wrapper(context, aggregate_id, *args, **kwargs):
aggregate_get(context, aggregate_id)
return f(context, aggregate_id, *args, **kwargs)
return wrapper
def select_db_reader_mode(f):
"""Decorator to select synchronous or asynchronous reader mode.
The kwarg argument 'use_slave' defines reader mode. Asynchronous reader
will be used if 'use_slave' is True and synchronous reader otherwise.
If 'use_slave' is not specified default value 'False' will be used.
Wrapped function must have a context in the arguments.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
wrapped_func = safe_utils.get_wrapped_function(f)
keyed_args = inspect.getcallargs(wrapped_func, *args, **kwargs)
context = keyed_args['context']
use_slave = keyed_args.get('use_slave', False)
if use_slave:
reader_mode = main_context_manager.async
else:
reader_mode = main_context_manager.reader
with reader_mode.using(context):
return f(*args, **kwargs)
return wrapper
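# Illustrative usage sketch (not part of the original module): a hypothetical
# read-only DB API call wired through the reader-mode selector above. Callers
# pass use_slave=True to read from the asynchronous slave connection; in the
# real DB API this decorator is typically stacked with one of the
# pick_context_manager_* decorators defined below.
@select_db_reader_mode
def _example_service_get_all(context, use_slave=False):
    # The decorator consumes 'use_slave' to choose the reader; the body just
    # issues a normal query against the session it sets up on the context.
    return model_query(context, models.Service).all()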
def pick_context_manager_writer(f):
"""Decorator to use a writer db context manager.
The db context manager will be picked from the RequestContext.
Wrapped function must have a RequestContext in the arguments.
"""
@functools.wraps(f)
def wrapped(context, *args, **kwargs):
ctxt_mgr = get_context_manager(context)
with ctxt_mgr.writer.using(context):
return f(context, *args, **kwargs)
return wrapped
def pick_context_manager_reader(f):
"""Decorator to use a reader db context manager.
The db context manager will be picked from the RequestContext.
Wrapped function must have a RequestContext in the arguments.
"""
@functools.wraps(f)
def wrapped(context, *args, **kwargs):
ctxt_mgr = get_context_manager(context)
with ctxt_mgr.reader.using(context):
return f(context, *args, **kwargs)
return wrapped
def pick_context_manager_reader_allow_async(f):
"""Decorator to use a reader.allow_async db context manager.
The db context manager will be picked from the RequestContext.
Wrapped function must have a RequestContext in the arguments.
"""
@functools.wraps(f)
def wrapped(context, *args, **kwargs):
ctxt_mgr = get_context_manager(context)
with ctxt_mgr.reader.allow_async.using(context):
return f(context, *args, **kwargs)
return wrapped
def model_query(context, model,
args=None,
read_deleted=None,
project_only=False):
"""Query helper that accounts for context's `read_deleted` field.
:param context: NovaContext of the query.
:param model: Model to query. Must be a subclass of ModelBase.
:param args: Arguments to query. If None - model is used.
:param read_deleted: If not None, overrides context's read_deleted field.
Permitted values are 'no', which does not return
deleted values; 'only', which only returns deleted
values; and 'yes', which does not filter deleted
values.
:param project_only: If set and context is user-type, then restrict
query to match the context's project_id. If set to
'allow_none', restriction includes project_id = None.
"""
if read_deleted is None:
read_deleted = context.read_deleted
query_kwargs = {}
if 'no' == read_deleted:
query_kwargs['deleted'] = False
elif 'only' == read_deleted:
query_kwargs['deleted'] = True
elif 'yes' == read_deleted:
pass
else:
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
query = sqlalchemyutils.model_query(
model, context.session, args, **query_kwargs)
# We can't use oslo.db model_query's project_id here, as it doesn't allow
# us to return both our projects and unowned projects.
if nova.context.is_user_context(context) and project_only:
if project_only == 'allow_none':
query = query.\
filter(or_(model.project_id == context.project_id,
model.project_id == null()))
else:
query = query.filter_by(project_id=context.project_id)
return query
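# Illustrative usage sketch (not part of the original module): model_query is
# normally called from inside a DB API function decorated with one of the
# pick_context_manager_* helpers above, so that context.session is populated.
# The function name below is hypothetical.
@pick_context_manager_reader
def _example_instance_list_all(context):
    # read_deleted='yes' returns both live and soft-deleted rows, while
    # project_only=True restricts user contexts to their own project.
    return model_query(context, models.Instance, read_deleted='yes',
                       project_only=True).all()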
def convert_objects_related_datetimes(values, *datetime_keys):
if not datetime_keys:
datetime_keys = ('created_at', 'deleted_at', 'updated_at')
for key in datetime_keys:
if key in values and values[key]:
if isinstance(values[key], six.string_types):
try:
values[key] = timeutils.parse_strtime(values[key])
except ValueError:
                    # Try alternate parsing since parse_strtime will fail
                    # on values such as '2015-05-28T19:59:38+00:00'
values[key] = timeutils.parse_isotime(values[key])
# NOTE(danms): Strip UTC timezones from datetimes, since they're
# stored that way in the database
values[key] = values[key].replace(tzinfo=None)
return values
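# Illustrative example (not part of the original module): normalizing incoming
# values before writing them to the database. The timestamp literal below is
# only sample data.
#
#     values = {'created_at': '2015-05-28T19:59:38+00:00', 'launched_at': None}
#     convert_objects_related_datetimes(values, 'created_at', 'launched_at')
#     # values['created_at'] is now a naive datetime (tzinfo stripped); falsy
#     # entries such as None are left untouched.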
def _sync_instances(context, project_id, user_id):
return dict(zip(('instances', 'cores', 'ram'),
_instance_data_get_for_user(context, project_id, user_id)))
def _sync_floating_ips(context, project_id, user_id):
return dict(floating_ips=_floating_ip_count_by_project(
context, project_id))
def _sync_fixed_ips(context, project_id, user_id):
return dict(fixed_ips=_fixed_ip_count_by_project(context, project_id))
def _sync_security_groups(context, project_id, user_id):
return dict(security_groups=_security_group_count_by_project_and_user(
context, project_id, user_id))
def _sync_server_groups(context, project_id, user_id):
return dict(server_groups=_instance_group_count_by_project_and_user(
context, project_id, user_id))
QUOTA_SYNC_FUNCTIONS = {
'_sync_instances': _sync_instances,
'_sync_floating_ips': _sync_floating_ips,
'_sync_fixed_ips': _sync_fixed_ips,
'_sync_security_groups': _sync_security_groups,
'_sync_server_groups': _sync_server_groups,
}
###################
def constraint(**conditions):
return Constraint(conditions)
def equal_any(*values):
return EqualityCondition(values)
def not_equal(*values):
return InequalityCondition(values)
class Constraint(object):
def __init__(self, conditions):
self.conditions = conditions
def apply(self, model, query):
for key, condition in self.conditions.items():
for clause in condition.clauses(getattr(model, key)):
query = query.filter(clause)
return query
class EqualityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
# method signature requires us to return an iterable even if for OR
# operator this will actually be a single clause
return [or_(*[field == value for value in self.values])]
class InequalityCondition(object):
def __init__(self, values):
self.values = values
def clauses(self, field):
return [field != value for value in self.values]
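# Illustrative usage sketch (not part of the original module): building a
# Constraint and applying it to a query by hand. Callers of the DB API
# normally pass such a constraint into calls like instance_destroy(); the
# function name below is hypothetical.
@pick_context_manager_reader
def _example_constrained_instance_get(context, instance_uuid):
    cons = constraint(vm_state=equal_any(vm_states.STOPPED, vm_states.ERROR),
                      task_state=not_equal(task_states.MIGRATING))
    query = model_query(context, models.Instance).filter_by(uuid=instance_uuid)
    # apply() ANDs one filter clause per named column onto the query.
    return cons.apply(models.Instance, query).first()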
###################
@pick_context_manager_writer
def service_destroy(context, service_id):
service = service_get(context, service_id)
model_query(context, models.Service).\
filter_by(id=service_id).\
soft_delete(synchronize_session=False)
# TODO(sbauza): Remove the service_id filter in a later release
# once we are sure that all compute nodes report the host field
model_query(context, models.ComputeNode).\
filter(or_(models.ComputeNode.service_id == service_id,
models.ComputeNode.host == service['host'])).\
soft_delete(synchronize_session=False)
@pick_context_manager_reader
def service_get(context, service_id):
query = model_query(context, models.Service).filter_by(id=service_id)
result = query.first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@pick_context_manager_reader_allow_async
def service_get_minimum_version(context, binary):
min_version = context.session.query(
func.min(models.Service.version)).\
filter(models.Service.binary == binary).\
filter(models.Service.forced_down == false()).\
scalar()
return min_version
@pick_context_manager_reader
def service_get_all(context, disabled=None):
query = model_query(context, models.Service)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@pick_context_manager_reader
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(topic=topic).\
all()
@pick_context_manager_reader
def service_get_by_host_and_topic(context, host, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
@pick_context_manager_reader
def service_get_all_by_binary(context, binary, include_disabled=False):
query = model_query(context, models.Service, read_deleted="no").\
filter_by(binary=binary)
if not include_disabled:
query = query.filter_by(disabled=False)
return query.all()
@pick_context_manager_reader
def service_get_by_host_and_binary(context, host, binary):
result = model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@pick_context_manager_reader
def service_get_all_by_host(context, host):
return model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
@pick_context_manager_reader_allow_async
def service_get_by_compute_host(context, host):
result = model_query(context, models.Service, read_deleted="no").\
filter_by(host=host).\
filter_by(binary='nova-compute').\
first()
if not result:
raise exception.ComputeHostNotFound(host=host)
return result
@pick_context_manager_writer
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
try:
service_ref.save(context.session)
except db_exc.DBDuplicateEntry as e:
if 'binary' in e.columns:
raise exception.ServiceBinaryExists(host=values.get('host'),
binary=values.get('binary'))
raise exception.ServiceTopicExists(host=values.get('host'),
topic=values.get('topic'))
return service_ref
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def service_update(context, service_id, values):
service_ref = service_get(context, service_id)
# Only servicegroup.drivers.db.DbDriver._report_state() updates
# 'report_count', so if that value changes then store the timestamp
# as the last time we got a state report.
if 'report_count' in values:
if values['report_count'] > service_ref.report_count:
service_ref.last_seen_up = timeutils.utcnow()
service_ref.update(values)
return service_ref
###################
def _compute_node_select(context, filters=None):
# NOTE(jaypipes): With the addition of the resource-providers database
# schema, inventory and allocation information for various resources
# on a compute node are to be migrated from the compute_nodes and
# instance_extra tables into the new inventories and allocations tables.
# During the time that this data migration is ongoing we need to allow
# the scheduler to essentially be blind to the underlying database
# schema changes. So, this query here returns three sets of resource
# attributes:
# - inv_memory_mb, inv_memory_mb_used, inv_memory_mb_reserved,
# inv_ram_allocation_ratio
# - inv_vcpus, inv_vcpus_used, inv_cpu_allocation_ratio
# - inv_local_gb, inv_local_gb_used, inv_disk_allocation_ratio
# These resource capacity/usage fields store the total and used values
# for those three resource classes that are currently store in similar
# fields in the compute_nodes table (e.g. memory_mb and memory_mb_used)
# The code that runs the online data migrations will be able to tell if
# the compute node has had its inventory information moved to the
# inventories table by checking for a non-None field value for the
# inv_memory_mb, inv_vcpus, and inv_disk_gb fields.
#
# The below SQLAlchemy code below produces the following SQL statement
# exactly:
#
# SELECT
# cn.*,
# ram_inv.total as inv_memory_mb,
# ram_inv.reserved as inv_memory_mb_reserved,
# ram_inv.allocation_ratio as inv_ram_allocation_ratio,
# ram_usage.used as inv_memory_mb_used,
# cpu_inv.total as inv_vcpus,
# cpu_inv.allocation_ratio as inv_cpu_allocation_ratio,
# cpu_usage.used as inv_vcpus_used,
# disk_inv.total as inv_local_gb,
# disk_inv.allocation_ratio as inv_disk_allocation_ratio,
# disk_usage.used as inv_local_gb_used
# FROM compute_nodes AS cn
# LEFT OUTER JOIN resource_providers AS rp
# ON cn.uuid = rp.uuid
# LEFT OUTER JOIN inventories AS ram_inv
# ON rp.id = ram_inv.resource_provider_id
# AND ram_inv.resource_class_id = :RAM_MB
# LEFT OUTER JOIN (
# SELECT resource_provider_id, SUM(used) as used
# FROM allocations
# WHERE resource_class_id = :RAM_MB
# GROUP BY resource_provider_id
# ) AS ram_usage
# ON ram_inv.resource_provider_id = ram_usage.resource_provider_id
# LEFT OUTER JOIN inventories AS cpu_inv
# ON rp.id = cpu_inv.resource_provider_id
# AND cpu_inv.resource_class_id = :VCPUS
# LEFT OUTER JOIN (
# SELECT resource_provider_id, SUM(used) as used
# FROM allocations
# WHERE resource_class_id = :VCPUS
# GROUP BY resource_provider_id
# ) AS cpu_usage
# ON cpu_inv.resource_provider_id = cpu_usage.resource_provider_id
# LEFT OUTER JOIN inventories AS disk_inv
# ON rp.id = disk_inv.resource_provider_id
# AND disk_inv.resource_class_id = :DISK_GB
# LEFT OUTER JOIN (
# SELECT resource_provider_id, SUM(used) as used
# FROM allocations
# WHERE resource_class_id = :DISK_GB
# GROUP BY resource_provider_id
# ) AS disk_usage
# ON disk_inv.resource_provider_id = disk_usage.resource_provider_id
# WHERE cn.deleted = 0;
if filters is None:
filters = {}
RAM_MB = fields.ResourceClass.index(fields.ResourceClass.MEMORY_MB)
VCPU = fields.ResourceClass.index(fields.ResourceClass.VCPU)
DISK_GB = fields.ResourceClass.index(fields.ResourceClass.DISK_GB)
cn_tbl = sa.alias(models.ComputeNode.__table__, name='cn')
rp_tbl = sa.alias(models.ResourceProvider.__table__, name='rp')
inv_tbl = models.Inventory.__table__
alloc_tbl = models.Allocation.__table__
ram_inv = sa.alias(inv_tbl, name='ram_inv')
cpu_inv = sa.alias(inv_tbl, name='cpu_inv')
disk_inv = sa.alias(inv_tbl, name='disk_inv')
ram_usage = sa.select([alloc_tbl.c.resource_provider_id,
sql.func.sum(alloc_tbl.c.used).label('used')])
ram_usage = ram_usage.where(alloc_tbl.c.resource_class_id == RAM_MB)
ram_usage = ram_usage.group_by(alloc_tbl.c.resource_provider_id)
ram_usage = sa.alias(ram_usage, name='ram_usage')
cpu_usage = sa.select([alloc_tbl.c.resource_provider_id,
sql.func.sum(alloc_tbl.c.used).label('used')])
cpu_usage = cpu_usage.where(alloc_tbl.c.resource_class_id == VCPU)
cpu_usage = cpu_usage.group_by(alloc_tbl.c.resource_provider_id)
cpu_usage = sa.alias(cpu_usage, name='cpu_usage')
disk_usage = sa.select([alloc_tbl.c.resource_provider_id,
sql.func.sum(alloc_tbl.c.used).label('used')])
disk_usage = disk_usage.where(alloc_tbl.c.resource_class_id == DISK_GB)
disk_usage = disk_usage.group_by(alloc_tbl.c.resource_provider_id)
disk_usage = sa.alias(disk_usage, name='disk_usage')
cn_rp_join = sql.outerjoin(
cn_tbl, rp_tbl,
cn_tbl.c.uuid == rp_tbl.c.uuid)
ram_inv_join = sql.outerjoin(
cn_rp_join, ram_inv,
sql.and_(rp_tbl.c.id == ram_inv.c.resource_provider_id,
ram_inv.c.resource_class_id == RAM_MB))
ram_join = sql.outerjoin(
ram_inv_join, ram_usage,
ram_inv.c.resource_provider_id == ram_usage.c.resource_provider_id)
cpu_inv_join = sql.outerjoin(
ram_join, cpu_inv,
sql.and_(rp_tbl.c.id == cpu_inv.c.resource_provider_id,
cpu_inv.c.resource_class_id == VCPU))
cpu_join = sql.outerjoin(
cpu_inv_join, cpu_usage,
cpu_inv.c.resource_provider_id == cpu_usage.c.resource_provider_id)
disk_inv_join = sql.outerjoin(
cpu_join, disk_inv,
sql.and_(rp_tbl.c.id == disk_inv.c.resource_provider_id,
disk_inv.c.resource_class_id == DISK_GB))
disk_join = sql.outerjoin(
disk_inv_join, disk_usage,
disk_inv.c.resource_provider_id == disk_usage.c.resource_provider_id)
# TODO(jaypipes): Remove all capacity and usage fields from this method
# entirely and deal with allocations and inventory information in a
# tabular fashion instead of a columnar fashion like the legacy
# compute_nodes table schema does.
inv_cols = [
ram_inv.c.total.label('inv_memory_mb'),
ram_inv.c.reserved.label('inv_memory_mb_reserved'),
ram_inv.c.allocation_ratio.label('inv_ram_allocation_ratio'),
ram_usage.c.used.label('inv_memory_mb_used'),
cpu_inv.c.total.label('inv_vcpus'),
cpu_inv.c.allocation_ratio.label('inv_cpu_allocation_ratio'),
cpu_usage.c.used.label('inv_vcpus_used'),
disk_inv.c.total.label('inv_local_gb'),
disk_inv.c.reserved.label('inv_local_gb_reserved'),
disk_inv.c.allocation_ratio.label('inv_disk_allocation_ratio'),
disk_usage.c.used.label('inv_local_gb_used'),
]
cols_in_output = list(cn_tbl.c)
cols_in_output.extend(inv_cols)
select = sa.select(cols_in_output).select_from(disk_join)
if context.read_deleted == "no":
select = select.where(cn_tbl.c.deleted == 0)
if "compute_id" in filters:
select = select.where(cn_tbl.c.id == filters["compute_id"])
if "service_id" in filters:
select = select.where(cn_tbl.c.service_id == filters["service_id"])
if "host" in filters:
select = select.where(cn_tbl.c.host == filters["host"])
if "hypervisor_hostname" in filters:
hyp_hostname = filters["hypervisor_hostname"]
select = select.where(cn_tbl.c.hypervisor_hostname == hyp_hostname)
engine = get_engine(context)
conn = engine.connect()
results = conn.execute(select).fetchall()
# Callers expect dict-like objects, not SQLAlchemy RowProxy objects...
results = [dict(r) for r in results]
conn.close()
return results
@pick_context_manager_reader
def compute_node_get(context, compute_id):
results = _compute_node_select(context, {"compute_id": compute_id})
if not results:
raise exception.ComputeHostNotFound(host=compute_id)
return results[0]
@pick_context_manager_reader
def compute_node_get_model(context, compute_id):
# TODO(edleafe): remove once the compute node resource provider migration
# is complete, and this distinction is no longer necessary.
result = model_query(context, models.ComputeNode).\
filter_by(id=compute_id).\
first()
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
return result
@pick_context_manager_reader
def compute_nodes_get_by_service_id(context, service_id):
results = _compute_node_select(context, {"service_id": service_id})
if not results:
raise exception.ServiceNotFound(service_id=service_id)
return results
@pick_context_manager_reader
def compute_node_get_by_host_and_nodename(context, host, nodename):
results = _compute_node_select(context,
{"host": host, "hypervisor_hostname": nodename})
if not results:
raise exception.ComputeHostNotFound(host=host)
return results[0]
@pick_context_manager_reader_allow_async
def compute_node_get_all_by_host(context, host):
results = _compute_node_select(context, {"host": host})
if not results:
raise exception.ComputeHostNotFound(host=host)
return results
@pick_context_manager_reader
def compute_node_get_all(context):
return _compute_node_select(context)
@pick_context_manager_reader
def compute_node_search_by_hypervisor(context, hypervisor_match):
field = models.ComputeNode.hypervisor_hostname
return model_query(context, models.ComputeNode).\
filter(field.like('%%%s%%' % hypervisor_match)).\
all()
@pick_context_manager_writer
def compute_node_create(context, values):
"""Creates a new ComputeNode and populates the capacity fields
with the most recent data.
"""
convert_objects_related_datetimes(values)
compute_node_ref = models.ComputeNode()
compute_node_ref.update(values)
compute_node_ref.save(context.session)
return compute_node_ref
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def compute_node_update(context, compute_id, values):
"""Updates the ComputeNode record with the most recent data."""
compute_ref = compute_node_get_model(context, compute_id)
# Always update this, even if there's going to be no other
# changes in data. This ensures that we invalidate the
# scheduler cache of compute node data in case of races.
values['updated_at'] = timeutils.utcnow()
convert_objects_related_datetimes(values)
compute_ref.update(values)
return compute_ref
@pick_context_manager_writer
def compute_node_delete(context, compute_id):
"""Delete a ComputeNode record."""
result = model_query(context, models.ComputeNode).\
filter_by(id=compute_id).\
soft_delete(synchronize_session=False)
if not result:
raise exception.ComputeHostNotFound(host=compute_id)
@pick_context_manager_reader
def compute_node_statistics(context):
"""Compute statistics over all compute nodes."""
# TODO(sbauza): Remove the service_id filter in a later release
# once we are sure that all compute nodes report the host field
_filter = or_(models.Service.host == models.ComputeNode.host,
models.Service.id == models.ComputeNode.service_id)
result = model_query(context,
models.ComputeNode, (
func.count(models.ComputeNode.id),
func.sum(models.ComputeNode.vcpus),
func.sum(models.ComputeNode.memory_mb),
func.sum(models.ComputeNode.local_gb),
func.sum(models.ComputeNode.vcpus_used),
func.sum(models.ComputeNode.memory_mb_used),
func.sum(models.ComputeNode.local_gb_used),
func.sum(models.ComputeNode.free_ram_mb),
func.sum(models.ComputeNode.free_disk_gb),
func.sum(models.ComputeNode.current_workload),
func.sum(models.ComputeNode.running_vms),
func.sum(models.ComputeNode.disk_available_least),
), read_deleted="no").\
filter(models.Service.disabled == false()).\
filter(models.Service.binary == "nova-compute").\
filter(_filter).\
first()
# Build a dict of the info--making no assumptions about result
fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
'current_workload', 'running_vms', 'disk_available_least')
return {field: int(result[idx] or 0)
for idx, field in enumerate(fields)}
###################
@main_context_manager.writer
def certificate_create(context, values):
certificate_ref = models.Certificate()
for (key, value) in values.items():
certificate_ref[key] = value
certificate_ref.save(context.session)
return certificate_ref
@main_context_manager.reader
def certificate_get_all_by_project(context, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@main_context_manager.reader
def certificate_get_all_by_user(context, user_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
all()
@main_context_manager.reader
def certificate_get_all_by_user_and_project(context, user_id, project_id):
return model_query(context, models.Certificate, read_deleted="no").\
filter_by(user_id=user_id).\
filter_by(project_id=project_id).\
all()
###################
@require_context
@main_context_manager.reader
def floating_ip_get(context, id):
try:
result = model_query(context, models.FloatingIp, project_only=True).\
filter_by(id=id).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFound(id=id)
except db_exc.DBError:
msg = _LW("Invalid floating IP ID %s in request") % id
LOG.warning(msg)
raise exception.InvalidID(id=id)
return result
@require_context
@main_context_manager.reader
def floating_ip_get_pools(context):
pools = []
for result in model_query(context, models.FloatingIp,
(models.FloatingIp.pool,)).distinct():
pools.append({'name': result[0]})
return pools
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
@main_context_manager.writer
def floating_ip_allocate_address(context, project_id, pool,
auto_assigned=False):
nova.context.authorize_project_context(context, project_id)
floating_ip_ref = model_query(context, models.FloatingIp,
read_deleted="no").\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(pool=pool).\
first()
if not floating_ip_ref:
raise exception.NoMoreFloatingIps()
params = {'project_id': project_id, 'auto_assigned': auto_assigned}
rows_update = model_query(context, models.FloatingIp, read_deleted="no").\
filter_by(id=floating_ip_ref['id']).\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(pool=pool).\
update(params, synchronize_session='evaluate')
if not rows_update:
LOG.debug('The row was updated in a concurrent transaction, '
'we will fetch another one')
raise db_exc.RetryRequest(exception.FloatingIpAllocateFailed())
return floating_ip_ref['address']
@require_context
@main_context_manager.writer
def floating_ip_bulk_create(context, ips, want_result=True):
try:
tab = models.FloatingIp().__table__
context.session.execute(tab.insert(), ips)
except db_exc.DBDuplicateEntry as e:
raise exception.FloatingIpExists(address=e.value)
if want_result:
return model_query(context, models.FloatingIp).filter(
models.FloatingIp.address.in_(
[ip['address'] for ip in ips])).all()
def _ip_range_splitter(ips, block_size=256):
"""Yields blocks of IPs no more than block_size elements long."""
out = []
count = 0
for ip in ips:
out.append(ip['address'])
count += 1
if count > block_size - 1:
yield out
out = []
count = 0
if out:
yield out
@require_context
@main_context_manager.writer
def floating_ip_bulk_destroy(context, ips):
project_id_to_quota_count = collections.defaultdict(int)
for ip_block in _ip_range_splitter(ips):
# Find any floating IPs that were not auto_assigned and
# thus need quota released.
query = model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
filter_by(auto_assigned=False)
for row in query.all():
# The count is negative since we release quota by
# reserving negative quota.
project_id_to_quota_count[row['project_id']] -= 1
# Delete the floating IPs.
model_query(context, models.FloatingIp).\
filter(models.FloatingIp.address.in_(ip_block)).\
soft_delete(synchronize_session='fetch')
# Delete the quotas, if needed.
# Quota update happens in a separate transaction, so previous must have
# been committed first.
for project_id, count in project_id_to_quota_count.items():
try:
reservations = quota.QUOTAS.reserve(context,
project_id=project_id,
floating_ips=count)
quota.QUOTAS.commit(context, reservations, project_id=project_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Failed to update usages bulk "
"deallocating floating IP"))
@require_context
@main_context_manager.writer
def floating_ip_create(context, values):
floating_ip_ref = models.FloatingIp()
floating_ip_ref.update(values)
try:
floating_ip_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return floating_ip_ref
def _floating_ip_count_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why leave auto_assigned floating IPs out?
return model_query(context, models.FloatingIp, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
count()
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def floating_ip_fixed_ip_associate(context, floating_address,
fixed_address, host):
fixed_ip_ref = model_query(context, models.FixedIp).\
filter_by(address=fixed_address).\
options(joinedload('network')).\
first()
if not fixed_ip_ref:
raise exception.FixedIpNotFoundForAddress(address=fixed_address)
rows = model_query(context, models.FloatingIp).\
filter_by(address=floating_address).\
filter(models.FloatingIp.project_id ==
context.project_id).\
filter(or_(models.FloatingIp.fixed_ip_id ==
fixed_ip_ref['id'],
models.FloatingIp.fixed_ip_id.is_(None))).\
update({'fixed_ip_id': fixed_ip_ref['id'], 'host': host})
if not rows:
raise exception.FloatingIpAssociateFailed(address=floating_address)
return fixed_ip_ref
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def floating_ip_deallocate(context, address):
return model_query(context, models.FloatingIp).\
filter_by(address=address).\
        filter(and_(models.FloatingIp.project_id != null(),
                    models.FloatingIp.fixed_ip_id == null())).\
update({'project_id': None,
'host': None,
'auto_assigned': False},
synchronize_session=False)
@require_context
@main_context_manager.writer
def floating_ip_destroy(context, address):
model_query(context, models.FloatingIp).\
filter_by(address=address).\
delete()
@require_context
@main_context_manager.writer
def floating_ip_disassociate(context, address):
floating_ip_ref = model_query(context,
models.FloatingIp).\
filter_by(address=address).\
first()
if not floating_ip_ref:
raise exception.FloatingIpNotFoundForAddress(address=address)
fixed_ip_ref = model_query(context, models.FixedIp).\
filter_by(id=floating_ip_ref['fixed_ip_id']).\
options(joinedload('network')).\
first()
floating_ip_ref.fixed_ip_id = None
floating_ip_ref.host = None
return fixed_ip_ref
def _floating_ip_get_all(context):
return model_query(context, models.FloatingIp, read_deleted="no")
@main_context_manager.reader
def floating_ip_get_all(context):
floating_ip_refs = _floating_ip_get_all(context).\
options(joinedload('fixed_ip')).\
all()
if not floating_ip_refs:
raise exception.NoFloatingIpsDefined()
return floating_ip_refs
@main_context_manager.reader
def floating_ip_get_all_by_host(context, host):
floating_ip_refs = _floating_ip_get_all(context).\
filter_by(host=host).\
options(joinedload('fixed_ip')).\
all()
if not floating_ip_refs:
raise exception.FloatingIpNotFoundForHost(host=host)
return floating_ip_refs
@require_context
@main_context_manager.reader
def floating_ip_get_all_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
# TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
return _floating_ip_get_all(context).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
options(joinedload_all('fixed_ip.instance')).\
all()
@require_context
@main_context_manager.reader
def floating_ip_get_by_address(context, address):
return _floating_ip_get_by_address(context, address)
def _floating_ip_get_by_address(context, address):
# if address string is empty explicitly set it to None
if not address:
address = None
try:
result = model_query(context, models.FloatingIp).\
filter_by(address=address).\
options(joinedload_all('fixed_ip.instance')).\
first()
if not result:
raise exception.FloatingIpNotFoundForAddress(address=address)
except db_exc.DBError:
msg = _("Invalid floating IP %s in request") % address
LOG.warning(msg)
raise exception.InvalidIpAddressError(msg)
# If the floating IP has a project ID set, check to make sure
# the non-admin user has access.
if result.project_id and nova.context.is_user_context(context):
nova.context.authorize_project_context(context, result.project_id)
return result
@require_context
@main_context_manager.reader
def floating_ip_get_by_fixed_address(context, fixed_address):
return model_query(context, models.FloatingIp).\
outerjoin(models.FixedIp,
models.FixedIp.id ==
models.FloatingIp.fixed_ip_id).\
filter(models.FixedIp.address == fixed_address).\
all()
@require_context
@main_context_manager.reader
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
return model_query(context, models.FloatingIp).\
filter_by(fixed_ip_id=fixed_ip_id).\
all()
@require_context
@main_context_manager.writer
def floating_ip_update(context, address, values):
float_ip_ref = _floating_ip_get_by_address(context, address)
float_ip_ref.update(values)
try:
float_ip_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.FloatingIpExists(address=values['address'])
return float_ip_ref
###################
@require_context
@main_context_manager.reader
def dnsdomain_get(context, fqdomain):
return model_query(context, models.DNSDomain, read_deleted="no").\
filter_by(domain=fqdomain).\
with_lockmode('update').\
first()
def _dnsdomain_get_or_create(context, fqdomain):
domain_ref = dnsdomain_get(context, fqdomain)
if not domain_ref:
dns_ref = models.DNSDomain()
dns_ref.update({'domain': fqdomain,
'availability_zone': None,
'project_id': None})
return dns_ref
return domain_ref
@main_context_manager.writer
def dnsdomain_register_for_zone(context, fqdomain, zone):
domain_ref = _dnsdomain_get_or_create(context, fqdomain)
domain_ref.scope = 'private'
domain_ref.availability_zone = zone
context.session.add(domain_ref)
@main_context_manager.writer
def dnsdomain_register_for_project(context, fqdomain, project):
domain_ref = _dnsdomain_get_or_create(context, fqdomain)
domain_ref.scope = 'public'
domain_ref.project_id = project
context.session.add(domain_ref)
@main_context_manager.writer
def dnsdomain_unregister(context, fqdomain):
model_query(context, models.DNSDomain).\
filter_by(domain=fqdomain).\
delete()
@main_context_manager.reader
def dnsdomain_get_all(context):
return model_query(context, models.DNSDomain, read_deleted="no").all()
###################
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
@main_context_manager.writer
def fixed_ip_associate(context, address, instance_uuid, network_id=None,
reserved=False, virtual_interface_id=None):
"""Keyword arguments:
reserved -- should be a boolean value(True or False), exact value will be
used to filter on the fixed IP address
"""
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == null())
fixed_ip_ref = model_query(context, models.FixedIp, read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=reserved).\
filter_by(address=address).\
first()
if fixed_ip_ref is None:
raise exception.FixedIpNotFoundForNetwork(address=address,
network_uuid=network_id)
if fixed_ip_ref.instance_uuid:
raise exception.FixedIpAlreadyInUse(address=address,
instance_uuid=instance_uuid)
params = {'instance_uuid': instance_uuid,
'allocated': virtual_interface_id is not None}
if not fixed_ip_ref.network_id:
params['network_id'] = network_id
if virtual_interface_id:
params['virtual_interface_id'] = virtual_interface_id
rows_updated = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(id=fixed_ip_ref.id).\
filter(network_or_none).\
filter_by(reserved=reserved).\
filter_by(address=address).\
update(params, synchronize_session='evaluate')
if not rows_updated:
LOG.debug('The row was updated in a concurrent transaction, '
'we will fetch another row')
raise db_exc.RetryRequest(
exception.FixedIpAssociateFailed(net=network_id))
return fixed_ip_ref
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
@main_context_manager.writer
def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
host=None, virtual_interface_id=None):
"""allocate a fixed ip out of a fixed ip network pool.
This allocates an unallocated fixed ip out of a specified
network. We sort by updated_at to hand out the oldest address in
the list.
"""
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
network_or_none = or_(models.FixedIp.network_id == network_id,
models.FixedIp.network_id == null())
fixed_ip_ref = model_query(context, models.FixedIp, read_deleted="no").\
filter(network_or_none).\
filter_by(reserved=False).\
filter_by(instance_uuid=None).\
filter_by(host=None).\
filter_by(leased=False).\
order_by(asc(models.FixedIp.updated_at)).\
first()
if not fixed_ip_ref:
raise exception.NoMoreFixedIps(net=network_id)
params = {'allocated': virtual_interface_id is not None}
if fixed_ip_ref['network_id'] is None:
params['network_id'] = network_id
if instance_uuid:
params['instance_uuid'] = instance_uuid
if host:
params['host'] = host
if virtual_interface_id:
params['virtual_interface_id'] = virtual_interface_id
rows_updated = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(id=fixed_ip_ref['id']).\
filter_by(network_id=fixed_ip_ref['network_id']).\
filter_by(reserved=False).\
filter_by(instance_uuid=None).\
filter_by(host=None).\
filter_by(leased=False).\
filter_by(address=fixed_ip_ref['address']).\
update(params, synchronize_session='evaluate')
if not rows_updated:
LOG.debug('The row was updated in a concurrent transaction, '
'we will fetch another row')
raise db_exc.RetryRequest(
exception.FixedIpAssociateFailed(net=network_id))
return fixed_ip_ref
@require_context
@main_context_manager.writer
def fixed_ip_create(context, values):
fixed_ip_ref = models.FixedIp()
fixed_ip_ref.update(values)
try:
fixed_ip_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.FixedIpExists(address=values['address'])
return fixed_ip_ref
@require_context
@main_context_manager.writer
def fixed_ip_bulk_create(context, ips):
try:
tab = models.FixedIp.__table__
context.session.execute(tab.insert(), ips)
except db_exc.DBDuplicateEntry as e:
raise exception.FixedIpExists(address=e.value)
@require_context
@main_context_manager.writer
def fixed_ip_disassociate(context, address):
_fixed_ip_get_by_address(context, address).update(
{'instance_uuid': None,
'virtual_interface_id': None})
@main_context_manager.writer
def fixed_ip_disassociate_all_by_timeout(context, host, time):
# NOTE(vish): only update fixed ips that "belong" to this
# host; i.e. the network host or the instance
# host matches. Two queries necessary because
# join with update doesn't work.
host_filter = or_(and_(models.Instance.host == host,
models.Network.multi_host == true()),
models.Network.host == host)
result = model_query(context, models.FixedIp, (models.FixedIp.id,),
read_deleted="no").\
filter(models.FixedIp.allocated == false()).\
filter(models.FixedIp.updated_at < time).\
join((models.Network,
models.Network.id == models.FixedIp.network_id)).\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(host_filter).\
all()
fixed_ip_ids = [fip[0] for fip in result]
if not fixed_ip_ids:
return 0
result = model_query(context, models.FixedIp).\
filter(models.FixedIp.id.in_(fixed_ip_ids)).\
update({'instance_uuid': None,
'leased': False,
'updated_at': timeutils.utcnow()},
synchronize_session='fetch')
return result
@require_context
@main_context_manager.reader
def fixed_ip_get(context, id, get_network=False):
query = model_query(context, models.FixedIp).filter_by(id=id)
if get_network:
query = query.options(joinedload('network'))
result = query.first()
if not result:
raise exception.FixedIpNotFound(id=id)
# FIXME(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
result['instance_uuid'])
nova.context.authorize_project_context(context, instance.project_id)
return result
@main_context_manager.reader
def fixed_ip_get_all(context):
result = model_query(context, models.FixedIp, read_deleted="yes").all()
if not result:
raise exception.NoFixedIpsDefined()
return result
@require_context
@main_context_manager.reader
def fixed_ip_get_by_address(context, address, columns_to_join=None):
return _fixed_ip_get_by_address(context, address,
columns_to_join=columns_to_join)
def _fixed_ip_get_by_address(context, address, columns_to_join=None):
if columns_to_join is None:
columns_to_join = []
try:
result = model_query(context, models.FixedIp)
for column in columns_to_join:
result = result.options(joinedload_all(column))
result = result.filter_by(address=address).first()
if not result:
raise exception.FixedIpNotFoundForAddress(address=address)
except db_exc.DBError:
msg = _("Invalid fixed IP Address %s in request") % address
LOG.warning(msg)
raise exception.FixedIpInvalid(msg)
# NOTE(sirp): shouldn't we just use project_only here to restrict the
# results?
if (nova.context.is_user_context(context) and
result['instance_uuid'] is not None):
instance = _instance_get_by_uuid(
context.elevated(read_deleted='yes'),
result['instance_uuid'])
nova.context.authorize_project_context(context,
instance.project_id)
return result
@require_context
@main_context_manager.reader
def fixed_ip_get_by_floating_address(context, floating_address):
return model_query(context, models.FixedIp).\
join(models.FloatingIp,
models.FloatingIp.fixed_ip_id ==
models.FixedIp.id).\
filter(models.FloatingIp.address == floating_address).\
first()
# NOTE(tr3buchet) please don't invent an exception here, None is fine
@require_context
@main_context_manager.reader
def fixed_ip_get_by_instance(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
vif_and = and_(models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id,
models.VirtualInterface.deleted == 0)
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(instance_uuid=instance_uuid).\
outerjoin(models.VirtualInterface, vif_and).\
options(contains_eager("virtual_interface")).\
options(joinedload('network')).\
options(joinedload('floating_ips')).\
order_by(asc(models.VirtualInterface.created_at),
asc(models.VirtualInterface.id)).\
all()
if not result:
raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
return result
@main_context_manager.reader
def fixed_ip_get_by_host(context, host):
instance_uuids = _instance_get_all_uuids_by_host(context, host)
if not instance_uuids:
return []
return model_query(context, models.FixedIp).\
filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\
all()
@require_context
@main_context_manager.reader
def fixed_ip_get_by_network_host(context, network_id, host):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(host=host).\
first()
if not result:
raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
host=host)
return result
@require_context
@main_context_manager.reader
def fixed_ips_by_virtual_interface(context, vif_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(virtual_interface_id=vif_id).\
options(joinedload('network')).\
options(joinedload('floating_ips')).\
all()
return result
@require_context
@main_context_manager.writer
def fixed_ip_update(context, address, values):
_fixed_ip_get_by_address(context, address).update(values)
def _fixed_ip_count_by_project(context, project_id):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.FixedIp, (models.FixedIp.id,),
read_deleted="no").\
join((models.Instance,
models.Instance.uuid == models.FixedIp.instance_uuid)).\
filter(models.Instance.project_id == project_id).\
count()
###################
@require_context
@pick_context_manager_writer
def virtual_interface_create(context, values):
"""Create a new virtual interface record in the database.
:param values: = dict containing column values
"""
try:
vif_ref = models.VirtualInterface()
vif_ref.update(values)
vif_ref.save(context.session)
except db_exc.DBError:
raise exception.VirtualInterfaceCreateException()
return vif_ref
def _virtual_interface_query(context):
return model_query(context, models.VirtualInterface, read_deleted="no")
@require_context
@pick_context_manager_reader
def virtual_interface_get(context, vif_id):
"""Gets a virtual interface from the table.
:param vif_id: = id of the virtual interface
"""
vif_ref = _virtual_interface_query(context).\
filter_by(id=vif_id).\
first()
return vif_ref
@require_context
@pick_context_manager_reader
def virtual_interface_get_by_address(context, address):
"""Gets a virtual interface from the table.
:param address: = the address of the interface you're looking to get
"""
try:
vif_ref = _virtual_interface_query(context).\
filter_by(address=address).\
first()
except db_exc.DBError:
msg = _("Invalid virtual interface address %s in request") % address
LOG.warning(msg)
raise exception.InvalidIpAddressError(msg)
return vif_ref
@require_context
@pick_context_manager_reader
def virtual_interface_get_by_uuid(context, vif_uuid):
"""Gets a virtual interface from the table.
:param vif_uuid: the uuid of the interface you're looking to get
"""
vif_ref = _virtual_interface_query(context).\
filter_by(uuid=vif_uuid).\
first()
return vif_ref
@require_context
@require_instance_exists_using_uuid
@pick_context_manager_reader_allow_async
def virtual_interface_get_by_instance(context, instance_uuid):
"""Gets all virtual interfaces for instance.
:param instance_uuid: = uuid of the instance to retrieve vifs for
"""
vif_refs = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
order_by(asc("created_at"), asc("id")).\
all()
return vif_refs
@require_context
@pick_context_manager_reader
def virtual_interface_get_by_instance_and_network(context, instance_uuid,
network_id):
"""Gets virtual interface for instance that's associated with network."""
vif_ref = _virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(network_id=network_id).\
first()
return vif_ref
@require_context
@pick_context_manager_writer
def virtual_interface_delete_by_instance(context, instance_uuid):
"""Delete virtual interface records that are associated
    with the instance given by instance_uuid.
:param instance_uuid: = uuid of instance
"""
_virtual_interface_query(context).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
@require_context
@pick_context_manager_reader
def virtual_interface_get_all(context):
"""Get all vifs."""
vif_refs = _virtual_interface_query(context).all()
return vif_refs
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.items():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
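# Illustrative sketch only (hypothetical helper, not used anywhere in Nova):
# _metadata_refs() simply expands a {key: value} mapping into per-row
# reference objects. Passing a plain dict in place of a real model class
# makes the shape of the result easy to see.
def _example_metadata_refs_usage():
    refs = _metadata_refs({'image_type': 'qcow2'}, dict)
    # refs == [{'key': 'image_type', 'value': 'qcow2'}]
    return refs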
def _validate_unique_server_name(context, name):
if not CONF.osapi_compute_unique_server_name_scope:
return
lowername = name.lower()
base_query = model_query(context, models.Instance, read_deleted='no').\
filter(func.lower(models.Instance.hostname) == lowername)
if CONF.osapi_compute_unique_server_name_scope == 'project':
instance_with_same_name = base_query.\
filter_by(project_id=context.project_id).\
count()
elif CONF.osapi_compute_unique_server_name_scope == 'global':
instance_with_same_name = base_query.count()
else:
msg = _('Unknown osapi_compute_unique_server_name_scope value: %s'
' Flag must be empty, "global" or'
' "project"') % CONF.osapi_compute_unique_server_name_scope
LOG.warning(msg)
return
if instance_with_same_name > 0:
raise exception.InstanceExists(name=lowername)
def _handle_objects_related_type_conversions(values):
"""Make sure that certain things in values (which may have come from
an objects.instance.Instance object) are in suitable form for the
database.
"""
# NOTE(danms): Make sure IP addresses are passed as strings to
# the database engine
for key in ('access_ip_v4', 'access_ip_v6'):
if key in values and values[key] is not None:
values[key] = str(values[key])
datetime_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at')
convert_objects_related_datetimes(values, *datetime_keys)
def _check_instance_exists_in_project(context, instance_uuid):
if not model_query(context, models.Instance, read_deleted="no",
project_only=True).filter_by(
uuid=instance_uuid).first():
raise exception.InstanceNotFound(instance_id=instance_uuid)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def instance_create(context, values):
"""Create a new Instance record in the database.
context - request context object
values - dict containing column values.
"""
security_group_ensure_default(context)
values = values.copy()
values['metadata'] = _metadata_refs(
values.get('metadata'), models.InstanceMetadata)
values['system_metadata'] = _metadata_refs(
values.get('system_metadata'), models.InstanceSystemMetadata)
_handle_objects_related_type_conversions(values)
instance_ref = models.Instance()
if not values.get('uuid'):
values['uuid'] = str(uuid.uuid4())
instance_ref['info_cache'] = models.InstanceInfoCache()
info_cache = values.pop('info_cache', None)
if info_cache is not None:
instance_ref['info_cache'].update(info_cache)
security_groups = values.pop('security_groups', [])
instance_ref['extra'] = models.InstanceExtra()
instance_ref['extra'].update(
{'numa_topology': None,
'pci_requests': None,
'vcpu_model': None,
})
instance_ref['extra'].update(values.pop('extra', {}))
instance_ref.update(values)
def _get_sec_group_models(security_groups):
models = []
default_group = _security_group_ensure_default(context)
if 'default' in security_groups:
models.append(default_group)
# Generate a new list, so we don't modify the original
security_groups = [x for x in security_groups if x != 'default']
if security_groups:
models.extend(_security_group_get_by_names(context,
context.project_id, security_groups))
return models
if 'hostname' in values:
_validate_unique_server_name(context, values['hostname'])
instance_ref.security_groups = _get_sec_group_models(security_groups)
context.session.add(instance_ref)
# create the instance uuid to ec2_id mapping entry for instance
ec2_instance_create(context, instance_ref['uuid'])
return instance_ref
def _instance_data_get_for_user(context, project_id, user_id):
result = model_query(context, models.Instance, (
func.count(models.Instance.id),
func.sum(models.Instance.vcpus),
func.sum(models.Instance.memory_mb))).\
filter_by(project_id=project_id)
if user_id:
result = result.filter_by(user_id=user_id).first()
else:
result = result.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0, result[2] or 0)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def instance_destroy(context, instance_uuid, constraint=None):
if uuidutils.is_uuid_like(instance_uuid):
instance_ref = _instance_get_by_uuid(context, instance_uuid)
else:
raise exception.InvalidUUID(instance_uuid)
query = model_query(context, models.Instance).\
filter_by(uuid=instance_uuid)
if constraint is not None:
query = constraint.apply(models.Instance, query)
count = query.soft_delete()
if count == 0:
raise exception.ConstraintNotMet()
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceMetadata).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceFault).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceExtra).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceSystemMetadata).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
model_query(context, models.InstanceGroupMember).\
filter_by(instance_id=instance_uuid).\
soft_delete()
model_query(context, models.BlockDeviceMapping).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
# NOTE(snikitin): We can't use model_query here, because there is no
# column 'deleted' in 'tags' table.
context.session.query(models.Tag).filter_by(
resource_id=instance_uuid).delete()
return instance_ref
@require_context
@pick_context_manager_reader_allow_async
def instance_get_by_uuid(context, uuid, columns_to_join=None):
return _instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join)
def _instance_get_by_uuid(context, uuid, columns_to_join=None):
result = _build_instance_get(context, columns_to_join=columns_to_join).\
filter_by(uuid=uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=uuid)
return result
@require_context
@pick_context_manager_reader
def instance_get(context, instance_id, columns_to_join=None):
try:
result = _build_instance_get(context, columns_to_join=columns_to_join
).filter_by(id=instance_id).first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
except db_exc.DBError:
# NOTE(sdague): catch all in case the db engine chokes on the
# id because it's too long of an int to store.
msg = _("Invalid instance id %s in request") % instance_id
LOG.warning(msg)
raise exception.InvalidID(id=instance_id)
def _build_instance_get(context, columns_to_join=None):
query = model_query(context, models.Instance, project_only=True).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('info_cache'))
if columns_to_join is None:
columns_to_join = ['metadata', 'system_metadata']
for column in columns_to_join:
if column in ['info_cache', 'security_groups']:
# Already always joined above
continue
if 'extra.' in column:
query = query.options(undefer(column))
else:
query = query.options(joinedload(column))
# NOTE(alaski) Stop lazy loading of columns not needed.
for col in ['metadata', 'system_metadata']:
if col not in columns_to_join:
query = query.options(noload(col))
return query
def _instances_fill_metadata(context, instances, manual_joins=None):
"""Selectively fill instances with manually-joined metadata. Note that
instance will be converted to a dict.
:param context: security context
:param instances: list of instances to fill
:param manual_joins: list of tables to manually join (can be any
combination of 'metadata' and 'system_metadata' or
None to take the default of both)
"""
uuids = [inst['uuid'] for inst in instances]
if manual_joins is None:
manual_joins = ['metadata', 'system_metadata']
meta = collections.defaultdict(list)
if 'metadata' in manual_joins:
for row in _instance_metadata_get_multi(context, uuids):
meta[row['instance_uuid']].append(row)
sys_meta = collections.defaultdict(list)
if 'system_metadata' in manual_joins:
for row in _instance_system_metadata_get_multi(context, uuids):
sys_meta[row['instance_uuid']].append(row)
pcidevs = collections.defaultdict(list)
if 'pci_devices' in manual_joins:
for row in _instance_pcidevs_get_multi(context, uuids):
pcidevs[row['instance_uuid']].append(row)
filled_instances = []
for inst in instances:
inst = dict(inst)
inst['system_metadata'] = sys_meta[inst['uuid']]
inst['metadata'] = meta[inst['uuid']]
if 'pci_devices' in manual_joins:
inst['pci_devices'] = pcidevs[inst['uuid']]
filled_instances.append(inst)
return filled_instances
def _manual_join_columns(columns_to_join):
"""Separate manually joined columns from columns_to_join
If columns_to_join contains 'metadata', 'system_metadata', or
'pci_devices' those columns are removed from columns_to_join and added
to a manual_joins list to be used with the _instances_fill_metadata method.
    The columns_to_join formal parameter is copied and not modified; the return
tuple has the modified columns_to_join list to be used with joinedload in
a model query.
:param:columns_to_join: List of columns to join in a model query.
:return: tuple of (manual_joins, columns_to_join)
"""
manual_joins = []
columns_to_join_new = copy.copy(columns_to_join)
for column in ('metadata', 'system_metadata', 'pci_devices'):
if column in columns_to_join_new:
columns_to_join_new.remove(column)
manual_joins.append(column)
return manual_joins, columns_to_join_new
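# Illustrative sketch only (hypothetical helper added for documentation):
# shows how _manual_join_columns() splits the caller's column list without
# mutating it.
def _example_manual_join_columns_usage():
    requested = ['metadata', 'info_cache', 'pci_devices']
    manual_joins, columns_to_join = _manual_join_columns(requested)
    # manual_joins == ['metadata', 'pci_devices'],
    # columns_to_join == ['info_cache'], and requested is left untouched.
    return manual_joins, columns_to_join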
@require_context
@pick_context_manager_reader
def instance_get_all(context, columns_to_join=None):
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
query = model_query(context, models.Instance)
for column in columns_to_join_new:
query = query.options(joinedload(column))
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
query = query.filter_by(project_id=context.project_id)
else:
query = query.filter_by(user_id=context.user_id)
instances = query.all()
return _instances_fill_metadata(context, instances, manual_joins)
@require_context
@pick_context_manager_reader_allow_async
def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
limit=None, marker=None, columns_to_join=None):
"""Return instances matching all filters sorted by the primary key.
See instance_get_all_by_filters_sort for more information.
"""
# Invoke the API with the multiple sort keys and directions using the
# single sort key/direction
return instance_get_all_by_filters_sort(context, filters, limit=limit,
marker=marker,
columns_to_join=columns_to_join,
sort_keys=[sort_key],
sort_dirs=[sort_dir])
@require_context
@pick_context_manager_reader_allow_async
def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
columns_to_join=None, sort_keys=None,
sort_dirs=None):
"""Return instances that match all filters sorted by the given keys.
Deleted instances will be returned by default, unless there's a filter that
says otherwise.
    Depending on the name of a filter, matching for that filter is
    performed using either exact matching or regular expression
    matching. Exact matching is applied for the following filters::
| ['project_id', 'user_id', 'image_ref',
| 'vm_state', 'instance_type_id', 'uuid',
| 'metadata', 'host', 'system_metadata']
    A third type of filter (also using exact matching) filters
based on instance metadata tags when supplied under a special
key named 'filter'::
| filters = {
| 'filter': [
| {'name': 'tag-key', 'value': '<metakey>'},
| {'name': 'tag-value', 'value': '<metaval>'},
| {'name': 'tag:<metakey>', 'value': '<metaval>'}
| ]
| }
    Special keys are used to tweak the query further::
| 'changes-since' - only return instances updated after
| 'deleted' - only return (or exclude) deleted instances
| 'soft_deleted' - modify behavior of 'deleted' to either
| include or exclude instances whose
| vm_state is SOFT_DELETED.
    A fourth type of filter (also using exact matching) filters
based on instance tags (not metadata tags). There are two types
of these tags:
`tags` -- One or more strings that will be used to filter results
in an AND expression.
`tags-any` -- One or more strings that will be used to filter results in
an OR expression.
    Tags should be represented as a list::
    | filters = {
    | 'tags': [some-tag, some-another-tag],
    | 'tags-any': [some-any-tag, some-another-any-tag]
| }
"""
# NOTE(mriedem): If the limit is 0 there is no point in even going
# to the database since nothing is going to be returned anyway.
if limit == 0:
return []
sort_keys, sort_dirs = process_sort_params(sort_keys,
sort_dirs,
default_dir='desc')
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
query_prefix = context.session.query(models.Instance)
for column in columns_to_join_new:
if 'extra.' in column:
query_prefix = query_prefix.options(undefer(column))
else:
query_prefix = query_prefix.options(joinedload(column))
# Note: order_by is done in the sqlalchemy.utils.py paginate_query(),
# no need to do it here as well
# Make a copy of the filters dictionary to use going forward, as we'll
# be modifying it and we shouldn't affect the caller's use of it.
filters = filters.copy()
if 'changes-since' in filters:
changes_since = timeutils.normalize_time(filters['changes-since'])
query_prefix = query_prefix.\
filter(models.Instance.updated_at >= changes_since)
if 'deleted' in filters:
# Instances can be soft or hard deleted and the query needs to
# include or exclude both
deleted = filters.pop('deleted')
if deleted:
if filters.pop('soft_deleted', True):
delete = or_(
models.Instance.deleted == models.Instance.id,
models.Instance.vm_state == vm_states.SOFT_DELETED
)
query_prefix = query_prefix.\
filter(delete)
else:
query_prefix = query_prefix.\
filter(models.Instance.deleted == models.Instance.id)
else:
query_prefix = query_prefix.\
filter_by(deleted=0)
if not filters.pop('soft_deleted', False):
# It would be better to have vm_state not be nullable
# but until then we test it explicitly as a workaround.
not_soft_deleted = or_(
models.Instance.vm_state != vm_states.SOFT_DELETED,
models.Instance.vm_state == null()
)
query_prefix = query_prefix.filter(not_soft_deleted)
if 'cleaned' in filters:
if filters.pop('cleaned'):
query_prefix = query_prefix.filter(models.Instance.cleaned == 1)
else:
query_prefix = query_prefix.filter(models.Instance.cleaned == 0)
if 'tags' in filters:
tags = filters.pop('tags')
# We build a JOIN ladder expression for each tag, JOIN'ing
# the first tag to the instances table, and each subsequent
# tag to the last JOIN'd tags table
first_tag = tags.pop(0)
query_prefix = query_prefix.join(models.Instance.tags)
query_prefix = query_prefix.filter(models.Tag.tag == first_tag)
for tag in tags:
tag_alias = aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias,
models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag == tag)
if 'tags-any' in filters:
tags = filters.pop('tags-any')
tag_alias = aliased(models.Tag)
query_prefix = query_prefix.join(tag_alias, models.Instance.tags)
query_prefix = query_prefix.filter(tag_alias.tag.in_(tags))
if not context.is_admin:
# If we're not admin context, add appropriate filter..
if context.project_id:
filters['project_id'] = context.project_id
else:
filters['user_id'] = context.user_id
# Filters for exact matches that we can do along with the SQL query...
# For other filters that don't match this, we will do regexp matching
exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
'vm_state', 'instance_type_id', 'uuid',
'metadata', 'host', 'task_state',
'system_metadata']
# Filter the query
query_prefix = _exact_instance_filter(query_prefix,
filters, exact_match_filter_names)
if query_prefix is None:
return []
query_prefix = _regex_instance_filter(query_prefix, filters)
query_prefix = _tag_instance_filter(context, query_prefix, filters)
# paginate query
if marker is not None:
try:
marker = _instance_get_by_uuid(
context.elevated(read_deleted='yes'), marker)
except exception.InstanceNotFound:
raise exception.MarkerNotFound(marker)
try:
query_prefix = sqlalchemyutils.paginate_query(query_prefix,
models.Instance, limit,
sort_keys,
marker=marker,
sort_dirs=sort_dirs)
except db_exc.InvalidSortKey:
raise exception.InvalidSortKey()
return _instances_fill_metadata(context, query_prefix.all(), manual_joins)
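# Illustrative sketch only: a hypothetical filters dict combining the exact
# match, metadata-tag and instance-tag styles described in the docstring
# above. The context, values and tag names here are made up.
#
#   filters = {
#       'project_id': 'some-project',
#       'deleted': False,
#       'filter': [{'name': 'tag:env', 'value': 'prod'}],
#       'tags': ['web'],
#   }
#   instance_get_all_by_filters_sort(ctxt, filters, limit=10,
#                                    sort_keys=['created_at'],
#                                    sort_dirs=['desc'])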
def _tag_instance_filter(context, query, filters):
"""Applies tag filtering to an Instance query.
    Returns the updated query. This method alters the filters argument to
    remove keys that are tags. This filters resources by tags; the
    method assumes that the caller will take care of access control.
:param context: request context object
:param query: query to apply filters to
:param filters: dictionary of filters
"""
if filters.get('filter') is None:
return query
model = models.Instance
model_metadata = models.InstanceMetadata
model_uuid = model_metadata.instance_uuid
or_query = None
def _to_list(val):
if isinstance(val, dict):
val = val.values()
if not isinstance(val, (tuple, list, set)):
val = (val,)
return val
for filter_block in filters['filter']:
if not isinstance(filter_block, dict):
continue
filter_name = filter_block.get('name')
if filter_name is None:
continue
tag_name = filter_name[4:]
tag_val = _to_list(filter_block.get('value'))
if filter_name.startswith('tag-'):
if tag_name not in ['key', 'value']:
msg = _("Invalid field name: %s") % tag_name
raise exception.InvalidParameterValue(err=msg)
subq = getattr(model_metadata, tag_name).in_(tag_val)
or_query = subq if or_query is None else or_(or_query, subq)
elif filter_name.startswith('tag:'):
subq = model_query(context, model_metadata, (model_uuid,)).\
filter_by(key=tag_name).\
filter(model_metadata.value.in_(tag_val))
query = query.filter(model.uuid.in_(subq))
if or_query is not None:
subq = model_query(context, model_metadata, (model_uuid,)).\
filter(or_query)
query = query.filter(model.uuid.in_(subq))
return query
def _get_regexp_op_for_connection(db_connection):
db_string = db_connection.split(':')[0].split('+')[0]
regexp_op_map = {
'postgresql': '~',
'mysql': 'REGEXP',
'sqlite': 'REGEXP'
}
return regexp_op_map.get(db_string, 'LIKE')
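# Illustrative sketch only (hypothetical helper added for documentation):
# demonstrates the operator lookup above with a made-up connection string.
def _example_regexp_op_usage():
    # 'mysql+pymysql://u:p@host/nova' -> 'mysql' -> 'REGEXP'; any engine not
    # in the map falls back to a LIKE-based substring match.
    return _get_regexp_op_for_connection('mysql+pymysql://u:p@host/nova')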
def _regex_instance_filter(query, filters):
"""Applies regular expression filtering to an Instance query.
Returns the updated query.
:param query: query to apply filters to
:param filters: dictionary of filters with regex values
"""
model = models.Instance
db_regexp_op = _get_regexp_op_for_connection(CONF.database.connection)
for filter_name in filters:
try:
column_attr = getattr(model, filter_name)
except AttributeError:
continue
if 'property' == type(column_attr).__name__:
continue
filter_val = filters[filter_name]
# Sometimes the REGEX filter value is not a string
if not isinstance(filter_val, six.string_types):
filter_val = str(filter_val)
if db_regexp_op == 'LIKE':
query = query.filter(column_attr.op(db_regexp_op)(
u'%' + filter_val + u'%'))
else:
query = query.filter(column_attr.op(db_regexp_op)(
filter_val))
return query
def _exact_instance_filter(query, filters, legal_keys):
"""Applies exact match filtering to an Instance query.
Returns the updated query. Modifies filters argument to remove
filters consumed.
:param query: query to apply filters to
:param filters: dictionary of filters; values that are lists,
tuples, sets, or frozensets cause an 'IN' test to
be performed, while exact matching ('==' operator)
is used for other values
:param legal_keys: list of keys to apply exact filtering to
"""
filter_dict = {}
model = models.Instance
# Walk through all the keys
for key in legal_keys:
# Skip ones we're not filtering on
if key not in filters:
continue
# OK, filtering on this key; what value do we search for?
value = filters.pop(key)
if key in ('metadata', 'system_metadata'):
column_attr = getattr(model, key)
if isinstance(value, list):
for item in value:
for k, v in item.items():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
else:
for k, v in value.items():
query = query.filter(column_attr.any(key=k))
query = query.filter(column_attr.any(value=v))
elif isinstance(value, (list, tuple, set, frozenset)):
if not value:
return None # empty IN-predicate; short circuit
# Looking for values in a list; apply to query directly
column_attr = getattr(model, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter(*[getattr(models.Instance, k) == v
for k, v in filter_dict.items()])
return query
def process_sort_params(sort_keys, sort_dirs,
default_keys=['created_at', 'id'],
default_dir='asc'):
"""Process the sort parameters to include default keys.
Creates a list of sort keys and a list of sort directions. Adds the default
keys to the end of the list if they are not already included.
When adding the default keys to the sort keys list, the associated
direction is:
1) The first element in the 'sort_dirs' list (if specified), else
2) 'default_dir' value (Note that 'asc' is the default value since this is
the default in sqlalchemy.utils.paginate_query)
:param sort_keys: List of sort keys to include in the processed list
:param sort_dirs: List of sort directions to include in the processed list
:param default_keys: List of sort keys that need to be included in the
processed list, they are added at the end of the list
if not already specified.
:param default_dir: Sort direction associated with each of the default
keys that are not supplied, used when they are added
to the processed list
:returns: list of sort keys, list of sort directions
:raise exception.InvalidInput: If more sort directions than sort keys
are specified or if an invalid sort
direction is specified
"""
# Determine direction to use for when adding default keys
if sort_dirs and len(sort_dirs) != 0:
default_dir_value = sort_dirs[0]
else:
default_dir_value = default_dir
# Create list of keys (do not modify the input list)
if sort_keys:
result_keys = list(sort_keys)
else:
result_keys = []
# If a list of directions is not provided, use the default sort direction
# for all provided keys
if sort_dirs:
result_dirs = []
# Verify sort direction
for sort_dir in sort_dirs:
if sort_dir not in ('asc', 'desc'):
msg = _("Unknown sort direction, must be 'desc' or 'asc'")
raise exception.InvalidInput(reason=msg)
result_dirs.append(sort_dir)
else:
result_dirs = [default_dir_value for _sort_key in result_keys]
# Ensure that the key and direction length match
while len(result_dirs) < len(result_keys):
result_dirs.append(default_dir_value)
    # Unless more directions are specified, which is an error
if len(result_dirs) > len(result_keys):
msg = _("Sort direction size exceeds sort key size")
raise exception.InvalidInput(reason=msg)
# Ensure defaults are included
for key in default_keys:
if key not in result_keys:
result_keys.append(key)
result_dirs.append(default_dir_value)
return result_keys, result_dirs
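# Illustrative sketch only (hypothetical helper added for documentation):
# shows how process_sort_params() appends the default keys and reuses the
# first supplied direction for them.
def _example_process_sort_params_usage():
    keys, dirs = process_sort_params(['display_name'], ['desc'])
    # keys == ['display_name', 'created_at', 'id']
    # dirs == ['desc', 'desc', 'desc']
    return keys, dirs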
@require_context
@pick_context_manager_reader_allow_async
def instance_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None,
columns_to_join=None):
"""Return instances and joins that were active during window."""
query = context.session.query(models.Instance)
if columns_to_join is None:
columns_to_join_new = ['info_cache', 'security_groups']
manual_joins = ['metadata', 'system_metadata']
else:
manual_joins, columns_to_join_new = (
_manual_join_columns(columns_to_join))
for column in columns_to_join_new:
if 'extra.' in column:
query = query.options(undefer(column))
else:
query = query.options(joinedload(column))
query = query.filter(or_(models.Instance.terminated_at == null(),
models.Instance.terminated_at > begin))
if end:
query = query.filter(models.Instance.launched_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
if host:
query = query.filter_by(host=host)
return _instances_fill_metadata(context, query.all(), manual_joins)
def _instance_get_all_query(context, project_only=False, joins=None):
if joins is None:
joins = ['info_cache', 'security_groups']
query = model_query(context,
models.Instance,
project_only=project_only)
for column in joins:
if 'extra.' in column:
query = query.options(undefer(column))
else:
query = query.options(joinedload(column))
return query
@pick_context_manager_reader_allow_async
def instance_get_all_by_host(context, host, columns_to_join=None):
return _instances_fill_metadata(context,
_instance_get_all_query(context).filter_by(host=host).all(),
manual_joins=columns_to_join)
def _instance_get_all_uuids_by_host(context, host):
"""Return a list of the instance uuids on a given host.
Returns a list of UUIDs, not Instance model objects.
"""
uuids = []
for tuple in model_query(context, models.Instance, (models.Instance.uuid,),
read_deleted="no").\
filter_by(host=host).\
all():
uuids.append(tuple[0])
return uuids
@pick_context_manager_reader
def instance_get_all_by_host_and_node(context, host, node,
columns_to_join=None):
if columns_to_join is None:
manual_joins = []
else:
candidates = ['system_metadata', 'metadata']
manual_joins = [x for x in columns_to_join if x in candidates]
columns_to_join = list(set(columns_to_join) - set(candidates))
return _instances_fill_metadata(context,
_instance_get_all_query(
context,
joins=columns_to_join).filter_by(host=host).
filter_by(node=node).all(), manual_joins=manual_joins)
@pick_context_manager_reader
def instance_get_all_by_host_and_not_type(context, host, type_id=None):
return _instances_fill_metadata(context,
_instance_get_all_query(context).filter_by(host=host).
filter(models.Instance.instance_type_id != type_id).all())
@pick_context_manager_reader
def instance_get_all_by_grantee_security_groups(context, group_ids):
if not group_ids:
return []
return _instances_fill_metadata(context,
_instance_get_all_query(context).
join(models.Instance.security_groups).
filter(models.SecurityGroup.rules.any(
models.SecurityGroupIngressRule.group_id.in_(group_ids))).
all())
@require_context
@main_context_manager.reader
def instance_floating_address_get_all(context, instance_uuid):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
floating_ips = model_query(context,
models.FloatingIp,
(models.FloatingIp.address,)).\
join(models.FloatingIp.fixed_ip).\
filter_by(instance_uuid=instance_uuid)
return [floating_ip.address for floating_ip in floating_ips]
# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
@pick_context_manager_reader
def instance_get_all_hung_in_rebooting(context, reboot_window):
reboot_window = (timeutils.utcnow() -
datetime.timedelta(seconds=reboot_window))
# NOTE(danms): this is only used in the _poll_rebooting_instances()
# call in compute/manager, so we can avoid the metadata lookups
# explicitly
return _instances_fill_metadata(context,
model_query(context, models.Instance).
filter(models.Instance.updated_at <= reboot_window).
filter_by(task_state=task_states.REBOOTING).all(),
manual_joins=[])
def _retry_instance_update():
"""Wrap with oslo_db_api.wrap_db_retry, and also retry on
UnknownInstanceUpdateConflict.
"""
exception_checker = \
lambda exc: isinstance(exc, (exception.UnknownInstanceUpdateConflict,))
return oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
exception_checker=exception_checker)
@require_context
@_retry_instance_update()
@pick_context_manager_writer
def instance_update(context, instance_uuid, values, expected=None):
return _instance_update(context, instance_uuid, values, expected)
@require_context
@_retry_instance_update()
@pick_context_manager_writer
def instance_update_and_get_original(context, instance_uuid, values,
columns_to_join=None, expected=None):
"""Set the given properties on an instance and update it. Return
a shallow copy of the original instance reference, as well as the
updated one.
:param context: = request context object
:param instance_uuid: = instance uuid
:param values: = dict containing column values
If "expected_task_state" exists in values, the update can only happen
when the task state before update matches expected_task_state. Otherwise
    an UnexpectedTaskStateError is raised.
:returns: a tuple of the form (old_instance_ref, new_instance_ref)
Raises NotFound if instance does not exist.
"""
instance_ref = _instance_get_by_uuid(context, instance_uuid,
columns_to_join=columns_to_join)
return (copy.copy(instance_ref), _instance_update(
context, instance_uuid, values, expected, original=instance_ref))
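# Illustrative sketch only (hypothetical context and values): update an
# instance only if its task_state is still what we expect, getting both the
# old and the updated refs back.
#
#   old_ref, new_ref = instance_update_and_get_original(
#       ctxt, instance_uuid,
#       {'vm_state': vm_states.ACTIVE,
#        'expected_task_state': task_states.SPAWNING})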
# NOTE(danms): This updates the instance's metadata list in-place and in
# the database to avoid stale data and refresh issues. It assumes the
# delete=True behavior of instance_metadata_update(...)
def _instance_metadata_update_in_place(context, instance, metadata_type, model,
metadata):
metadata = dict(metadata)
to_delete = []
for keyvalue in instance[metadata_type]:
key = keyvalue['key']
if key in metadata:
keyvalue['value'] = metadata.pop(key)
elif key not in metadata:
to_delete.append(keyvalue)
# NOTE: we have to hard_delete here otherwise we will get more than one
# system_metadata record when we read deleted for an instance;
# regular metadata doesn't have the same problem because we don't
# allow reading deleted regular metadata anywhere.
if metadata_type == 'system_metadata':
for condemned in to_delete:
context.session.delete(condemned)
instance[metadata_type].remove(condemned)
else:
for condemned in to_delete:
condemned.soft_delete(context.session)
for key, value in metadata.items():
newitem = model()
newitem.update({'key': key, 'value': value,
'instance_uuid': instance['uuid']})
context.session.add(newitem)
instance[metadata_type].append(newitem)
def _instance_update(context, instance_uuid, values, expected, original=None):
if not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(instance_uuid)
if expected is None:
expected = {}
else:
# Coerce all single values to singleton lists
expected = {k: [None] if v is None else sqlalchemyutils.to_list(v)
for (k, v) in six.iteritems(expected)}
# Extract 'expected_' values from values dict, as these aren't actually
# updates
for field in ('task_state', 'vm_state'):
expected_field = 'expected_%s' % field
if expected_field in values:
value = values.pop(expected_field, None)
# Coerce all single values to singleton lists
if value is None:
expected[field] = [None]
else:
expected[field] = sqlalchemyutils.to_list(value)
# Values which need to be updated separately
metadata = values.pop('metadata', None)
system_metadata = values.pop('system_metadata', None)
_handle_objects_related_type_conversions(values)
# Hostname is potentially unique, but this is enforced in code rather
# than the DB. The query below races, but the number of users of
# osapi_compute_unique_server_name_scope is small, and a robust fix
# will be complex. This is intentionally left as is for the moment.
if 'hostname' in values:
_validate_unique_server_name(context, values['hostname'])
compare = models.Instance(uuid=instance_uuid, **expected)
try:
instance_ref = model_query(context, models.Instance,
project_only=True).\
update_on_match(compare, 'uuid', values)
except update_match.NoRowsMatched:
# Update failed. Try to find why and raise a specific error.
# We should get here only because our expected values were not current
# when update_on_match executed. Having failed, we now have a hint that
# the values are out of date and should check them.
# This code is made more complex because we are using repeatable reads.
# If we have previously read the original instance in the current
# transaction, reading it again will return the same data, even though
# the above update failed because it has changed: it is not possible to
# determine what has changed in this transaction. In this case we raise
# UnknownInstanceUpdateConflict, which will cause the operation to be
# retried in a new transaction.
# Because of the above, if we have previously read the instance in the
# current transaction it will have been passed as 'original', and there
# is no point refreshing it. If we have not previously read the
# instance, we can fetch it here and we will get fresh data.
if original is None:
original = _instance_get_by_uuid(context, instance_uuid)
conflicts_expected = {}
conflicts_actual = {}
for (field, expected_values) in six.iteritems(expected):
actual = original[field]
if actual not in expected_values:
conflicts_expected[field] = expected_values
conflicts_actual[field] = actual
# Exception properties
exc_props = {
'instance_uuid': instance_uuid,
'expected': conflicts_expected,
'actual': conflicts_actual
}
# There was a conflict, but something (probably the MySQL read view,
# but possibly an exceptionally unlikely second race) is preventing us
# from seeing what it is. When we go round again we'll get a fresh
# transaction and a fresh read view.
if len(conflicts_actual) == 0:
raise exception.UnknownInstanceUpdateConflict(**exc_props)
# Task state gets special handling for convenience. We raise the
# specific error UnexpectedDeletingTaskStateError or
# UnexpectedTaskStateError as appropriate
if 'task_state' in conflicts_actual:
conflict_task_state = conflicts_actual['task_state']
if conflict_task_state == task_states.DELETING:
exc = exception.UnexpectedDeletingTaskStateError
else:
exc = exception.UnexpectedTaskStateError
# Everything else is an InstanceUpdateConflict
else:
exc = exception.InstanceUpdateConflict
raise exc(**exc_props)
if metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'metadata',
models.InstanceMetadata,
metadata)
if system_metadata is not None:
_instance_metadata_update_in_place(context, instance_ref,
'system_metadata',
models.InstanceSystemMetadata,
system_metadata)
return instance_ref
@pick_context_manager_writer
def instance_add_security_group(context, instance_uuid, security_group_id):
"""Associate the given security group with the given instance."""
sec_group_ref = models.SecurityGroupInstanceAssociation()
sec_group_ref.update({'instance_uuid': instance_uuid,
'security_group_id': security_group_id})
sec_group_ref.save(context.session)
@require_context
@pick_context_manager_writer
def instance_remove_security_group(context, instance_uuid, security_group_id):
"""Disassociate the given security group from the given instance."""
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
filter_by(security_group_id=security_group_id).\
soft_delete()
###################
@require_context
@pick_context_manager_reader
def instance_info_cache_get(context, instance_uuid):
"""Gets an instance info cache from the table.
:param instance_uuid: = uuid of the info cache's instance
"""
return model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
first()
@require_context
@pick_context_manager_writer
def instance_info_cache_update(context, instance_uuid, values):
"""Update an instance info cache record in the table.
:param instance_uuid: = uuid of info cache's instance
:param values: = dict containing column values to update
"""
convert_objects_related_datetimes(values)
info_cache = model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
first()
needs_create = False
if info_cache and info_cache['deleted']:
raise exception.InstanceInfoCacheNotFound(
instance_uuid=instance_uuid)
elif not info_cache:
# NOTE(tr3buchet): just in case someone blows away an instance's
# cache entry, re-create it.
values['instance_uuid'] = instance_uuid
info_cache = models.InstanceInfoCache(**values)
needs_create = True
try:
with main_context_manager.writer.savepoint.using(context):
if needs_create:
info_cache.save(context.session)
else:
info_cache.update(values)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to
# recreate the instance cache entry at the same time. First one
# wins.
pass
return info_cache
@require_context
@pick_context_manager_writer
def instance_info_cache_delete(context, instance_uuid):
"""Deletes an existing instance_info_cache record
:param instance_uuid: = uuid of the instance tied to the cache record
"""
model_query(context, models.InstanceInfoCache).\
filter_by(instance_uuid=instance_uuid).\
soft_delete()
###################
def _instance_extra_create(context, values):
inst_extra_ref = models.InstanceExtra()
inst_extra_ref.update(values)
inst_extra_ref.save(context.session)
return inst_extra_ref
@pick_context_manager_writer
def instance_extra_update_by_uuid(context, instance_uuid, values):
rows_updated = model_query(context, models.InstanceExtra).\
filter_by(instance_uuid=instance_uuid).\
update(values)
if not rows_updated:
LOG.debug("Created instance_extra for %s", instance_uuid)
create_values = copy.copy(values)
create_values["instance_uuid"] = instance_uuid
_instance_extra_create(context, create_values)
rows_updated = 1
return rows_updated
@pick_context_manager_reader
def instance_extra_get_by_instance_uuid(context, instance_uuid,
columns=None):
query = model_query(context, models.InstanceExtra).\
filter_by(instance_uuid=instance_uuid)
if columns is None:
columns = ['numa_topology', 'pci_requests', 'flavor', 'vcpu_model',
'migration_context']
for column in columns:
query = query.options(undefer(column))
instance_extra = query.first()
return instance_extra
###################
@require_context
@main_context_manager.writer
def key_pair_create(context, values):
try:
key_pair_ref = models.KeyPair()
key_pair_ref.update(values)
key_pair_ref.save(context.session)
return key_pair_ref
except db_exc.DBDuplicateEntry:
raise exception.KeyPairExists(key_name=values['name'])
@require_context
@main_context_manager.writer
def key_pair_destroy(context, user_id, name):
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
soft_delete()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
@require_context
@main_context_manager.reader
def key_pair_get(context, user_id, name):
result = model_query(context, models.KeyPair).\
filter_by(user_id=user_id).\
filter_by(name=name).\
first()
if not result:
raise exception.KeypairNotFound(user_id=user_id, name=name)
return result
@require_context
@main_context_manager.reader
def key_pair_get_all_by_user(context, user_id):
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
all()
@require_context
@main_context_manager.reader
def key_pair_count_by_user(context, user_id):
return model_query(context, models.KeyPair, read_deleted="no").\
filter_by(user_id=user_id).\
count()
###################
@main_context_manager.writer
def network_associate(context, project_id, network_id=None, force=False):
"""Associate a project with a network.
called by project_get_networks under certain conditions
and network manager add_network_to_project()
only associate if the project doesn't already have a network
or if force is True
force solves race condition where a fresh project has multiple instance
builds simultaneously picked up by multiple network hosts which attempt
to associate the project with multiple networks
force should only be used as a direct consequence of user request
all automated requests should not use force
"""
def network_query(project_filter, id=None):
filter_kwargs = {'project_id': project_filter}
if id is not None:
filter_kwargs['id'] = id
return model_query(context, models.Network, read_deleted="no").\
filter_by(**filter_kwargs).\
with_lockmode('update').\
first()
if not force:
# find out if project has a network
network_ref = network_query(project_id)
if force or not network_ref:
# in force mode or project doesn't have a network so associate
# with a new network
# get new network
network_ref = network_query(None, network_id)
if not network_ref:
raise exception.NoMoreNetworks()
# associate with network
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
network_ref['project_id'] = project_id
context.session.add(network_ref)
return network_ref
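# Illustrative calls for the semantics described in the docstring above
# (``ctxt`` and the ids are placeholders):
#
#     # Associate the project with an unassigned network, but only if it
#     # does not already have one:
#     network_associate(ctxt, 'project-1')
#
#     # Force association with a specific, currently unassociated network,
#     # e.g. as a direct result of a user request:
#     network_associate(ctxt, 'project-1', network_id=42, force=True)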
def _network_ips_query(context, network_id):
return model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id)
@main_context_manager.reader
def network_count_reserved_ips(context, network_id):
return _network_ips_query(context, network_id).\
filter_by(reserved=True).\
count()
@main_context_manager.writer
def network_create_safe(context, values):
network_ref = models.Network()
network_ref['uuid'] = str(uuid.uuid4())
network_ref.update(values)
try:
network_ref.save(context.session)
return network_ref
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
@main_context_manager.writer
def network_delete_safe(context, network_id):
result = model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
filter_by(allocated=True).\
count()
if result != 0:
raise exception.NetworkInUse(network_id=network_id)
network_ref = _network_get(context, network_id=network_id)
model_query(context, models.FixedIp, read_deleted="no").\
filter_by(network_id=network_id).\
soft_delete()
context.session.delete(network_ref)
@main_context_manager.writer
def network_disassociate(context, network_id, disassociate_host,
disassociate_project):
net_update = {}
if disassociate_project:
net_update['project_id'] = None
if disassociate_host:
net_update['host'] = None
network_update(context, network_id, net_update)
def _network_get(context, network_id, project_only='allow_none'):
result = model_query(context, models.Network, project_only=project_only).\
filter_by(id=network_id).\
first()
if not result:
raise exception.NetworkNotFound(network_id=network_id)
return result
@require_context
@main_context_manager.reader
def network_get(context, network_id, project_only='allow_none'):
return _network_get(context, network_id, project_only=project_only)
@require_context
@main_context_manager.reader
def network_get_all(context, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).all()
if not result:
raise exception.NoNetworksFound()
return result
@require_context
@main_context_manager.reader
def network_get_all_by_uuids(context, network_uuids, project_only):
result = model_query(context, models.Network, read_deleted="no",
project_only=project_only).\
filter(models.Network.uuid.in_(network_uuids)).\
all()
if not result:
raise exception.NoNetworksFound()
# check if the result contains all the networks
# we are looking for
for network_uuid in network_uuids:
for network in result:
if network['uuid'] == network_uuid:
break
else:
if project_only:
raise exception.NetworkNotFoundForProject(
network_uuid=network_uuid, project_id=context.project_id)
raise exception.NetworkNotFound(network_id=network_uuid)
return result
def _get_associated_fixed_ips_query(context, network_id, host=None):
# NOTE(vish): The ugly joins here are to solve a performance issue and
# should be removed once we can add and remove leases
# without regenerating the whole list
vif_and = and_(models.VirtualInterface.id ==
models.FixedIp.virtual_interface_id,
models.VirtualInterface.deleted == 0)
inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
models.Instance.deleted == 0)
# NOTE(vish): This subquery left joins the minimum interface id for each
# instance. If the join succeeds (i.e. the 11th column is not
# null), then the fixed ip is on the first interface.
subq = context.session.query(
func.min(models.VirtualInterface.id).label("id"),
models.VirtualInterface.instance_uuid).\
group_by(models.VirtualInterface.instance_uuid).subquery()
subq_and = and_(subq.c.id == models.FixedIp.virtual_interface_id,
subq.c.instance_uuid == models.VirtualInterface.instance_uuid)
query = context.session.query(
models.FixedIp.address,
models.FixedIp.instance_uuid,
models.FixedIp.network_id,
models.FixedIp.virtual_interface_id,
models.VirtualInterface.address,
models.Instance.hostname,
models.Instance.updated_at,
models.Instance.created_at,
models.FixedIp.allocated,
models.FixedIp.leased,
subq.c.id).\
filter(models.FixedIp.deleted == 0).\
filter(models.FixedIp.network_id == network_id).\
join((models.VirtualInterface, vif_and)).\
join((models.Instance, inst_and)).\
outerjoin((subq, subq_and)).\
filter(models.FixedIp.instance_uuid != null()).\
filter(models.FixedIp.virtual_interface_id != null())
if host:
query = query.filter(models.Instance.host == host)
return query
@main_context_manager.reader
def network_get_associated_fixed_ips(context, network_id, host=None):
# FIXME(sirp): since this returns fixed_ips, this would be better named
# fixed_ip_get_all_by_network.
query = _get_associated_fixed_ips_query(context, network_id, host)
result = query.all()
data = []
for datum in result:
cleaned = {}
cleaned['address'] = datum[0]
cleaned['instance_uuid'] = datum[1]
cleaned['network_id'] = datum[2]
cleaned['vif_id'] = datum[3]
cleaned['vif_address'] = datum[4]
cleaned['instance_hostname'] = datum[5]
cleaned['instance_updated'] = datum[6]
cleaned['instance_created'] = datum[7]
cleaned['allocated'] = datum[8]
cleaned['leased'] = datum[9]
# NOTE(vish): default_route is True if this fixed ip is on the first
        #             interface of its instance.
cleaned['default_route'] = datum[10] is not None
data.append(cleaned)
return data
@main_context_manager.reader
def network_in_use_on_host(context, network_id, host):
query = _get_associated_fixed_ips_query(context, network_id, host)
return query.count() > 0
def _network_get_query(context):
return model_query(context, models.Network, read_deleted="no")
@main_context_manager.reader
def network_get_by_uuid(context, uuid):
result = _network_get_query(context).filter_by(uuid=uuid).first()
if not result:
raise exception.NetworkNotFoundForUUID(uuid=uuid)
return result
@main_context_manager.reader
def network_get_by_cidr(context, cidr):
result = _network_get_query(context).\
filter(or_(models.Network.cidr == cidr,
models.Network.cidr_v6 == cidr)).\
first()
if not result:
raise exception.NetworkNotFoundForCidr(cidr=cidr)
return result
@main_context_manager.reader
def network_get_all_by_host(context, host):
fixed_host_filter = or_(models.FixedIp.host == host,
and_(models.FixedIp.instance_uuid != null(),
models.Instance.host == host))
fixed_ip_query = model_query(context, models.FixedIp,
(models.FixedIp.network_id,)).\
outerjoin((models.Instance,
models.Instance.uuid ==
models.FixedIp.instance_uuid)).\
filter(fixed_host_filter)
# NOTE(vish): return networks that have host set
# or that have a fixed ip with host set
# or that have an instance with host set
host_filter = or_(models.Network.host == host,
models.Network.id.in_(fixed_ip_query.subquery()))
return _network_get_query(context).filter(host_filter).all()
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
retry_on_request=True)
@main_context_manager.writer
def network_set_host(context, network_id, host_id):
network_ref = _network_get_query(context).\
filter_by(id=network_id).\
first()
if not network_ref:
raise exception.NetworkNotFound(network_id=network_id)
if network_ref.host:
return None
rows_updated = _network_get_query(context).\
filter_by(id=network_id).\
filter_by(host=None).\
update({'host': host_id})
if not rows_updated:
LOG.debug('The row was updated in a concurrent transaction, '
'we will fetch another row')
raise db_exc.RetryRequest(
exception.NetworkSetHostFailed(network_id=network_id))
@require_context
@main_context_manager.writer
def network_update(context, network_id, values):
network_ref = _network_get(context, network_id)
network_ref.update(values)
try:
network_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.DuplicateVlan(vlan=values['vlan'])
return network_ref
###################
@require_context
@main_context_manager.reader
def quota_get(context, project_id, resource, user_id=None):
model = models.ProjectUserQuota if user_id else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
query = query.filter_by(user_id=user_id)
result = query.first()
if not result:
if user_id:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_context
@main_context_manager.reader
def quota_get_all_by_project_and_user(context, project_id, user_id):
user_quotas = model_query(context, models.ProjectUserQuota,
(models.ProjectUserQuota.resource,
models.ProjectUserQuota.hard_limit)).\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
all()
result = {'project_id': project_id, 'user_id': user_id}
for user_quota in user_quotas:
result[user_quota.resource] = user_quota.hard_limit
return result
@require_context
@main_context_manager.reader
def quota_get_all_by_project(context, project_id):
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
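# The returned mapping is flat, keyed by resource name; e.g. a project with
# two overridden limits might come back as (illustrative values):
#
#     {'project_id': 'project-1', 'instances': 20, 'cores': 40}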
@require_context
@main_context_manager.reader
def quota_get_all(context, project_id):
result = model_query(context, models.ProjectUserQuota).\
filter_by(project_id=project_id).\
all()
return result
@main_context_manager.writer
def quota_create(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
quota_ref = models.ProjectUserQuota() if per_user else models.Quota()
if per_user:
quota_ref.user_id = user_id
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
try:
quota_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.QuotaExists(project_id=project_id, resource=resource)
return quota_ref
@main_context_manager.writer
def quota_update(context, project_id, resource, limit, user_id=None):
per_user = user_id and resource not in PER_PROJECT_QUOTAS
model = models.ProjectUserQuota if per_user else models.Quota
query = model_query(context, model).\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if per_user:
query = query.filter_by(user_id=user_id)
result = query.update({'hard_limit': limit})
if not result:
if per_user:
raise exception.ProjectUserQuotaNotFound(project_id=project_id,
user_id=user_id)
else:
raise exception.ProjectQuotaNotFound(project_id=project_id)
###################
@require_context
@main_context_manager.reader
def quota_class_get(context, class_name, resource):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
@main_context_manager.reader
def quota_class_get_default(context):
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).\
all()
result = {'class_name': _DEFAULT_QUOTA_NAME}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
@main_context_manager.reader
def quota_class_get_all_by_name(context, class_name):
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@main_context_manager.writer
def quota_class_create(context, class_name, resource, limit):
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
quota_class_ref.save(context.session)
return quota_class_ref
@main_context_manager.writer
def quota_class_update(context, class_name, resource, limit):
result = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
update({'hard_limit': limit})
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
###################
@require_context
@main_context_manager.reader
def quota_usage_get(context, project_id, resource, user_id=None):
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource)
if user_id:
if resource not in PER_PROJECT_QUOTAS:
result = query.filter_by(user_id=user_id).first()
else:
result = query.filter_by(user_id=None).first()
else:
result = query.first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
return result
def _quota_usage_get_all(context, project_id, user_id=None):
query = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id)
result = {'project_id': project_id}
if user_id:
query = query.filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == null()))
result['user_id'] = user_id
rows = query.all()
for row in rows:
if row.resource in result:
result[row.resource]['in_use'] += row.in_use
result[row.resource]['reserved'] += row.reserved
else:
result[row.resource] = dict(in_use=row.in_use,
reserved=row.reserved)
return result
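# Shape of the dict built above (illustrative values):
#
#     {'project_id': 'project-1',
#      'user_id': 'user-1',          # only present when user_id was given
#      'instances': {'in_use': 2, 'reserved': 1},
#      'cores': {'in_use': 4, 'reserved': 2}}
#
# When a user_id is supplied, per-user rows and project-level rows
# (user_id=None) for the same resource are summed together.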
@require_context
@main_context_manager.reader
def quota_usage_get_all_by_project_and_user(context, project_id, user_id):
return _quota_usage_get_all(context, project_id, user_id=user_id)
@require_context
@main_context_manager.reader
def quota_usage_get_all_by_project(context, project_id):
return _quota_usage_get_all(context, project_id)
def _quota_usage_create(project_id, user_id, resource, in_use,
reserved, until_refresh, session):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
quota_usage_ref.user_id = user_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
    # updated_at is needed for judging whether max_age has expired
quota_usage_ref.updated_at = timeutils.utcnow()
quota_usage_ref.save(session)
return quota_usage_ref
@main_context_manager.writer
def quota_usage_update(context, project_id, user_id, resource, **kwargs):
updates = {}
for key in ['in_use', 'reserved', 'until_refresh']:
if key in kwargs:
updates[key] = kwargs[key]
result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
filter(or_(models.QuotaUsage.user_id == user_id,
models.QuotaUsage.user_id == null())).\
update(updates)
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
###################
def _reservation_create(uuid, usage, project_id, user_id, resource,
delta, expire, session):
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage['id']
reservation_ref.project_id = project_id
reservation_ref.user_id = user_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.save(session)
return reservation_ref
###################
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_project_user_quota_usages(context, project_id, user_id):
rows = model_query(context, models.QuotaUsage,
read_deleted="no").\
filter_by(project_id=project_id).\
order_by(models.QuotaUsage.id.asc()).\
with_lockmode('update').\
all()
proj_result = dict()
user_result = dict()
    # Get the total counts of in_use and reserved per resource
for row in rows:
proj_result.setdefault(row.resource,
dict(in_use=0, reserved=0, total=0))
proj_result[row.resource]['in_use'] += row.in_use
proj_result[row.resource]['reserved'] += row.reserved
proj_result[row.resource]['total'] += (row.in_use + row.reserved)
if row.user_id is None or row.user_id == user_id:
user_result[row.resource] = row
return proj_result, user_result
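# Sketch of the two return values above (illustrative):
#
#     proj_result = {'cores': {'in_use': 6, 'reserved': 2, 'total': 8}}
#     user_result = {'cores': <QuotaUsage row for this user, or the
#                              project-level row with user_id=None>}
#
# proj_result aggregates every row for the project, while user_result keeps
# the raw QuotaUsage objects so callers can update them in place.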
def _create_quota_usage_if_missing(user_usages, resource, until_refresh,
project_id, user_id, session):
"""Creates a QuotaUsage record and adds to user_usages if not present.
:param user_usages: dict of resource keys to QuotaUsage records. This is
updated if resource is not in user_usages yet or
until_refresh is not None.
:param resource: The resource being checked for quota usage.
:param until_refresh: Count of reservations until usage is refreshed,
int or None
:param project_id: The project being checked for quota usage.
:param user_id: The user being checked for quota usage.
:param session: DB session holding a transaction lock.
:return: True if a new QuotaUsage record was created and added
to user_usages, False otherwise.
"""
new_usage = None
if resource not in user_usages:
user_id_to_use = user_id
if resource in PER_PROJECT_QUOTAS:
user_id_to_use = None
new_usage = _quota_usage_create(project_id, user_id_to_use, resource,
0, 0, until_refresh or None, session)
user_usages[resource] = new_usage
return new_usage is not None
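# For example, the first reservation against a brand new project/user will
# not find a 'cores' usage row, so a zeroed QuotaUsage (in_use=0,
# reserved=0) is created here and cached in user_usages before the refresh
# logic in quota_reserve() runs. Per-project resources are stored with
# user_id=None.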
def _is_quota_refresh_needed(quota_usage, max_age):
"""Determines if a quota usage refresh is needed.
:param quota_usage: A QuotaUsage object for a given resource.
:param max_age: Number of seconds between subsequent usage refreshes.
:return: True if a refresh is needed, False otherwise.
"""
refresh = False
if quota_usage.in_use < 0:
# Negative in_use count indicates a desync, so try to
# heal from that...
LOG.debug('in_use has dropped below 0; forcing refresh for '
'QuotaUsage: %s', dict(quota_usage))
refresh = True
elif quota_usage.until_refresh is not None:
quota_usage.until_refresh -= 1
if quota_usage.until_refresh <= 0:
refresh = True
elif max_age and (timeutils.utcnow() -
quota_usage.updated_at).seconds >= max_age:
refresh = True
return refresh
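# Worked example of the refresh rules above (illustrative values):
#
#     quota_usage.in_use == -1                   -> refresh (heal desync)
#     quota_usage.until_refresh == 1             -> decremented to 0, refresh
#     max_age == 3600, updated_at two hours old  -> refresh
#
# Note that until_refresh is decremented in place as a side effect of this
# check, so the caller is expected to persist the QuotaUsage row afterwards.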
def _refresh_quota_usages(quota_usage, until_refresh, in_use):
"""Refreshes quota usage for the given resource.
:param quota_usage: A QuotaUsage object for a given resource.
:param until_refresh: Count of reservations until usage is refreshed,
int or None
:param in_use: Actual quota usage for the resource.
"""
if quota_usage.in_use != in_use:
LOG.info(_LI('quota_usages out of sync, updating. '
'project_id: %(project_id)s, '
'user_id: %(user_id)s, '
'resource: %(res)s, '
'tracked usage: %(tracked_use)s, '
'actual usage: %(in_use)s'),
{'project_id': quota_usage.project_id,
'user_id': quota_usage.user_id,
'res': quota_usage.resource,
'tracked_use': quota_usage.in_use,
'in_use': in_use})
else:
LOG.debug('QuotaUsage has not changed, refresh is unnecessary for: %s',
dict(quota_usage))
# Update the usage
quota_usage.in_use = in_use
quota_usage.until_refresh = until_refresh or None
def _calculate_overquota(project_quotas, user_quotas, deltas,
project_usages, user_usages):
"""Checks if any resources will go over quota based on the request.
:param project_quotas: dict of resource quotas (limits) for the project.
:param user_quotas: dict of resource quotas (limits) for the user.
:param deltas: dict of resource keys to positive/negative quota
changes for the resources in a given operation.
:param project_usages: dict of resource keys to QuotaUsage records for the
project.
:param user_usages: dict of resource keys to QuotaUsage records for the
user.
:return: list of resources that are over-quota for the
operation.
"""
overs = []
for res, delta in deltas.items():
# We can't go over-quota if we're not reserving anything.
if delta >= 0:
# We can't go over-quota if we have unlimited quotas.
# over if the project usage + delta is more than project quota
if 0 <= project_quotas[res] < delta + project_usages[res]['total']:
LOG.debug('Request is over project quota for resource '
'"%(res)s". Project limit: %(limit)s, delta: '
'%(delta)s, current total project usage: %(total)s',
{'res': res, 'limit': project_quotas[res],
'delta': delta,
'total': project_usages[res]['total']})
overs.append(res)
# We can't go over-quota if we have unlimited quotas.
# over if the user usage + delta is more than user quota
elif 0 <= user_quotas[res] < delta + user_usages[res]['total']:
LOG.debug('Request is over user quota for resource '
'"%(res)s". User limit: %(limit)s, delta: '
'%(delta)s, current total user usage: %(total)s',
{'res': res, 'limit': user_quotas[res],
'delta': delta, 'total': user_usages[res]['total']})
overs.append(res)
return overs
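# Worked example of the check above (numbers are purely illustrative):
#
#     project_quotas = {'instances': 10}
#     user_quotas    = {'instances': 10}
#     deltas         = {'instances': 2}
#     project_usages = {'instances': {'total': 9}}
#     user_usages    = {'instances': {'total': 9}}
#
# The project check 0 <= 10 < 2 + 9 holds, so 'instances' is reported as
# over quota. An unlimited quota (negative limit) never satisfies
# 0 <= limit, and negative deltas are skipped, so freeing resources can
# never trip this check.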
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def quota_reserve(context, resources, project_quotas, user_quotas, deltas,
expire, until_refresh, max_age, project_id=None,
user_id=None):
elevated = context.elevated()
if project_id is None:
project_id = context.project_id
if user_id is None:
user_id = context.user_id
# Get the current usages
project_usages, user_usages = _get_project_user_quota_usages(
context, project_id, user_id)
# Handle usage refresh
work = set(deltas.keys())
while work:
resource = work.pop()
# Do we need to refresh the usage?
created = _create_quota_usage_if_missing(user_usages, resource,
until_refresh, project_id,
user_id, context.session)
refresh = created or _is_quota_refresh_needed(
user_usages[resource], max_age)
# OK, refresh the usage
if refresh:
# Grab the sync routine
sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
updates = sync(elevated, project_id, user_id)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
_create_quota_usage_if_missing(user_usages, res,
until_refresh, project_id,
user_id, context.session)
_refresh_quota_usages(user_usages[res], until_refresh,
in_use)
# Because more than one resource may be refreshed
# by the call to the sync routine, and we don't
# want to double-sync, we make sure all refreshed
# resources are dropped from the work set.
work.discard(res)
# NOTE(Vek): We make the assumption that the sync
# routine actually refreshes the
# resources that it is the sync routine
# for. We don't check, because this is
# a best-effort mechanism.
# Check for deltas that would go negative
unders = [res for res, delta in deltas.items()
if delta < 0 and
delta + user_usages[res].in_use < 0]
# Now, let's check the quotas
# NOTE(Vek): We're only concerned about positive increments.
# If a project has gone over quota, we want them to
# be able to reduce their usage without any
# problems.
for key, value in user_usages.items():
if key not in project_usages:
LOG.debug('Copying QuotaUsage for resource "%(key)s" from '
'user_usages into project_usages: %(value)s',
{'key': key, 'value': dict(value)})
project_usages[key] = value
overs = _calculate_overquota(project_quotas, user_quotas, deltas,
project_usages, user_usages)
# NOTE(Vek): The quota check needs to be in the transaction,
# but the transaction doesn't fail just because
# we're over quota, so the OverQuota raise is
# outside the transaction. If we did the raise
# here, our usage updates would be discarded, but
# they're not invalidated by being over-quota.
# Create the reservations
if not overs:
reservations = []
for res, delta in deltas.items():
reservation = _reservation_create(
str(uuid.uuid4()),
user_usages[res],
project_id,
user_id,
res, delta, expire,
context.session)
reservations.append(reservation.uuid)
# Also update the reserved quantity
# NOTE(Vek): Again, we are only concerned here about
# positive increments. Here, though, we're
# worried about the following scenario:
#
# 1) User initiates resize down.
# 2) User allocates a new instance.
# 3) Resize down fails or is reverted.
# 4) User is now over quota.
#
# To prevent this, we only update the
# reserved value if the delta is positive.
if delta > 0:
user_usages[res].reserved += delta
# Apply updates to the usages table
for usage_ref in user_usages.values():
context.session.add(usage_ref)
if unders:
LOG.warning(_LW("Change will make usage less than 0 for the following "
"resources: %s"), unders)
if overs:
if project_quotas == user_quotas:
usages = project_usages
else:
# NOTE(mriedem): user_usages is a dict of resource keys to
            # QuotaUsage sqlalchemy dict-like objects and doesn't log well
# so convert the user_usages values to something useful for
# logging. Remove this if we ever change how
# _get_project_user_quota_usages returns the user_usages values.
user_usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'],
total=v['total'])
for k, v in user_usages.items()}
usages = user_usages
usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'])
for k, v in usages.items()}
LOG.debug('Raise OverQuota exception because: '
'project_quotas: %(project_quotas)s, '
'user_quotas: %(user_quotas)s, deltas: %(deltas)s, '
'overs: %(overs)s, project_usages: %(project_usages)s, '
'user_usages: %(user_usages)s',
{'project_quotas': project_quotas,
'user_quotas': user_quotas,
'overs': overs, 'deltas': deltas,
'project_usages': project_usages,
'user_usages': user_usages})
raise exception.OverQuota(overs=sorted(overs), quotas=user_quotas,
usages=usages)
return reservations
def _quota_reservations_query(context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
return model_query(context, models.Reservation, read_deleted="no").\
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update')
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def reservation_commit(context, reservations, project_id=None, user_id=None):
_project_usages, user_usages = _get_project_user_quota_usages(
context, project_id, user_id)
reservation_query = _quota_reservations_query(context, reservations)
for reservation in reservation_query.all():
usage = user_usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def reservation_rollback(context, reservations, project_id=None, user_id=None):
_project_usages, user_usages = _get_project_user_quota_usages(
context, project_id, user_id)
reservation_query = _quota_reservations_query(context, reservations)
for reservation in reservation_query.all():
usage = user_usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
reservation_query.soft_delete(synchronize_session=False)
@main_context_manager.writer
def quota_destroy_all_by_project_and_user(context, project_id, user_id):
model_query(context, models.ProjectUserQuota, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
soft_delete(synchronize_session=False)
@main_context_manager.writer
def quota_destroy_all_by_project(context, project_id):
model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.ProjectUserQuota, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
model_query(context, models.Reservation, read_deleted="no").\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@main_context_manager.writer
def reservation_expire(context):
current_time = timeutils.utcnow()
reservation_query = model_query(
context, models.Reservation, read_deleted="no").\
filter(models.Reservation.expire < current_time)
for reservation in reservation_query.join(models.QuotaUsage).all():
if reservation.delta >= 0:
reservation.usage.reserved -= reservation.delta
context.session.add(reservation.usage)
reservation_query.soft_delete(synchronize_session=False)
###################
def _ec2_volume_get_query(context):
return model_query(context, models.VolumeIdMapping, read_deleted='yes')
def _ec2_snapshot_get_query(context):
return model_query(context, models.SnapshotIdMapping, read_deleted='yes')
@require_context
@main_context_manager.writer
def ec2_volume_create(context, volume_uuid, id=None):
"""Create ec2 compatible volume by provided uuid."""
ec2_volume_ref = models.VolumeIdMapping()
ec2_volume_ref.update({'uuid': volume_uuid})
if id is not None:
ec2_volume_ref.update({'id': id})
ec2_volume_ref.save(context.session)
return ec2_volume_ref
@require_context
@main_context_manager.reader
def ec2_volume_get_by_uuid(context, volume_uuid):
result = _ec2_volume_get_query(context).\
filter_by(uuid=volume_uuid).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_uuid)
return result
@require_context
@main_context_manager.reader
def ec2_volume_get_by_id(context, volume_id):
result = _ec2_volume_get_query(context).\
filter_by(id=volume_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result
@require_context
@main_context_manager.writer
def ec2_snapshot_create(context, snapshot_uuid, id=None):
"""Create ec2 compatible snapshot by provided uuid."""
ec2_snapshot_ref = models.SnapshotIdMapping()
ec2_snapshot_ref.update({'uuid': snapshot_uuid})
if id is not None:
ec2_snapshot_ref.update({'id': id})
ec2_snapshot_ref.save(context.session)
return ec2_snapshot_ref
@require_context
@main_context_manager.reader
def ec2_snapshot_get_by_ec2_id(context, ec2_id):
result = _ec2_snapshot_get_query(context).\
filter_by(id=ec2_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=ec2_id)
return result
@require_context
@main_context_manager.reader
def ec2_snapshot_get_by_uuid(context, snapshot_uuid):
result = _ec2_snapshot_get_query(context).\
filter_by(uuid=snapshot_uuid).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_uuid)
return result
###################
def _block_device_mapping_get_query(context, columns_to_join=None):
if columns_to_join is None:
columns_to_join = []
query = model_query(context, models.BlockDeviceMapping)
for column in columns_to_join:
query = query.options(joinedload(column))
return query
def _scrub_empty_str_values(dct, keys_to_scrub):
"""Remove any keys found in sequence keys_to_scrub from the dict
if they have the value ''.
"""
for key in keys_to_scrub:
if key in dct and dct[key] == '':
del dct[key]
def _from_legacy_values(values, legacy, allow_updates=False):
if legacy:
if allow_updates and block_device.is_safe_for_update(values):
return values
else:
return block_device.BlockDeviceDict.from_legacy(values)
else:
return values
@require_context
@pick_context_manager_writer
def block_device_mapping_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy)
convert_objects_related_datetimes(values)
bdm_ref = models.BlockDeviceMapping()
bdm_ref.update(values)
bdm_ref.save(context.session)
return bdm_ref
@require_context
@pick_context_manager_writer
def block_device_mapping_update(context, bdm_id, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
convert_objects_related_datetimes(values)
query = _block_device_mapping_get_query(context).filter_by(id=bdm_id)
query.update(values)
return query.first()
@pick_context_manager_writer
def block_device_mapping_update_or_create(context, values, legacy=True):
_scrub_empty_str_values(values, ['volume_size'])
values = _from_legacy_values(values, legacy, allow_updates=True)
convert_objects_related_datetimes(values)
result = None
# NOTE(xqueralt): Only update a BDM when device_name was provided. We
# allow empty device names so they will be set later by the manager.
if values['device_name']:
query = _block_device_mapping_get_query(context)
result = query.filter_by(instance_uuid=values['instance_uuid'],
device_name=values['device_name']).first()
if result:
result.update(values)
else:
# Either the device_name doesn't exist in the database yet, or no
# device_name was provided. Both cases mean creating a new BDM.
result = models.BlockDeviceMapping(**values)
result.save(context.session)
# NOTE(xqueralt): Prevent from having multiple swap devices for the
# same instance. This will delete all the existing ones.
if block_device.new_format_is_swap(values):
query = _block_device_mapping_get_query(context)
query = query.filter_by(instance_uuid=values['instance_uuid'],
source_type='blank', guest_format='swap')
query = query.filter(models.BlockDeviceMapping.id != result.id)
query.soft_delete()
return result
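# Rough sketch of the upsert behaviour above (field values are placeholders
# and not a complete BDM; legacy=False because the dict is new-format):
#
#     block_device_mapping_update_or_create(
#         ctxt, {'instance_uuid': instance_uuid,
#                'device_name': '/dev/vdb',
#                'source_type': 'volume',
#                'volume_id': volume_id}, legacy=False)
#
# A row is matched on (instance_uuid, device_name) when a device name is
# given, otherwise a new row is always created; if the new mapping itself
# is a blank swap device, any other swap mappings for the instance are
# soft-deleted.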
@require_context
@pick_context_manager_reader_allow_async
def block_device_mapping_get_all_by_instance_uuids(context, instance_uuids):
if not instance_uuids:
return []
return _block_device_mapping_get_query(context).filter(
models.BlockDeviceMapping.instance_uuid.in_(instance_uuids)).all()
@require_context
@pick_context_manager_reader_allow_async
def block_device_mapping_get_all_by_instance(context, instance_uuid):
return _block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
all()
@require_context
@pick_context_manager_reader
def block_device_mapping_get_all_by_volume_id(context, volume_id,
columns_to_join=None):
return _block_device_mapping_get_query(context,
columns_to_join=columns_to_join).\
filter_by(volume_id=volume_id).\
all()
@require_context
@pick_context_manager_reader
def block_device_mapping_get_by_instance_and_volume_id(context, volume_id,
instance_uuid,
columns_to_join=None):
return _block_device_mapping_get_query(context,
columns_to_join=columns_to_join).\
filter_by(volume_id=volume_id).\
filter_by(instance_uuid=instance_uuid).\
first()
@require_context
@pick_context_manager_writer
def block_device_mapping_destroy(context, bdm_id):
_block_device_mapping_get_query(context).\
filter_by(id=bdm_id).\
soft_delete()
@require_context
@pick_context_manager_writer
def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
volume_id):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(volume_id=volume_id).\
soft_delete()
@require_context
@pick_context_manager_writer
def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
device_name):
_block_device_mapping_get_query(context).\
filter_by(instance_uuid=instance_uuid).\
filter_by(device_name=device_name).\
soft_delete()
###################
@require_context
@main_context_manager.writer
def security_group_create(context, values):
security_group_ref = models.SecurityGroup()
    # FIXME(devcamcar): Unless I do this, rules fail with a lazy load
    # exception once save() is called. This will get cleaned up in next orm
    # pass.
security_group_ref.rules
security_group_ref.update(values)
try:
with main_context_manager.writer.savepoint.using(context):
security_group_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=values['project_id'],
security_group_name=values['name'])
return security_group_ref
def _security_group_get_query(context, read_deleted=None,
project_only=False, join_rules=True):
query = model_query(context, models.SecurityGroup,
read_deleted=read_deleted, project_only=project_only)
if join_rules:
query = query.options(joinedload_all('rules.grantee_group'))
return query
def _security_group_get_by_names(context, project_id, group_names):
"""Get security group models for a project by a list of names.
Raise SecurityGroupNotFoundForProject for a name not found.
"""
query = _security_group_get_query(context, read_deleted="no",
join_rules=False).\
filter_by(project_id=project_id).\
filter(models.SecurityGroup.name.in_(group_names))
sg_models = query.all()
if len(sg_models) == len(group_names):
return sg_models
# Find the first one missing and raise
group_names_from_models = [x.name for x in sg_models]
for group_name in group_names:
if group_name not in group_names_from_models:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
# Not Reached
@require_context
@main_context_manager.reader
def security_group_get_all(context):
return _security_group_get_query(context).all()
@require_context
@main_context_manager.reader
def security_group_get(context, security_group_id, columns_to_join=None):
join_rules = columns_to_join and 'rules' in columns_to_join
if join_rules:
columns_to_join.remove('rules')
query = _security_group_get_query(context, project_only=True,
join_rules=join_rules).\
filter_by(id=security_group_id)
if columns_to_join is None:
columns_to_join = []
for column in columns_to_join:
if column.startswith('instances'):
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
return result
@require_context
@main_context_manager.reader
def security_group_get_by_name(context, project_id, group_name,
columns_to_join=None):
query = _security_group_get_query(context,
read_deleted="no", join_rules=False).\
filter_by(project_id=project_id).\
filter_by(name=group_name)
if columns_to_join is None:
columns_to_join = ['instances', 'rules.grantee_group']
for column in columns_to_join:
query = query.options(joinedload_all(column))
result = query.first()
if not result:
raise exception.SecurityGroupNotFoundForProject(
project_id=project_id, security_group_id=group_name)
return result
@require_context
@main_context_manager.reader
def security_group_get_by_project(context, project_id):
return _security_group_get_query(context, read_deleted="no").\
filter_by(project_id=project_id).\
all()
@require_context
@main_context_manager.reader
def security_group_get_by_instance(context, instance_uuid):
return _security_group_get_query(context, read_deleted="no").\
join(models.SecurityGroup.instances).\
filter_by(uuid=instance_uuid).\
all()
@require_context
@main_context_manager.reader
def security_group_in_use(context, group_id):
# Are there any instances that haven't been deleted
# that include this group?
inst_assoc = model_query(context,
models.SecurityGroupInstanceAssociation,
read_deleted="no").\
filter_by(security_group_id=group_id).\
all()
for ia in inst_assoc:
num_instances = model_query(context, models.Instance,
read_deleted="no").\
filter_by(uuid=ia.instance_uuid).\
count()
if num_instances:
return True
return False
@require_context
@main_context_manager.writer
def security_group_update(context, security_group_id, values,
columns_to_join=None):
query = model_query(context, models.SecurityGroup).filter_by(
id=security_group_id)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload_all(column))
security_group_ref = query.first()
if not security_group_ref:
raise exception.SecurityGroupNotFound(
security_group_id=security_group_id)
security_group_ref.update(values)
name = security_group_ref['name']
project_id = security_group_ref['project_id']
try:
security_group_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.SecurityGroupExists(
project_id=project_id,
security_group_name=name)
return security_group_ref
def security_group_ensure_default(context):
"""Ensure default security group exists for a project_id."""
try:
# NOTE(rpodolyaka): create the default security group, if it doesn't
# exist. This must be done in a separate transaction, so that
# this one is not aborted in case a concurrent one succeeds first
# and the unique constraint for security group names is violated
# by a concurrent INSERT
with main_context_manager.writer.independent.using(context):
return _security_group_ensure_default(context)
except exception.SecurityGroupExists:
# NOTE(rpodolyaka): a concurrent transaction has succeeded first,
# suppress the error and proceed
return security_group_get_by_name(context, context.project_id,
'default')
@main_context_manager.writer
def _security_group_ensure_default(context):
try:
default_group = _security_group_get_by_names(context,
context.project_id,
['default'])[0]
except exception.NotFound:
values = {'name': 'default',
'description': 'default',
'user_id': context.user_id,
'project_id': context.project_id}
default_group = security_group_create(context, values)
usage = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=context.project_id).\
filter_by(user_id=context.user_id).\
filter_by(resource='security_groups')
# Create quota usage for auto created default security group
if not usage.first():
_quota_usage_create(context.project_id,
context.user_id,
'security_groups',
1, 0,
CONF.until_refresh,
context.session)
else:
usage.update({'in_use': int(usage.first().in_use) + 1})
default_rules = _security_group_rule_get_default_query(context).all()
for default_rule in default_rules:
        # This is suboptimal; the values of the default_rule should be
        # determined programmatically.
rule_values = {'protocol': default_rule.protocol,
'from_port': default_rule.from_port,
'to_port': default_rule.to_port,
'cidr': default_rule.cidr,
'parent_group_id': default_group.id,
}
_security_group_rule_create(context, rule_values)
return default_group
@require_context
@main_context_manager.writer
def security_group_destroy(context, security_group_id):
model_query(context, models.SecurityGroup).\
filter_by(id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(security_group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule).\
filter_by(group_id=security_group_id).\
soft_delete()
model_query(context, models.SecurityGroupIngressRule).\
filter_by(parent_group_id=security_group_id).\
soft_delete()
def _security_group_count_by_project_and_user(context, project_id, user_id):
nova.context.authorize_project_context(context, project_id)
return model_query(context, models.SecurityGroup, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
###################
def _security_group_rule_create(context, values):
security_group_rule_ref = models.SecurityGroupIngressRule()
security_group_rule_ref.update(values)
security_group_rule_ref.save(context.session)
return security_group_rule_ref
def _security_group_rule_get_query(context):
return model_query(context, models.SecurityGroupIngressRule)
@require_context
@main_context_manager.reader
def security_group_rule_get(context, security_group_rule_id):
result = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
first())
if not result:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
return result
@require_context
@main_context_manager.reader
def security_group_rule_get_by_security_group(context, security_group_id,
columns_to_join=None):
if columns_to_join is None:
columns_to_join = ['grantee_group.instances.system_metadata',
'grantee_group.instances.info_cache']
query = (_security_group_rule_get_query(context).
filter_by(parent_group_id=security_group_id))
for column in columns_to_join:
query = query.options(joinedload_all(column))
return query.all()
@require_context
@main_context_manager.reader
def security_group_rule_get_by_instance(context, instance_uuid):
return (_security_group_rule_get_query(context).
join('parent_group', 'instances').
filter_by(uuid=instance_uuid).
options(joinedload('grantee_group')).
all())
@require_context
@main_context_manager.writer
def security_group_rule_create(context, values):
return _security_group_rule_create(context, values)
@require_context
@main_context_manager.writer
def security_group_rule_destroy(context, security_group_rule_id):
count = (_security_group_rule_get_query(context).
filter_by(id=security_group_rule_id).
soft_delete())
if count == 0:
raise exception.SecurityGroupNotFoundForRule(
rule_id=security_group_rule_id)
@require_context
@main_context_manager.reader
def security_group_rule_count_by_group(context, security_group_id):
return (model_query(context, models.SecurityGroupIngressRule,
read_deleted="no").
filter_by(parent_group_id=security_group_id).
count())
###################
def _security_group_rule_get_default_query(context):
return model_query(context, models.SecurityGroupIngressDefaultRule)
@require_context
@main_context_manager.reader
def security_group_default_rule_get(context, security_group_rule_default_id):
result = _security_group_rule_get_default_query(context).\
filter_by(id=security_group_rule_default_id).\
first()
if not result:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
return result
@main_context_manager.writer
def security_group_default_rule_destroy(context,
security_group_rule_default_id):
count = _security_group_rule_get_default_query(context).\
filter_by(id=security_group_rule_default_id).\
soft_delete()
if count == 0:
raise exception.SecurityGroupDefaultRuleNotFound(
rule_id=security_group_rule_default_id)
@main_context_manager.writer
def security_group_default_rule_create(context, values):
security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule()
security_group_default_rule_ref.update(values)
security_group_default_rule_ref.save(context.session)
return security_group_default_rule_ref
@require_context
@main_context_manager.reader
def security_group_default_rule_list(context):
return _security_group_rule_get_default_query(context).all()
###################
@main_context_manager.writer
def provider_fw_rule_create(context, rule):
fw_rule_ref = models.ProviderFirewallRule()
fw_rule_ref.update(rule)
fw_rule_ref.save(context.session)
return fw_rule_ref
@main_context_manager.reader
def provider_fw_rule_get_all(context):
return model_query(context, models.ProviderFirewallRule).all()
@main_context_manager.writer
def provider_fw_rule_destroy(context, rule_id):
context.session.query(models.ProviderFirewallRule).\
filter_by(id=rule_id).\
soft_delete()
###################
@require_context
@main_context_manager.writer
def project_get_networks(context, project_id, associate=True):
# NOTE(tr3buchet): as before this function will associate
# a project with a network if it doesn't have one and
# associate is true
result = model_query(context, models.Network, read_deleted="no").\
filter_by(project_id=project_id).\
all()
if not result:
if not associate:
return []
return [network_associate(context, project_id)]
return result
###################
@pick_context_manager_writer
def migration_create(context, values):
migration = models.Migration()
migration.update(values)
migration.save(context.session)
return migration
@pick_context_manager_writer
def migration_update(context, id, values):
migration = migration_get(context, id)
migration.update(values)
return migration
@pick_context_manager_reader
def migration_get(context, id):
result = model_query(context, models.Migration, read_deleted="yes").\
filter_by(id=id).\
first()
if not result:
raise exception.MigrationNotFound(migration_id=id)
return result
@pick_context_manager_reader
def migration_get_by_id_and_instance(context, id, instance_uuid):
result = model_query(context, models.Migration).\
filter_by(id=id).\
filter_by(instance_uuid=instance_uuid).\
first()
if not result:
raise exception.MigrationNotFoundForInstance(migration_id=id,
instance_id=instance_uuid)
return result
@pick_context_manager_reader
def migration_get_by_instance_and_status(context, instance_uuid, status):
result = model_query(context, models.Migration, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid).\
filter_by(status=status).\
first()
if not result:
raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
status=status)
return result
@pick_context_manager_reader_allow_async
def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
dest_compute):
confirm_window = (timeutils.utcnow() -
datetime.timedelta(seconds=confirm_window))
return model_query(context, models.Migration, read_deleted="yes").\
filter(models.Migration.updated_at <= confirm_window).\
filter_by(status="finished").\
filter_by(dest_compute=dest_compute).\
all()
@pick_context_manager_reader
def migration_get_in_progress_by_host_and_node(context, host, node):
return model_query(context, models.Migration).\
filter(or_(and_(models.Migration.source_compute == host,
models.Migration.source_node == node),
and_(models.Migration.dest_compute == host,
models.Migration.dest_node == node))).\
filter(~models.Migration.status.in_(['accepted', 'confirmed',
'reverted', 'error',
'failed', 'completed'])).\
options(joinedload_all('instance.system_metadata')).\
all()
@pick_context_manager_reader
def migration_get_in_progress_by_instance(context, instance_uuid,
migration_type=None):
# TODO(Shaohe Feng) we should share the in-progress list.
    # TODO(Shaohe Feng) we will also summarize all statuses into a new
    # MigrationStatus class.
query = model_query(context, models.Migration).\
filter_by(instance_uuid=instance_uuid).\
filter(models.Migration.status.in_(['queued', 'preparing',
'running',
'post-migrating']))
if migration_type:
query = query.filter(models.Migration.migration_type == migration_type)
return query.all()
@pick_context_manager_reader
def migration_get_all_by_filters(context, filters):
query = model_query(context, models.Migration)
if "status" in filters:
status = filters["status"]
status = [status] if isinstance(status, str) else status
query = query.filter(models.Migration.status.in_(status))
if "host" in filters:
host = filters["host"]
query = query.filter(or_(models.Migration.source_compute == host,
models.Migration.dest_compute == host))
elif "source_compute" in filters:
host = filters['source_compute']
query = query.filter(models.Migration.source_compute == host)
if "migration_type" in filters:
migtype = filters["migration_type"]
query = query.filter(models.Migration.migration_type == migtype)
if "hidden" in filters:
hidden = filters["hidden"]
query = query.filter(models.Migration.hidden == hidden)
return query.all()
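# Example filters dict for the query above (values are illustrative; only
# 'status', 'host', 'source_compute', 'migration_type' and 'hidden' are
# honoured):
#
#     migration_get_all_by_filters(ctxt, {'status': ['running', 'queued'],
#                                         'host': 'compute-1',
#                                         'hidden': False})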
##################
@pick_context_manager_writer
def console_pool_create(context, values):
pool = models.ConsolePool()
pool.update(values)
try:
pool.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.ConsolePoolExists(
host=values["host"],
console_type=values["console_type"],
compute_host=values["compute_host"],
)
return pool
@pick_context_manager_reader
def console_pool_get_by_host_type(context, compute_host, host,
console_type):
result = model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
filter_by(compute_host=compute_host).\
options(joinedload('consoles')).\
first()
if not result:
raise exception.ConsolePoolNotFoundForHostType(
host=host, console_type=console_type,
compute_host=compute_host)
return result
@pick_context_manager_reader
def console_pool_get_all_by_host_type(context, host, console_type):
return model_query(context, models.ConsolePool, read_deleted="no").\
filter_by(host=host).\
filter_by(console_type=console_type).\
options(joinedload('consoles')).\
all()
##################
@pick_context_manager_writer
def console_create(context, values):
console = models.Console()
console.update(values)
console.save(context.session)
return console
@pick_context_manager_writer
def console_delete(context, console_id):
# NOTE(mdragon): consoles are meant to be transient.
context.session.query(models.Console).\
filter_by(id=console_id).\
delete()
@pick_context_manager_reader
def console_get_by_pool_instance(context, pool_id, instance_uuid):
result = model_query(context, models.Console, read_deleted="yes").\
filter_by(pool_id=pool_id).\
filter_by(instance_uuid=instance_uuid).\
options(joinedload('pool')).\
first()
if not result:
raise exception.ConsoleNotFoundInPoolForInstance(
pool_id=pool_id, instance_uuid=instance_uuid)
return result
@pick_context_manager_reader
def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(instance_uuid=instance_uuid)
if columns_to_join:
for column in columns_to_join:
query = query.options(joinedload(column))
return query.all()
@pick_context_manager_reader
def console_get(context, console_id, instance_uuid=None):
query = model_query(context, models.Console, read_deleted="yes").\
filter_by(id=console_id).\
options(joinedload('pool'))
if instance_uuid is not None:
query = query.filter_by(instance_uuid=instance_uuid)
result = query.first()
if not result:
if instance_uuid:
raise exception.ConsoleNotFoundForInstance(
console_id=console_id, instance_uuid=instance_uuid)
else:
raise exception.ConsoleNotFound(console_id=console_id)
return result
##################
@main_context_manager.writer
def flavor_create(context, values, projects=None):
"""Create a new instance type. In order to pass in extra specs,
the values dict should contain a 'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
specs = values.get('extra_specs')
specs_refs = []
if specs:
for k, v in specs.items():
specs_ref = models.InstanceTypeExtraSpecs()
specs_ref['key'] = k
specs_ref['value'] = v
specs_refs.append(specs_ref)
values['extra_specs'] = specs_refs
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
if projects is None:
projects = []
try:
instance_type_ref.save(context.session)
except db_exc.DBDuplicateEntry as e:
if 'flavorid' in e.columns:
raise exception.FlavorIdExists(flavor_id=values['flavorid'])
raise exception.FlavorExists(name=values['name'])
except Exception as e:
raise db_exc.DBError(e)
for project in set(projects):
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_ref.id,
"project_id": project})
access_ref.save(context.session)
return _dict_with_extra_specs(instance_type_ref)
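# Illustrative call (the column values are placeholders, not the full set
# of required InstanceTypes fields):
#
#     flavor_create(ctxt, {'name': 'm1.tiny', 'flavorid': '1',
#                          'memory_mb': 512, 'vcpus': 1, 'root_gb': 1,
#                          'extra_specs': {'hw:cpu_policy': 'dedicated'}},
#                   projects=['project-1'])
#
# Each extra_specs pair becomes an InstanceTypeExtraSpecs row and each
# project id becomes an InstanceTypeProjects access row.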
def _dict_with_extra_specs(inst_type_query):
"""Takes an instance or instance type query returned
by sqlalchemy and returns it as a dictionary, converting the
extra_specs entry from a list of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = {x['key']: x['value']
for x in inst_type_query['extra_specs']}
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
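# For instance, a row whose extra_specs relationship holds
# [{'key': 'hw:cpu_policy', 'value': 'dedicated'}] comes back as a plain
# dict carrying 'extra_specs': {'hw:cpu_policy': 'dedicated'}.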
def _flavor_get_query(context, read_deleted=None):
query = model_query(context, models.InstanceTypes,
read_deleted=read_deleted).\
options(joinedload('extra_specs'))
if not context.is_admin:
the_filter = [models.InstanceTypes.is_public == true()]
the_filter.extend([
models.InstanceTypes.projects.any(project_id=context.project_id)
])
query = query.filter(or_(*the_filter))
return query
@require_context
@main_context_manager.reader
def flavor_get_all(context, inactive=False, filters=None,
sort_key='flavorid', sort_dir='asc', limit=None,
marker=None):
"""Returns all flavors.
"""
filters = filters or {}
# FIXME(sirp): now that we have the `disabled` field for flavors, we
# should probably remove the use of `deleted` to mark inactive. `deleted`
# should mean truly deleted, e.g. we can safely purge the record out of the
# database.
read_deleted = "yes" if inactive else "no"
query = _flavor_get_query(context, read_deleted=read_deleted)
if 'min_memory_mb' in filters:
query = query.filter(
models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
if 'min_root_gb' in filters:
query = query.filter(
models.InstanceTypes.root_gb >= filters['min_root_gb'])
if 'disabled' in filters:
query = query.filter(
models.InstanceTypes.disabled == filters['disabled'])
if 'is_public' in filters and filters['is_public'] is not None:
the_filter = [models.InstanceTypes.is_public == filters['is_public']]
if filters['is_public'] and context.project_id is not None:
the_filter.extend([
models.InstanceTypes.projects.any(
project_id=context.project_id, deleted=0)
])
if len(the_filter) > 1:
query = query.filter(or_(*the_filter))
else:
query = query.filter(the_filter[0])
marker_row = None
if marker is not None:
marker_row = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=marker).\
first()
if not marker_row:
raise exception.MarkerNotFound(marker)
query = sqlalchemyutils.paginate_query(query, models.InstanceTypes, limit,
[sort_key, 'id'],
marker=marker_row,
sort_dir=sort_dir)
inst_types = query.all()
return [_dict_with_extra_specs(i) for i in inst_types]
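# Usage sketch (hypothetical arguments): list flavors with at least 4096 MB
# of RAM that are visible to the requesting project, 50 per page, starting
# after the flavor whose flavorid is '42':
#
#   flavor_get_all(ctxt,
#                  filters={'min_memory_mb': 4096, 'is_public': True},
#                  sort_key='flavorid', sort_dir='asc',
#                  limit=50, marker='42')
#
# If the marker flavor does not exist, MarkerNotFound is raised instead of
# returning an empty page.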
def _flavor_get_id_from_flavor_query(context, flavor_id):
return model_query(context, models.InstanceTypes,
(models.InstanceTypes.id,),
read_deleted="no").\
filter_by(flavorid=flavor_id)
def _flavor_get_id_from_flavor(context, flavor_id):
result = _flavor_get_id_from_flavor_query(context, flavor_id).first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return result[0]
@require_context
@main_context_manager.reader
def flavor_get(context, id):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(id=id).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=id)
return _dict_with_extra_specs(result)
@require_context
@main_context_manager.reader
def flavor_get_by_name(context, name):
"""Returns a dict describing specific flavor."""
result = _flavor_get_query(context).\
filter_by(name=name).\
first()
if not result:
raise exception.FlavorNotFoundByName(flavor_name=name)
return _dict_with_extra_specs(result)
@require_context
@main_context_manager.reader
def flavor_get_by_flavor_id(context, flavor_id, read_deleted):
"""Returns a dict describing specific flavor_id."""
result = _flavor_get_query(context, read_deleted=read_deleted).\
filter_by(flavorid=flavor_id).\
order_by(asc(models.InstanceTypes.deleted),
asc(models.InstanceTypes.id)).\
first()
if not result:
raise exception.FlavorNotFound(flavor_id=flavor_id)
return _dict_with_extra_specs(result)
@main_context_manager.writer
def flavor_destroy(context, name):
"""Marks specific flavor as deleted."""
ref = model_query(context, models.InstanceTypes, read_deleted="no").\
filter_by(name=name).\
first()
if not ref:
raise exception.FlavorNotFoundByName(flavor_name=name)
ref.soft_delete(context.session)
model_query(context, models.InstanceTypeExtraSpecs, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
model_query(context, models.InstanceTypeProjects, read_deleted="no").\
filter_by(instance_type_id=ref['id']).\
soft_delete()
def _flavor_access_query(context):
return model_query(context, models.InstanceTypeProjects, read_deleted="no")
@main_context_manager.reader
def flavor_access_get_by_flavor_id(context, flavor_id):
"""Get flavor access list by flavor id."""
instance_type_id_subq = _flavor_get_id_from_flavor_query(context,
flavor_id)
access_refs = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id_subq).\
all()
return access_refs
@main_context_manager.writer
def flavor_access_add(context, flavor_id, project_id):
"""Add given tenant to the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
access_ref = models.InstanceTypeProjects()
access_ref.update({"instance_type_id": instance_type_id,
"project_id": project_id})
try:
access_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.FlavorAccessExists(flavor_id=flavor_id,
project_id=project_id)
return access_ref
@main_context_manager.writer
def flavor_access_remove(context, flavor_id, project_id):
"""Remove given tenant from the flavor access list."""
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
count = _flavor_access_query(context).\
filter_by(instance_type_id=instance_type_id).\
filter_by(project_id=project_id).\
soft_delete(synchronize_session=False)
if count == 0:
raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
project_id=project_id)
def _flavor_extra_specs_get_query(context, flavor_id):
instance_type_id_subq = _flavor_get_id_from_flavor_query(context,
flavor_id)
return model_query(context, models.InstanceTypeExtraSpecs,
read_deleted="no").\
filter_by(instance_type_id=instance_type_id_subq)
@require_context
@main_context_manager.reader
def flavor_extra_specs_get(context, flavor_id):
rows = _flavor_extra_specs_get_query(context, flavor_id).all()
return {row['key']: row['value'] for row in rows}
@require_context
@main_context_manager.writer
def flavor_extra_specs_delete(context, flavor_id, key):
result = _flavor_extra_specs_get_query(context, flavor_id).\
filter(models.InstanceTypeExtraSpecs.key == key).\
soft_delete(synchronize_session=False)
# did not find the extra spec
if result == 0:
raise exception.FlavorExtraSpecsNotFound(
extra_specs_key=key, flavor_id=flavor_id)
@require_context
@main_context_manager.writer
def flavor_extra_specs_update_or_create(context, flavor_id, specs,
max_retries=10):
for attempt in range(max_retries):
try:
instance_type_id = _flavor_get_id_from_flavor(context, flavor_id)
spec_refs = model_query(context, models.InstanceTypeExtraSpecs,
read_deleted="no").\
filter_by(instance_type_id=instance_type_id).\
filter(models.InstanceTypeExtraSpecs.key.in_(specs.keys())).\
all()
existing_keys = set()
for spec_ref in spec_refs:
key = spec_ref["key"]
existing_keys.add(key)
with main_context_manager.writer.savepoint.using(context):
spec_ref.update({"value": specs[key]})
for key, value in specs.items():
if key in existing_keys:
continue
spec_ref = models.InstanceTypeExtraSpecs()
with main_context_manager.writer.savepoint.using(context):
spec_ref.update({"key": key, "value": value,
"instance_type_id": instance_type_id})
context.session.add(spec_ref)
return specs
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
if attempt == max_retries - 1:
raise exception.FlavorExtraSpecUpdateCreateFailed(
id=flavor_id, retries=max_retries)
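# Usage sketch (illustrative key/value): existing keys are updated in place,
# new keys are inserted, and the whole operation is retried up to
# max_retries times when a concurrent writer races us on the same key:
#
#   flavor_extra_specs_update_or_create(ctxt, flavor_id,
#                                       {'hw:numa_nodes': '2'})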
####################
@main_context_manager.writer
def cell_create(context, values):
cell = models.Cell()
cell.update(values)
try:
cell.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.CellExists(name=values['name'])
return cell
def _cell_get_by_name_query(context, cell_name):
return model_query(context, models.Cell).filter_by(name=cell_name)
@main_context_manager.writer
def cell_update(context, cell_name, values):
cell_query = _cell_get_by_name_query(context, cell_name)
if not cell_query.update(values):
raise exception.CellNotFound(cell_name=cell_name)
cell = cell_query.first()
return cell
@main_context_manager.writer
def cell_delete(context, cell_name):
return _cell_get_by_name_query(context, cell_name).soft_delete()
@main_context_manager.reader
def cell_get(context, cell_name):
result = _cell_get_by_name_query(context, cell_name).first()
if not result:
raise exception.CellNotFound(cell_name=cell_name)
return result
@main_context_manager.reader
def cell_get_all(context):
return model_query(context, models.Cell, read_deleted="no").all()
########################
# User-provided metadata
def _instance_metadata_get_multi(context, instance_uuids):
if not instance_uuids:
return []
return model_query(context, models.InstanceMetadata).filter(
models.InstanceMetadata.instance_uuid.in_(instance_uuids))
def _instance_metadata_get_query(context, instance_uuid):
return model_query(context, models.InstanceMetadata, read_deleted="no").\
filter_by(instance_uuid=instance_uuid)
@require_context
@pick_context_manager_reader
def instance_metadata_get(context, instance_uuid):
rows = _instance_metadata_get_query(context, instance_uuid).all()
return {row['key']: row['value'] for row in rows}
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def instance_metadata_delete(context, instance_uuid, key):
_instance_metadata_get_query(context, instance_uuid).\
filter_by(key=key).\
soft_delete()
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def instance_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
if delete:
_instance_metadata_get_query(context, instance_uuid).\
filter(~models.InstanceMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_metadata_get_query(context, instance_uuid).\
filter(models.InstanceMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
context.session.add(meta_ref)
return metadata
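# Usage sketch (hypothetical uuid): with delete=True the stored metadata is
# replaced wholesale -- keys missing from the new dict are soft-deleted;
# with delete=False the new dict is merged into what is already stored.
#
#   instance_metadata_update(ctxt, instance_uuid,
#                            {'role': 'webserver'}, delete=True)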
#######################
# System-owned metadata
def _instance_system_metadata_get_multi(context, instance_uuids):
if not instance_uuids:
return []
return model_query(context, models.InstanceSystemMetadata,
read_deleted='yes').filter(
models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids))
def _instance_system_metadata_get_query(context, instance_uuid):
return model_query(context, models.InstanceSystemMetadata).\
filter_by(instance_uuid=instance_uuid)
@require_context
@pick_context_manager_reader
def instance_system_metadata_get(context, instance_uuid):
rows = _instance_system_metadata_get_query(context, instance_uuid).all()
return {row['key']: row['value'] for row in rows}
@require_context
@pick_context_manager_writer
def instance_system_metadata_update(context, instance_uuid, metadata, delete):
all_keys = metadata.keys()
if delete:
_instance_system_metadata_get_query(context, instance_uuid).\
filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = []
meta_refs = _instance_system_metadata_get_query(context, instance_uuid).\
filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
all()
for meta_ref in meta_refs:
already_existing_keys.append(meta_ref.key)
meta_ref.update({"value": metadata[meta_ref.key]})
new_keys = set(all_keys) - set(already_existing_keys)
for key in new_keys:
meta_ref = models.InstanceSystemMetadata()
meta_ref.update({"key": key, "value": metadata[key],
"instance_uuid": instance_uuid})
context.session.add(meta_ref)
return metadata
####################
@main_context_manager.writer
def agent_build_create(context, values):
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
try:
agent_build_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.AgentBuildExists(hypervisor=values['hypervisor'],
os=values['os'], architecture=values['architecture'])
return agent_build_ref
@main_context_manager.reader
def agent_build_get_by_triple(context, hypervisor, os, architecture):
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
filter_by(os=os).\
filter_by(architecture=architecture).\
first()
@main_context_manager.reader
def agent_build_get_all(context, hypervisor=None):
if hypervisor:
return model_query(context, models.AgentBuild, read_deleted="no").\
filter_by(hypervisor=hypervisor).\
all()
else:
return model_query(context, models.AgentBuild, read_deleted="no").\
all()
@main_context_manager.writer
def agent_build_destroy(context, agent_build_id):
rows_affected = model_query(context, models.AgentBuild).filter_by(
id=agent_build_id).soft_delete()
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
@main_context_manager.writer
def agent_build_update(context, agent_build_id, values):
rows_affected = model_query(context, models.AgentBuild).\
filter_by(id=agent_build_id).\
update(values)
if rows_affected == 0:
raise exception.AgentBuildNotFound(id=agent_build_id)
####################
@require_context
@pick_context_manager_reader_allow_async
def bw_usage_get(context, uuid, start_period, mac):
values = {'start_period': start_period}
values = convert_objects_related_datetimes(values, 'start_period')
return model_query(context, models.BandwidthUsage, read_deleted="yes").\
filter_by(start_period=values['start_period']).\
filter_by(uuid=uuid).\
filter_by(mac=mac).\
first()
@require_context
@pick_context_manager_reader_allow_async
def bw_usage_get_by_uuids(context, uuids, start_period):
values = {'start_period': start_period}
values = convert_objects_related_datetimes(values, 'start_period')
return (
model_query(context, models.BandwidthUsage, read_deleted="yes").
filter(models.BandwidthUsage.uuid.in_(uuids)).
filter_by(start_period=values['start_period']).
all()
)
@require_context
@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
@pick_context_manager_writer
def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
last_ctr_in, last_ctr_out, last_refreshed=None):
if last_refreshed is None:
last_refreshed = timeutils.utcnow()
# NOTE(comstud): More often than not, we'll be updating records vs
# creating records. Optimize accordingly, trying to update existing
# records. Fall back to creation when no rows are updated.
ts_values = {'last_refreshed': last_refreshed,
'start_period': start_period}
ts_keys = ('start_period', 'last_refreshed')
ts_values = convert_objects_related_datetimes(ts_values, *ts_keys)
values = {'last_refreshed': ts_values['last_refreshed'],
'last_ctr_in': last_ctr_in,
'last_ctr_out': last_ctr_out,
'bw_in': bw_in,
'bw_out': bw_out}
bw_usage = model_query(context, models.BandwidthUsage,
read_deleted='yes').\
filter_by(start_period=ts_values['start_period']).\
filter_by(uuid=uuid).\
filter_by(mac=mac).first()
if bw_usage:
bw_usage.update(values)
return bw_usage
bwusage = models.BandwidthUsage()
bwusage.start_period = ts_values['start_period']
bwusage.uuid = uuid
bwusage.mac = mac
bwusage.last_refreshed = ts_values['last_refreshed']
bwusage.bw_in = bw_in
bwusage.bw_out = bw_out
bwusage.last_ctr_in = last_ctr_in
bwusage.last_ctr_out = last_ctr_out
try:
bwusage.save(context.session)
except db_exc.DBDuplicateEntry:
# NOTE(sirp): Possible race if two greenthreads attempt to create
# the usage entry at the same time. First one wins.
pass
return bwusage
####################
@require_context
@pick_context_manager_reader
def vol_get_usage_by_time(context, begin):
"""Return volumes usage that have been updated after a specified time."""
return model_query(context, models.VolumeUsage, read_deleted="yes").\
filter(or_(models.VolumeUsage.tot_last_refreshed == null(),
models.VolumeUsage.tot_last_refreshed > begin,
models.VolumeUsage.curr_last_refreshed == null(),
models.VolumeUsage.curr_last_refreshed > begin,
)).all()
@require_context
@pick_context_manager_writer
def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
instance_id, project_id, user_id, availability_zone,
update_totals=False):
refreshed = timeutils.utcnow()
values = {}
# NOTE(dricco): We will be mostly updating current usage records vs
# updating total or creating records. Optimize accordingly.
if not update_totals:
values = {'curr_last_refreshed': refreshed,
'curr_reads': rd_req,
'curr_read_bytes': rd_bytes,
'curr_writes': wr_req,
'curr_write_bytes': wr_bytes,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
else:
values = {'tot_last_refreshed': refreshed,
'tot_reads': models.VolumeUsage.tot_reads + rd_req,
'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
rd_bytes,
'tot_writes': models.VolumeUsage.tot_writes + wr_req,
'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
wr_bytes,
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'instance_uuid': instance_id,
'project_id': project_id,
'user_id': user_id,
'availability_zone': availability_zone}
current_usage = model_query(context, models.VolumeUsage,
read_deleted="yes").\
filter_by(volume_id=id).\
first()
if current_usage:
if (rd_req < current_usage['curr_reads'] or
rd_bytes < current_usage['curr_read_bytes'] or
wr_req < current_usage['curr_writes'] or
wr_bytes < current_usage['curr_write_bytes']):
LOG.info(_LI("Volume(%s) has lower stats then what is in "
"the database. Instance must have been rebooted "
"or crashed. Updating totals."), id)
if not update_totals:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'])
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'])
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'])
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'])
else:
values['tot_reads'] = (models.VolumeUsage.tot_reads +
current_usage['curr_reads'] +
rd_req)
values['tot_read_bytes'] = (
models.VolumeUsage.tot_read_bytes +
current_usage['curr_read_bytes'] + rd_bytes)
values['tot_writes'] = (models.VolumeUsage.tot_writes +
current_usage['curr_writes'] +
wr_req)
values['tot_write_bytes'] = (
models.VolumeUsage.tot_write_bytes +
current_usage['curr_write_bytes'] + wr_bytes)
current_usage.update(values)
current_usage.save(context.session)
context.session.refresh(current_usage)
return current_usage
vol_usage = models.VolumeUsage()
vol_usage.volume_id = id
vol_usage.instance_uuid = instance_id
vol_usage.project_id = project_id
vol_usage.user_id = user_id
vol_usage.availability_zone = availability_zone
if not update_totals:
vol_usage.curr_last_refreshed = refreshed
vol_usage.curr_reads = rd_req
vol_usage.curr_read_bytes = rd_bytes
vol_usage.curr_writes = wr_req
vol_usage.curr_write_bytes = wr_bytes
else:
vol_usage.tot_last_refreshed = refreshed
vol_usage.tot_reads = rd_req
vol_usage.tot_read_bytes = rd_bytes
vol_usage.tot_writes = wr_req
vol_usage.tot_write_bytes = wr_bytes
vol_usage.save(context.session)
return vol_usage
####################
@main_context_manager.reader
def s3_image_get(context, image_id):
"""Find local s3 image represented by the provided id."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(id=image_id).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_id)
return result
@main_context_manager.reader
def s3_image_get_by_uuid(context, image_uuid):
"""Find local s3 image represented by the provided uuid."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(uuid=image_uuid).\
first()
if not result:
raise exception.ImageNotFound(image_id=image_uuid)
return result
@main_context_manager.writer
def s3_image_create(context, image_uuid):
"""Create local s3 image represented by provided uuid."""
try:
s3_image_ref = models.S3Image()
s3_image_ref.update({'uuid': image_uuid})
s3_image_ref.save(context.session)
except Exception as e:
raise db_exc.DBError(e)
return s3_image_ref
####################
def _aggregate_get_query(context, model_class, id_field=None, id=None,
read_deleted=None):
columns_to_join = {models.Aggregate: ['_hosts', '_metadata']}
query = model_query(context, model_class, read_deleted=read_deleted)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
@main_context_manager.writer
def aggregate_create(context, values, metadata=None):
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
read_deleted='no')
aggregate = query.first()
if not aggregate:
aggregate = models.Aggregate()
aggregate.update(values)
aggregate.save(context.session)
# We don't want these to be lazy loaded later. We know there is
# nothing here since we just created this aggregate.
aggregate._hosts = []
aggregate._metadata = []
else:
raise exception.AggregateNameExists(aggregate_name=values['name'])
if metadata:
aggregate_metadata_add(context, aggregate.id, metadata)
# NOTE(pkholkin): '_metadata' attribute was updated during
# 'aggregate_metadata_add' method, so it should be expired and
# read from db
context.session.expire(aggregate, ['_metadata'])
aggregate._metadata
return aggregate
@main_context_manager.reader
def aggregate_get(context, aggregate_id):
query = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id)
aggregate = query.first()
if not aggregate:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
return aggregate
@main_context_manager.reader
def aggregate_get_by_host(context, host, key=None):
"""Return rows that match host (mandatory) and metadata key (optional).
    :param host: Matches host, and is required.
    :param key: Matches metadata key, if not None.
"""
query = model_query(context, models.Aggregate)
query = query.options(joinedload('_hosts'))
query = query.options(joinedload('_metadata'))
query = query.join('_hosts')
query = query.filter(models.AggregateHost.host == host)
if key:
query = query.join("_metadata").filter(
models.AggregateMetadata.key == key)
return query.all()
@main_context_manager.reader
def aggregate_metadata_get_by_host(context, host, key=None):
query = model_query(context, models.Aggregate)
query = query.join("_hosts")
query = query.join("_metadata")
query = query.filter(models.AggregateHost.host == host)
query = query.options(contains_eager("_metadata"))
if key:
query = query.filter(models.AggregateMetadata.key == key)
rows = query.all()
metadata = collections.defaultdict(set)
for agg in rows:
for kv in agg._metadata:
metadata[kv['key']].add(kv['value'])
return dict(metadata)
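# Return shape (illustrative): values are collected per metadata key across
# every aggregate the host belongs to, e.g.
#   {'availability_zone': {'az1'}, 'ssd': {'true'}}
# i.e. a dict mapping each metadata key to the set of values seen.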
@main_context_manager.reader
def aggregate_get_by_metadata_key(context, key):
"""Return rows that match metadata key.
    :param key: Matches metadata key.
"""
query = model_query(context, models.Aggregate)
query = query.join("_metadata")
query = query.filter(models.AggregateMetadata.key == key)
query = query.options(contains_eager("_metadata"))
query = query.options(joinedload("_hosts"))
return query.all()
@main_context_manager.writer
def aggregate_update(context, aggregate_id, values):
if "name" in values:
aggregate_by_name = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.name,
values['name'],
read_deleted='no').first())
if aggregate_by_name and aggregate_by_name.id != aggregate_id:
# there is another aggregate with the new name
raise exception.AggregateNameExists(aggregate_name=values['name'])
aggregate = (_aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id).first())
set_delete = True
if aggregate:
if "availability_zone" in values:
az = values.pop('availability_zone')
if 'metadata' not in values:
values['metadata'] = {'availability_zone': az}
set_delete = False
else:
values['metadata']['availability_zone'] = az
metadata = values.get('metadata')
if metadata is not None:
aggregate_metadata_add(context,
aggregate_id,
values.pop('metadata'),
set_delete=set_delete)
aggregate.update(values)
aggregate.save(context.session)
return aggregate_get(context, aggregate.id)
else:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
@main_context_manager.writer
def aggregate_delete(context, aggregate_id):
count = _aggregate_get_query(context,
models.Aggregate,
models.Aggregate.id,
aggregate_id).\
soft_delete()
if count == 0:
raise exception.AggregateNotFound(aggregate_id=aggregate_id)
# Delete Metadata
model_query(context, models.AggregateMetadata).\
filter_by(aggregate_id=aggregate_id).\
soft_delete()
@main_context_manager.reader
def aggregate_get_all(context):
return _aggregate_get_query(context, models.Aggregate).all()
def _aggregate_metadata_get_query(context, aggregate_id, read_deleted="yes"):
return model_query(context,
models.AggregateMetadata,
read_deleted=read_deleted).\
filter_by(aggregate_id=aggregate_id)
def aggregate_host_get_by_metadata_key(context, key):
rows = aggregate_get_by_metadata_key(context, key)
metadata = collections.defaultdict(set)
for agg in rows:
for agghost in agg._hosts:
metadata[agghost.host].add(agg._metadata[0]['value'])
return dict(metadata)
@require_aggregate_exists
@main_context_manager.reader
def aggregate_metadata_get(context, aggregate_id):
rows = model_query(context,
models.AggregateMetadata).\
filter_by(aggregate_id=aggregate_id).all()
return {r['key']: r['value'] for r in rows}
@require_aggregate_exists
@main_context_manager.writer
def aggregate_metadata_delete(context, aggregate_id, key):
count = _aggregate_get_query(context,
models.AggregateMetadata,
models.AggregateMetadata.aggregate_id,
aggregate_id).\
filter_by(key=key).\
soft_delete()
if count == 0:
raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id,
metadata_key=key)
@require_aggregate_exists
@main_context_manager.writer
def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False,
max_retries=10):
all_keys = metadata.keys()
for attempt in range(max_retries):
try:
query = _aggregate_metadata_get_query(context, aggregate_id,
read_deleted='no')
if set_delete:
query.filter(~models.AggregateMetadata.key.in_(all_keys)).\
soft_delete(synchronize_session=False)
already_existing_keys = set()
if all_keys:
query = query.filter(
models.AggregateMetadata.key.in_(all_keys))
for meta_ref in query.all():
key = meta_ref.key
meta_ref.update({"value": metadata[key]})
already_existing_keys.add(key)
new_entries = []
for key, value in metadata.items():
if key in already_existing_keys:
continue
new_entries.append({"key": key,
"value": value,
"aggregate_id": aggregate_id})
if new_entries:
context.session.execute(
models.AggregateMetadata.__table__.insert(),
new_entries)
return metadata
except db_exc.DBDuplicateEntry:
# a concurrent transaction has been committed,
# try again unless this was the last attempt
with excutils.save_and_reraise_exception() as ctxt:
if attempt < max_retries - 1:
ctxt.reraise = False
else:
msg = _("Add metadata failed for aggregate %(id)s after "
"%(retries)s retries") % {"id": aggregate_id,
"retries": max_retries}
LOG.warning(msg)
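# Usage sketch (illustrative metadata): set_delete=True makes the given dict
# authoritative -- any existing key that is not present in it is
# soft-deleted before the update/insert pass:
#
#   aggregate_metadata_add(ctxt, aggregate_id,
#                          {'availability_zone': 'az1', 'ssd': 'true'},
#                          set_delete=True)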
@require_aggregate_exists
@main_context_manager.reader
def aggregate_host_get_all(context, aggregate_id):
rows = model_query(context,
models.AggregateHost).\
filter_by(aggregate_id=aggregate_id).all()
return [r.host for r in rows]
@require_aggregate_exists
@main_context_manager.writer
def aggregate_host_delete(context, aggregate_id, host):
count = _aggregate_get_query(context,
models.AggregateHost,
models.AggregateHost.aggregate_id,
aggregate_id).\
filter_by(host=host).\
soft_delete()
if count == 0:
raise exception.AggregateHostNotFound(aggregate_id=aggregate_id,
host=host)
@require_aggregate_exists
@main_context_manager.writer
def aggregate_host_add(context, aggregate_id, host):
host_ref = models.AggregateHost()
host_ref.update({"host": host, "aggregate_id": aggregate_id})
try:
host_ref.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.AggregateHostExists(host=host,
aggregate_id=aggregate_id)
return host_ref
################
@pick_context_manager_writer
def instance_fault_create(context, values):
"""Create a new InstanceFault."""
fault_ref = models.InstanceFault()
fault_ref.update(values)
fault_ref.save(context.session)
return dict(fault_ref)
@pick_context_manager_reader
def instance_fault_get_by_instance_uuids(context, instance_uuids):
"""Get all instance faults for the provided instance_uuids."""
if not instance_uuids:
return {}
rows = model_query(context, models.InstanceFault, read_deleted='no').\
filter(models.InstanceFault.instance_uuid.in_(
instance_uuids)).\
order_by(desc("created_at"), desc("id")).\
all()
output = {}
for instance_uuid in instance_uuids:
output[instance_uuid] = []
for row in rows:
data = dict(row)
output[row['instance_uuid']].append(data)
return output
##################
@pick_context_manager_writer
def action_start(context, values):
convert_objects_related_datetimes(values, 'start_time')
action_ref = models.InstanceAction()
action_ref.update(values)
action_ref.save(context.session)
return action_ref
@pick_context_manager_writer
def action_finish(context, values):
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
query = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=values['instance_uuid']).\
filter_by(request_id=values['request_id'])
if query.update(values) != 1:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
return query.one()
@pick_context_manager_reader
def actions_get(context, instance_uuid):
"""Get all instance actions for the provided uuid."""
actions = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\
order_by(desc("created_at"), desc("id")).\
all()
return actions
@pick_context_manager_reader
def action_get_by_request_id(context, instance_uuid, request_id):
"""Get the action by request_id and given instance."""
action = _action_get_by_request_id(context, instance_uuid, request_id)
return action
def _action_get_by_request_id(context, instance_uuid, request_id):
result = model_query(context, models.InstanceAction).\
filter_by(instance_uuid=instance_uuid).\
filter_by(request_id=request_id).\
first()
return result
def _action_get_last_created_by_instance_uuid(context, instance_uuid):
result = (model_query(context, models.InstanceAction).
filter_by(instance_uuid=instance_uuid).
order_by(desc("created_at"), desc("id")).
first())
return result
@pick_context_manager_writer
def action_event_start(context, values):
"""Start an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time')
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'])
    # When nova-compute restarts, the context is generated again in the
    # init_host workflow, so its request_id differs from the request_id
    # recorded in InstanceAction and we can't find the original record by
    # request_id. Try to get the last created action so that init_instance
    # can continue to finish the recovery action, like: powering_off,
    # unpausing, and so on.
if not action and not context.project_id:
action = _action_get_last_created_by_instance_uuid(
context, values['instance_uuid'])
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
values['action_id'] = action['id']
event_ref = models.InstanceActionEvent()
event_ref.update(values)
context.session.add(event_ref)
return event_ref
@pick_context_manager_writer
def action_event_finish(context, values):
"""Finish an event on an instance action."""
convert_objects_related_datetimes(values, 'start_time', 'finish_time')
action = _action_get_by_request_id(context, values['instance_uuid'],
values['request_id'])
    # When nova-compute restarts, the context is generated again in the
    # init_host workflow, so its request_id differs from the request_id
    # recorded in InstanceAction and we can't find the original record by
    # request_id. Try to get the last created action so that init_instance
    # can continue to finish the recovery action, like: powering_off,
    # unpausing, and so on.
if not action and not context.project_id:
action = _action_get_last_created_by_instance_uuid(
context, values['instance_uuid'])
if not action:
raise exception.InstanceActionNotFound(
request_id=values['request_id'],
instance_uuid=values['instance_uuid'])
event_ref = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action['id']).\
filter_by(event=values['event']).\
first()
if not event_ref:
raise exception.InstanceActionEventNotFound(action_id=action['id'],
event=values['event'])
event_ref.update(values)
if values['result'].lower() == 'error':
action.update({'message': 'Error'})
return event_ref
@pick_context_manager_reader
def action_events_get(context, action_id):
events = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
order_by(desc("created_at"), desc("id")).\
all()
return events
@pick_context_manager_reader
def action_event_get_by_id(context, action_id, event_id):
event = model_query(context, models.InstanceActionEvent).\
filter_by(action_id=action_id).\
filter_by(id=event_id).\
first()
return event
##################
@require_context
@pick_context_manager_writer
def ec2_instance_create(context, instance_uuid, id=None):
"""Create ec2 compatible instance by provided uuid."""
ec2_instance_ref = models.InstanceIdMapping()
ec2_instance_ref.update({'uuid': instance_uuid})
if id is not None:
ec2_instance_ref.update({'id': id})
ec2_instance_ref.save(context.session)
return ec2_instance_ref
@require_context
@pick_context_manager_reader
def ec2_instance_get_by_uuid(context, instance_uuid):
result = _ec2_instance_get_query(context).\
filter_by(uuid=instance_uuid).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_uuid)
return result
@require_context
@pick_context_manager_reader
def ec2_instance_get_by_id(context, instance_id):
result = _ec2_instance_get_query(context).\
filter_by(id=instance_id).\
first()
if not result:
raise exception.InstanceNotFound(instance_id=instance_id)
return result
@require_context
@pick_context_manager_reader
def get_instance_uuid_by_ec2_id(context, ec2_id):
result = ec2_instance_get_by_id(context, ec2_id)
return result['uuid']
def _ec2_instance_get_query(context):
return model_query(context, models.InstanceIdMapping, read_deleted='yes')
##################
def _task_log_get_query(context, task_name, period_beginning,
period_ending, host=None, state=None):
values = {'period_beginning': period_beginning,
'period_ending': period_ending}
values = convert_objects_related_datetimes(values, *values.keys())
query = model_query(context, models.TaskLog).\
filter_by(task_name=task_name).\
filter_by(period_beginning=values['period_beginning']).\
filter_by(period_ending=values['period_ending'])
if host is not None:
query = query.filter_by(host=host)
if state is not None:
query = query.filter_by(state=state)
return query
@pick_context_manager_reader
def task_log_get(context, task_name, period_beginning, period_ending, host,
state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).first()
@pick_context_manager_reader
def task_log_get_all(context, task_name, period_beginning, period_ending,
host=None, state=None):
return _task_log_get_query(context, task_name, period_beginning,
period_ending, host, state).all()
@pick_context_manager_writer
def task_log_begin_task(context, task_name, period_beginning, period_ending,
host, task_items=None, message=None):
values = {'period_beginning': period_beginning,
'period_ending': period_ending}
values = convert_objects_related_datetimes(values, *values.keys())
task = models.TaskLog()
task.task_name = task_name
task.period_beginning = values['period_beginning']
task.period_ending = values['period_ending']
task.host = host
task.state = "RUNNING"
if message:
task.message = message
if task_items:
task.task_items = task_items
try:
task.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
@pick_context_manager_writer
def task_log_end_task(context, task_name, period_beginning, period_ending,
host, errors, message=None):
values = dict(state="DONE", errors=errors)
if message:
values["message"] = message
rows = _task_log_get_query(context, task_name, period_beginning,
period_ending, host).update(values)
if rows == 0:
# It's not running!
raise exception.TaskNotRunning(task_name=task_name, host=host)
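# Usage sketch (hypothetical task name and period boundaries): a periodic
# task brackets its work with these two calls; beginning the same period
# twice raises TaskAlreadyRunning and ending a period that was never begun
# raises TaskNotRunning.
#
#   task_log_begin_task(ctxt, 'instance_usage_audit', begin, end, host,
#                       task_items=42)
#   # ... do the work ...
#   task_log_end_task(ctxt, 'instance_usage_audit', begin, end, host,
#                     errors=0, message='done')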
##################
def _archive_if_instance_deleted(table, shadow_table, instances, conn,
max_rows):
"""Look for records that pertain to deleted instances, but may not be
deleted themselves. This catches cases where we delete an instance,
but leave some residue because of a failure in a cleanup path or
similar.
Logic is: if I have a column called instance_uuid, and that instance
is deleted, then I can be deleted.
"""
# NOTE(guochbo): There is a circular import, nova.db.sqlalchemy.utils
# imports nova.db.sqlalchemy.api.
from nova.db.sqlalchemy import utils as db_utils
query_insert = shadow_table.insert(inline=True).\
from_select(
[c.name for c in table.c],
sql.select(
[table],
and_(instances.c.deleted != instances.c.deleted.default.arg,
instances.c.uuid == table.c.instance_uuid)).
order_by(table.c.id).limit(max_rows))
query_delete = sql.select(
[table.c.id],
and_(instances.c.deleted != instances.c.deleted.default.arg,
instances.c.uuid == table.c.instance_uuid)).\
order_by(table.c.id).limit(max_rows)
delete_statement = db_utils.DeleteFromSelect(table, query_delete,
table.c.id)
try:
with conn.begin():
conn.execute(query_insert)
result_delete = conn.execute(delete_statement)
return result_delete.rowcount
except db_exc.DBReferenceError as ex:
LOG.warning(_LW('Failed to archive %(table)s: %(error)s'),
{'table': table.__tablename__,
'error': six.text_type(ex)})
return 0
def _archive_deleted_rows_for_table(tablename, max_rows):
"""Move up to max_rows rows from one tables to the corresponding
shadow table.
:returns: number of rows archived
"""
# NOTE(guochbo): There is a circular import, nova.db.sqlalchemy.utils
# imports nova.db.sqlalchemy.api.
from nova.db.sqlalchemy import utils as db_utils
engine = get_engine()
conn = engine.connect()
metadata = MetaData()
metadata.bind = engine
# NOTE(tdurakov): table metadata should be received
# from models, not db tables. Default value specified by SoftDeleteMixin
# is known only by models, not DB layer.
# IMPORTANT: please do not change source of metadata information for table.
table = models.BASE.metadata.tables[tablename]
shadow_tablename = _SHADOW_TABLE_PREFIX + tablename
rows_archived = 0
try:
shadow_table = Table(shadow_tablename, metadata, autoload=True)
except NoSuchTableError:
# No corresponding shadow table; skip it.
return rows_archived
if tablename == "dns_domains":
# We have one table (dns_domains) where the key is called
# "domain" rather than "id"
column = table.c.domain
else:
column = table.c.id
# NOTE(guochbo): Use DeleteFromSelect to avoid
# database's limit of maximum parameter in one SQL statement.
deleted_column = table.c.deleted
columns = [c.name for c in table.c]
    # NOTE(clecomte): Tables instance_actions and instance_actions_events
    # have to be managed differently, so we soft-delete them here to let
    # the archive work the same for all tables
if tablename == "instance_actions":
instances = models.BASE.metadata.tables["instances"]
deleted_instances = sql.select([instances.c.uuid]).\
where(instances.c.deleted != instances.c.deleted.default.arg)
update_statement = table.update().values(deleted=table.c.id).\
where(table.c.instance_uuid.in_(deleted_instances))
conn.execute(update_statement)
elif tablename == "instance_actions_events":
        # NOTE(clecomte): we have to go through the instances and
        # instance_actions tables because instance_actions_events relies
        # on action_id and not on an instance uuid
instances = models.BASE.metadata.tables["instances"]
instance_actions = models.BASE.metadata.tables["instance_actions"]
deleted_instances = sql.select([instances.c.uuid]).\
where(instances.c.deleted != instances.c.deleted.default.arg)
deleted_actions = sql.select([instance_actions.c.id]).\
where(instance_actions.c.instance_uuid.in_(deleted_instances))
update_statement = table.update().values(deleted=table.c.id).\
where(table.c.action_id.in_(deleted_actions))
conn.execute(update_statement)
insert = shadow_table.insert(inline=True).\
from_select(columns,
sql.select([table],
deleted_column != deleted_column.default.arg).
order_by(column).limit(max_rows))
query_delete = sql.select([column],
deleted_column != deleted_column.default.arg).\
order_by(column).limit(max_rows)
delete_statement = db_utils.DeleteFromSelect(table, query_delete, column)
try:
# Group the insert and delete in a transaction.
with conn.begin():
conn.execute(insert)
result_delete = conn.execute(delete_statement)
rows_archived = result_delete.rowcount
except db_exc.DBReferenceError as ex:
# A foreign key constraint keeps us from deleting some of
# these rows until we clean up a dependent table. Just
# skip this table for now; we'll come back to it later.
LOG.warning(_LW("IntegrityError detected when archiving table "
"%(tablename)s: %(error)s"),
{'tablename': tablename, 'error': six.text_type(ex)})
if ((max_rows is None or rows_archived < max_rows)
and 'instance_uuid' in columns):
instances = models.BASE.metadata.tables['instances']
limit = max_rows - rows_archived if max_rows is not None else None
extra = _archive_if_instance_deleted(table, shadow_table, instances,
conn, limit)
rows_archived += extra
return rows_archived
def archive_deleted_rows(max_rows=None):
"""Move up to max_rows rows from production tables to the corresponding
shadow tables.
:returns: dict that maps table name to number of rows archived from that
table, for example:
::
{
'instances': 5,
'block_device_mapping': 5,
'pci_devices': 2,
}
"""
table_to_rows_archived = {}
total_rows_archived = 0
meta = MetaData(get_engine(use_slave=True))
meta.reflect()
# Reverse sort the tables so we get the leaf nodes first for processing.
for table in reversed(meta.sorted_tables):
tablename = table.name
# skip the special sqlalchemy-migrate migrate_version table and any
# shadow tables
if (tablename == 'migrate_version' or
tablename.startswith(_SHADOW_TABLE_PREFIX)):
continue
rows_archived = _archive_deleted_rows_for_table(
tablename, max_rows=max_rows - total_rows_archived)
total_rows_archived += rows_archived
# Only report results for tables that had updates.
if rows_archived:
table_to_rows_archived[tablename] = rows_archived
if total_rows_archived >= max_rows:
break
return table_to_rows_archived
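# Usage sketch: this is typically driven by the nova-manage archive command
# in bounded batches; max_rows is assumed to be supplied by the caller,
# since the arithmetic above does not handle the None default.
#
#   archive_deleted_rows(max_rows=1000)
#   # -> {'instances': 5, 'instance_faults': 5, ...}   (illustrative counts)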
@main_context_manager.writer
def pcidevice_online_data_migration(context, max_count):
from nova.objects import pci_device as pci_dev_obj
count_all = 0
count_hit = 0
if not pci_dev_obj.PciDevice.should_migrate_data():
LOG.error(_LE("Data migrations for PciDevice are not safe, likely "
"because not all services that access the DB directly "
"are updated to the latest version"))
else:
results = model_query(context, models.PciDevice).filter_by(
parent_addr=None).limit(max_count)
for db_dict in results:
count_all += 1
pci_dev = pci_dev_obj.PciDevice._from_db_object(
context, pci_dev_obj.PciDevice(), db_dict)
if pci_dev.obj_what_changed():
pci_dev.save()
count_hit += 1
return count_all, count_hit
@main_context_manager.writer
def aggregate_uuids_online_data_migration(context, max_count):
from nova.objects import aggregate
count_all = 0
count_hit = 0
results = model_query(context, models.Aggregate).filter_by(
uuid=None).limit(max_count)
for db_agg in results:
count_all += 1
agg = aggregate.Aggregate._from_db_object(context,
aggregate.Aggregate(),
db_agg)
if 'uuid' in agg:
count_hit += 1
return count_all, count_hit
@main_context_manager.writer
def computenode_uuids_online_data_migration(context, max_count):
from nova.objects import compute_node
count_all = 0
count_hit = 0
results = model_query(context, models.ComputeNode).filter_by(
uuid=None).limit(max_count)
for db_cn in results:
count_all += 1
cn = compute_node.ComputeNode._from_db_object(
context, compute_node.ComputeNode(), db_cn)
if 'uuid' in cn:
count_hit += 1
return count_all, count_hit
####################
def _instance_group_get_query(context, model_class, id_field=None, id=None,
read_deleted=None):
columns_to_join = {models.InstanceGroup: ['_policies', '_members']}
query = model_query(context, model_class, read_deleted=read_deleted,
project_only=True)
for c in columns_to_join.get(model_class, []):
query = query.options(joinedload(c))
if id and id_field:
query = query.filter(id_field == id)
return query
@main_context_manager.writer
def instance_group_create(context, values, policies=None, members=None):
"""Create a new group."""
uuid = values.get('uuid', None)
if uuid is None:
uuid = uuidutils.generate_uuid()
values['uuid'] = uuid
try:
group = models.InstanceGroup()
group.update(values)
group.save(context.session)
except db_exc.DBDuplicateEntry:
raise exception.InstanceGroupIdExists(group_uuid=uuid)
# We don't want '_policies' and '_members' attributes to be lazy loaded
# later. We know there is nothing here since we just created this
# instance group.
if policies:
_instance_group_policies_add(context, group.id, policies)
else:
group._policies = []
if members:
_instance_group_members_add(context, group.id, members)
else:
group._members = []
return instance_group_get(context, uuid)
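# Usage sketch (hypothetical values):
#
#   instance_group_create(ctxt,
#                         {'name': 'web', 'user_id': ctxt.user_id,
#                          'project_id': ctxt.project_id},
#                         policies=['anti-affinity'],
#                         members=[instance_uuid])
#
# A uuid is generated when the caller does not supply one, and the group is
# re-read at the end so '_policies' and '_members' are populated.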
@main_context_manager.reader
def instance_group_get(context, group_uuid):
"""Get a specific group by uuid."""
group = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return group
@main_context_manager.reader
def instance_group_get_by_instance(context, instance_uuid):
group_member = model_query(context, models.InstanceGroupMember).\
filter_by(instance_id=instance_uuid).\
first()
if not group_member:
raise exception.InstanceGroupNotFound(group_uuid='')
group = _instance_group_get_query(context, models.InstanceGroup,
models.InstanceGroup.id,
group_member.group_id).first()
if not group:
raise exception.InstanceGroupNotFound(
group_uuid=group_member.group_id)
return group
@main_context_manager.writer
def instance_group_update(context, group_uuid, values):
"""Update the attributes of a group.
    If values contains a policies or members key, the group's policies or
    members are updated as well.
"""
group = model_query(context, models.InstanceGroup).\
filter_by(uuid=group_uuid).\
first()
if not group:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
policies = values.get('policies')
if policies is not None:
_instance_group_policies_add(context,
group.id,
values.pop('policies'),
set_delete=True)
members = values.get('members')
if members is not None:
_instance_group_members_add(context,
group.id,
values.pop('members'),
set_delete=True)
group.update(values)
if policies:
values['policies'] = policies
if members:
values['members'] = members
@main_context_manager.writer
def instance_group_delete(context, group_uuid):
"""Delete a group."""
group_id = _instance_group_id(context, group_uuid)
count = _instance_group_get_query(context,
models.InstanceGroup,
models.InstanceGroup.uuid,
group_uuid).soft_delete()
if count == 0:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
# Delete policies, metadata and members
instance_models = [models.InstanceGroupPolicy,
models.InstanceGroupMember]
for model in instance_models:
model_query(context, model).filter_by(group_id=group_id).soft_delete()
@main_context_manager.reader
def instance_group_get_all(context):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).all()
@main_context_manager.reader
def instance_group_get_all_by_project_id(context, project_id):
"""Get all groups."""
return _instance_group_get_query(context, models.InstanceGroup).\
filter_by(project_id=project_id).\
all()
def _instance_group_count_by_project_and_user(context, project_id, user_id):
return model_query(context, models.InstanceGroup, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(user_id=user_id).\
count()
def _instance_group_model_get_query(context, model_class, group_id,
read_deleted='no'):
return model_query(context,
model_class,
read_deleted=read_deleted).\
filter_by(group_id=group_id)
def _instance_group_id(context, group_uuid):
"""Returns the group database ID for the group UUID."""
result = model_query(context,
models.InstanceGroup,
(models.InstanceGroup.id,)).\
filter_by(uuid=group_uuid).\
first()
if not result:
raise exception.InstanceGroupNotFound(group_uuid=group_uuid)
return result.id
def _instance_group_members_add(context, id, members, set_delete=False):
all_members = set(members)
query = _instance_group_model_get_query(context,
models.InstanceGroupMember, id)
if set_delete:
query.filter(~models.InstanceGroupMember.instance_id.in_(
all_members)).\
soft_delete(synchronize_session=False)
query = query.filter(
models.InstanceGroupMember.instance_id.in_(all_members))
already_existing = set()
for member_ref in query.all():
already_existing.add(member_ref.instance_id)
for instance_id in members:
if instance_id in already_existing:
continue
member_ref = models.InstanceGroupMember()
member_ref.update({'instance_id': instance_id,
'group_id': id})
context.session.add(member_ref)
return members
@main_context_manager.writer
def instance_group_members_add(context, group_uuid, members,
set_delete=False):
id = _instance_group_id(context, group_uuid)
return _instance_group_members_add(context, id, members,
set_delete=set_delete)
@main_context_manager.writer
def instance_group_member_delete(context, group_uuid, instance_id):
id = _instance_group_id(context, group_uuid)
count = _instance_group_model_get_query(context,
models.InstanceGroupMember,
id).\
filter_by(instance_id=instance_id).\
soft_delete()
if count == 0:
raise exception.InstanceGroupMemberNotFound(group_uuid=group_uuid,
instance_id=instance_id)
@main_context_manager.reader
def instance_group_members_get(context, group_uuid):
id = _instance_group_id(context, group_uuid)
instances = model_query(context,
models.InstanceGroupMember,
(models.InstanceGroupMember.instance_id,)).\
filter_by(group_id=id).all()
return [instance[0] for instance in instances]
def _instance_group_policies_add(context, id, policies, set_delete=False):
allpols = set(policies)
query = _instance_group_model_get_query(context,
models.InstanceGroupPolicy, id)
if set_delete:
query.filter(~models.InstanceGroupPolicy.policy.in_(allpols)).\
soft_delete(synchronize_session=False)
query = query.filter(models.InstanceGroupPolicy.policy.in_(allpols))
already_existing = set()
for policy_ref in query.all():
already_existing.add(policy_ref.policy)
for policy in policies:
if policy in already_existing:
continue
policy_ref = models.InstanceGroupPolicy()
policy_ref.update({'policy': policy,
'group_id': id})
context.session.add(policy_ref)
return policies
####################
@pick_context_manager_reader
def pci_device_get_by_addr(context, node_id, dev_addr):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=dev_addr).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFound(node_id=node_id, address=dev_addr)
return pci_dev_ref
@pick_context_manager_reader
def pci_device_get_by_id(context, id):
pci_dev_ref = model_query(context, models.PciDevice).\
filter_by(id=id).\
first()
if not pci_dev_ref:
raise exception.PciDeviceNotFoundById(id=id)
return pci_dev_ref
@pick_context_manager_reader
def pci_device_get_all_by_node(context, node_id):
return model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
all()
@pick_context_manager_reader
def pci_device_get_all_by_parent_addr(context, node_id, parent_addr):
return model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(parent_addr=parent_addr).\
all()
@require_context
@pick_context_manager_reader
def pci_device_get_all_by_instance_uuid(context, instance_uuid):
return model_query(context, models.PciDevice).\
filter_by(status='allocated').\
filter_by(instance_uuid=instance_uuid).\
all()
@pick_context_manager_reader
def _instance_pcidevs_get_multi(context, instance_uuids):
if not instance_uuids:
return []
return model_query(context, models.PciDevice).\
filter_by(status='allocated').\
filter(models.PciDevice.instance_uuid.in_(instance_uuids))
@pick_context_manager_writer
def pci_device_destroy(context, node_id, address):
result = model_query(context, models.PciDevice).\
filter_by(compute_node_id=node_id).\
filter_by(address=address).\
soft_delete()
if not result:
raise exception.PciDeviceNotFound(node_id=node_id, address=address)
@pick_context_manager_writer
def pci_device_update(context, node_id, address, values):
query = model_query(context, models.PciDevice, read_deleted="no").\
filter_by(compute_node_id=node_id).\
filter_by(address=address)
if query.update(values) == 0:
device = models.PciDevice()
device.update(values)
context.session.add(device)
return query.one()
####################
@pick_context_manager_writer
def instance_tag_add(context, instance_uuid, tag):
tag_ref = models.Tag()
tag_ref.resource_id = instance_uuid
tag_ref.tag = tag
try:
_check_instance_exists_in_project(context, instance_uuid)
with get_context_manager(context).writer.savepoint.using(context):
context.session.add(tag_ref)
except db_exc.DBDuplicateEntry:
        # NOTE(snikitin): We should ignore duplicate tags
pass
return tag_ref
@pick_context_manager_writer
def instance_tag_set(context, instance_uuid, tags):
_check_instance_exists_in_project(context, instance_uuid)
existing = context.session.query(models.Tag.tag).filter_by(
resource_id=instance_uuid).all()
existing = set(row.tag for row in existing)
tags = set(tags)
to_delete = existing - tags
to_add = tags - existing
if to_delete:
context.session.query(models.Tag).filter_by(
resource_id=instance_uuid).filter(
models.Tag.tag.in_(to_delete)).delete(
synchronize_session=False)
if to_add:
data = [
{'resource_id': instance_uuid, 'tag': tag} for tag in to_add]
context.session.execute(models.Tag.__table__.insert(), data)
return context.session.query(models.Tag).filter_by(
resource_id=instance_uuid).all()
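# Usage sketch (hypothetical tags): the stored tag set is diffed against the
# requested set, so only missing tags are inserted and only surplus tags are
# deleted:
#
#   instance_tag_set(ctxt, instance_uuid, ['db', 'ssd'])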
@pick_context_manager_reader
def instance_tag_get_by_instance_uuid(context, instance_uuid):
_check_instance_exists_in_project(context, instance_uuid)
return context.session.query(models.Tag).filter_by(
resource_id=instance_uuid).all()
@pick_context_manager_writer
def instance_tag_delete(context, instance_uuid, tag):
_check_instance_exists_in_project(context, instance_uuid)
result = context.session.query(models.Tag).filter_by(
resource_id=instance_uuid, tag=tag).delete()
if not result:
raise exception.InstanceTagNotFound(instance_id=instance_uuid,
tag=tag)
@pick_context_manager_writer
def instance_tag_delete_all(context, instance_uuid):
_check_instance_exists_in_project(context, instance_uuid)
context.session.query(models.Tag).filter_by(
resource_id=instance_uuid).delete()
@main_context_manager.reader
def instance_tag_exists(context, instance_uuid, tag):
_check_instance_exists_in_project(context, instance_uuid)
q = context.session.query(models.Tag).filter_by(
resource_id=instance_uuid, tag=tag)
return context.session.query(q.exists()).scalar()
| apache-2.0 |
tkdchen/Nitrate | src/tcms/core/views/search.py | 2 | 1377 | # -*- coding: utf-8 -*-
import re
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.views.decorators.http import require_GET
from django.http import Http404
from tcms.testplans.models import TestPlan
from tcms.testcases.models import TestCase
from tcms.testruns.models import TestRun
@require_GET
def search(request):
"""
    Redirect to the correct URL for the search content.
"""
models = {"plans": TestPlan, "cases": TestCase, "runs": TestRun}
search_content = request.GET.get("search_content")
search_type = request.GET.get("search_type")
if not search_content or not search_type:
raise Http404
if search_type not in models:
raise Http404
try_to_get_object = re.match(r"^\d+$", search_content) is not None
model = models[search_type]
if try_to_get_object:
pk = int(search_content)
objects = model.objects.filter(pk=pk).only("pk")
if objects:
return HttpResponseRedirect(
reverse(
"{}-get".format(model._meta.app_label.strip("s").replace("test", "")),
args=[pk],
)
)
url = "{}?a=search&search={}".format(
reverse("{}-all".format(model._meta.app_label.replace("test", ""))),
search_content,
)
return HttpResponseRedirect(url)
| gpl-2.0 |
michhar/flask-webapp-aml | env/Lib/site-packages/werkzeug/test.py | 36 | 34131 | # -*- coding: utf-8 -*-
"""
werkzeug.test
~~~~~~~~~~~~~
This module implements a client to WSGI applications for testing.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import mimetypes
from time import time
from random import random
from itertools import chain
from tempfile import TemporaryFile
from io import BytesIO
try:
from urllib2 import Request as U2Request
except ImportError:
from urllib.request import Request as U2Request
try:
from http.cookiejar import CookieJar
except ImportError: # Py2
from cookielib import CookieJar
from werkzeug._compat import iterlists, iteritems, itervalues, to_bytes, \
string_types, text_type, reraise, wsgi_encoding_dance, \
make_literal_wrapper
from werkzeug._internal import _empty_stream, _get_environ
from werkzeug.wrappers import BaseRequest
from werkzeug.urls import url_encode, url_fix, iri_to_uri, url_unquote, \
url_unparse, url_parse
from werkzeug.wsgi import get_host, get_current_url, ClosingIterator
from werkzeug.utils import dump_cookie
from werkzeug.datastructures import FileMultiDict, MultiDict, \
CombinedMultiDict, Headers, FileStorage
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
boundary=None, charset='utf-8'):
"""Encode a dict of values (either strings or file descriptors or
:class:`FileStorage` objects.) into a multipart encoded string stored
in a file descriptor.
"""
if boundary is None:
boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
_closure = [BytesIO(), 0, False]
if use_tempfile:
def write_binary(string):
stream, total_length, on_disk = _closure
if on_disk:
stream.write(string)
else:
length = len(string)
if length + _closure[1] <= threshold:
stream.write(string)
else:
new_stream = TemporaryFile('wb+')
new_stream.write(stream.getvalue())
new_stream.write(string)
_closure[0] = new_stream
_closure[2] = True
_closure[1] = total_length + length
else:
write_binary = _closure[0].write
def write(string):
write_binary(string.encode(charset))
if not isinstance(values, MultiDict):
values = MultiDict(values)
for key, values in iterlists(values):
for value in values:
write('--%s\r\nContent-Disposition: form-data; name="%s"' %
(boundary, key))
reader = getattr(value, 'read', None)
if reader is not None:
filename = getattr(value, 'filename',
getattr(value, 'name', None))
content_type = getattr(value, 'content_type', None)
if content_type is None:
content_type = filename and \
mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
if filename is not None:
write('; filename="%s"\r\n' % filename)
else:
write('\r\n')
write('Content-Type: %s\r\n\r\n' % content_type)
while 1:
chunk = reader(16384)
if not chunk:
break
write_binary(chunk)
else:
if not isinstance(value, string_types):
value = str(value)
value = to_bytes(value, charset)
write('\r\n\r\n')
write_binary(value)
write('\r\n')
write('--%s--\r\n' % boundary)
length = int(_closure[0].tell())
_closure[0].seek(0)
return _closure[0], length, boundary
def encode_multipart(values, boundary=None, charset='utf-8'):
"""Like `stream_encode_multipart` but returns a tuple in the form
(``boundary``, ``data``) where data is a bytestring.
"""
stream, length, boundary = stream_encode_multipart(
values, use_tempfile=False, boundary=boundary, charset=charset)
return boundary, stream.read()
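# --- Illustrative usage sketch (added for clarity; not part of werkzeug) ---
# A minimal example of encoding a text field plus a file upload; the field
# names and payload below are made up.
def _encode_multipart_example():
    boundary, data = encode_multipart({
        'name': 'example',
        'upload': FileStorage(BytesIO(b'hello world'), filename='hello.txt',
                              content_type='text/plain'),
    })
    # `data` is the request body; the matching header would be
    # Content-Type: multipart/form-data; boundary="<boundary>"
    return boundary, data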
def File(fd, filename=None, mimetype=None):
"""Backwards compat."""
from warnings import warn
warn(DeprecationWarning('werkzeug.test.File is deprecated, use the '
'EnvironBuilder or FileStorage instead'))
return FileStorage(fd, filename=filename, content_type=mimetype)
class _TestCookieHeaders(object):
"""A headers adapter for cookielib
"""
def __init__(self, headers):
self.headers = headers
def getheaders(self, name):
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
def get_all(self, name, default=None):
rv = []
for k, v in self.headers:
if k.lower() == name.lower():
rv.append(v)
return rv or default or []
class _TestCookieResponse(object):
"""Something that looks like a httplib.HTTPResponse, but is actually just an
adapter for our test responses to make them available for cookielib.
"""
def __init__(self, headers):
self.headers = _TestCookieHeaders(headers)
def info(self):
return self.headers
class _TestCookieJar(CookieJar):
"""A cookielib.CookieJar modified to inject and read cookie headers from
and to wsgi environments, and wsgi application responses.
"""
def inject_wsgi(self, environ):
"""Inject the cookies as client headers into the server's wsgi
environment.
"""
cvals = []
for cookie in self:
cvals.append('%s=%s' % (cookie.name, cookie.value))
if cvals:
environ['HTTP_COOKIE'] = '; '.join(cvals)
def extract_wsgi(self, environ, headers):
"""Extract the server's set-cookie headers as cookies into the
cookie jar.
"""
self.extract_cookies(
_TestCookieResponse(headers),
U2Request(get_current_url(environ)),
)
def _iter_data(data):
"""Iterates over a dict or multidict yielding all keys and values.
This is used to iterate over the data passed to the
:class:`EnvironBuilder`.
"""
if isinstance(data, MultiDict):
for key, values in iterlists(data):
for value in values:
yield key, value
else:
for key, values in iteritems(data):
if isinstance(values, list):
for value in values:
yield key, value
else:
yield key, values
class EnvironBuilder(object):
"""This class can be used to conveniently create a WSGI environment
for testing purposes. It can be used to quickly create WSGI environments
or request objects from arbitrary data.
The signature of this class is also used in some other places as of
Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
:meth:`Client.open`). Because of this most of the functionality is
available through the constructor alone.
Files and regular form data can be manipulated independently of each
other with the :attr:`form` and :attr:`files` attributes, but are
passed with the same argument to the constructor: `data`.
`data` can be any of these values:
- a `str`: If it's a string it is converted into a :attr:`input_stream`,
the :attr:`content_length` is set and you have to provide a
:attr:`content_type`.
- a `dict`: If it's a dict the keys have to be strings and the values
any of the following objects:
- a :class:`file`-like object. These are converted into
:class:`FileStorage` objects automatically.
- a tuple. The :meth:`~FileMultiDict.add_file` method is called
with the tuple items as positional arguments.
.. versionadded:: 0.6
`path` and `base_url` can now be unicode strings that are encoded using
the :func:`iri_to_uri` function.
:param path: the path of the request. In the WSGI environment this will
end up as `PATH_INFO`. If the `query_string` is not defined
and there is a question mark in the `path` everything after
it is used as query string.
:param base_url: the base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).
:param query_string: an optional string or dict with URL parameters.
:param method: the HTTP method to use, defaults to `GET`.
:param input_stream: an optional input stream. Do not specify this and
`data`. As soon as an input stream is set you can't
modify :attr:`args` and :attr:`files` unless you
set the :attr:`input_stream` to `None` again.
:param content_type: The content type for the request. As of 0.5 you
don't have to provide this when specifying files
and form data via `data`.
:param content_length: The content length for the request. You don't
have to specify this when providing data via
`data`.
:param errors_stream: an optional error stream that is used for
`wsgi.errors`. Defaults to :data:`stderr`.
:param multithread: controls `wsgi.multithread`. Defaults to `False`.
:param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
:param run_once: controls `wsgi.run_once`. Defaults to `False`.
:param headers: an optional list or :class:`Headers` object of headers.
:param data: a string or dict of form data. See explanation above.
:param environ_base: an optional dict of environment defaults.
:param environ_overrides: an optional dict of environment overrides.
:param charset: the charset used to encode unicode data.
"""
#: the server protocol to use. defaults to HTTP/1.1
server_protocol = 'HTTP/1.1'
#: the wsgi version to use. defaults to (1, 0)
wsgi_version = (1, 0)
#: the default request class for :meth:`get_request`
request_class = BaseRequest
def __init__(self, path='/', base_url=None, query_string=None,
method='GET', input_stream=None, content_type=None,
content_length=None, errors_stream=None, multithread=False,
multiprocess=False, run_once=False, headers=None, data=None,
environ_base=None, environ_overrides=None, charset='utf-8'):
path_s = make_literal_wrapper(path)
if query_string is None and path_s('?') in path:
path, query_string = path.split(path_s('?'), 1)
self.charset = charset
self.path = iri_to_uri(path)
if base_url is not None:
base_url = url_fix(iri_to_uri(base_url, charset), charset)
self.base_url = base_url
if isinstance(query_string, (bytes, text_type)):
self.query_string = query_string
else:
if query_string is None:
query_string = MultiDict()
elif not isinstance(query_string, MultiDict):
query_string = MultiDict(query_string)
self.args = query_string
self.method = method
if headers is None:
headers = Headers()
elif not isinstance(headers, Headers):
headers = Headers(headers)
self.headers = headers
if content_type is not None:
self.content_type = content_type
if errors_stream is None:
errors_stream = sys.stderr
self.errors_stream = errors_stream
self.multithread = multithread
self.multiprocess = multiprocess
self.run_once = run_once
self.environ_base = environ_base
self.environ_overrides = environ_overrides
self.input_stream = input_stream
self.content_length = content_length
self.closed = False
if data:
if input_stream is not None:
raise TypeError('can\'t provide input stream and data')
if isinstance(data, text_type):
data = data.encode(self.charset)
if isinstance(data, bytes):
self.input_stream = BytesIO(data)
if self.content_length is None:
self.content_length = len(data)
else:
for key, value in _iter_data(data):
if isinstance(value, (tuple, dict)) or \
hasattr(value, 'read'):
self._add_file_from_data(key, value)
else:
self.form.setlistdefault(key).append(value)
def _add_file_from_data(self, key, value):
"""Called in the EnvironBuilder to add files from the data dict."""
if isinstance(value, tuple):
self.files.add_file(key, *value)
elif isinstance(value, dict):
from warnings import warn
warn(DeprecationWarning('it\'s no longer possible to pass dicts '
'as `data`. Use tuples or FileStorage '
'objects instead'), stacklevel=2)
value = dict(value)
mimetype = value.pop('mimetype', None)
if mimetype is not None:
value['content_type'] = mimetype
self.files.add_file(key, **value)
else:
self.files.add_file(key, value)
def _get_base_url(self):
return url_unparse((self.url_scheme, self.host,
self.script_root, '', '')).rstrip('/') + '/'
def _set_base_url(self, value):
if value is None:
scheme = 'http'
netloc = 'localhost'
script_root = ''
else:
scheme, netloc, script_root, qs, anchor = url_parse(value)
if qs or anchor:
raise ValueError('base url must not contain a query string '
'or fragment')
self.script_root = script_root.rstrip('/')
self.host = netloc
self.url_scheme = scheme
base_url = property(_get_base_url, _set_base_url, doc='''
The base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).''')
del _get_base_url, _set_base_url
def _get_content_type(self):
ct = self.headers.get('Content-Type')
if ct is None and not self._input_stream:
if self._files:
return 'multipart/form-data'
elif self._form:
return 'application/x-www-form-urlencoded'
return None
return ct
def _set_content_type(self, value):
if value is None:
self.headers.pop('Content-Type', None)
else:
self.headers['Content-Type'] = value
content_type = property(_get_content_type, _set_content_type, doc='''
The content type for the request. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_type, _set_content_type
def _get_content_length(self):
return self.headers.get('Content-Length', type=int)
def _set_content_length(self, value):
if value is None:
self.headers.pop('Content-Length', None)
else:
self.headers['Content-Length'] = str(value)
content_length = property(_get_content_length, _set_content_length, doc='''
The content length as integer. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_length, _set_content_length
def form_property(name, storage, doc):
key = '_' + name
def getter(self):
if self._input_stream is not None:
raise AttributeError('an input stream is defined')
rv = getattr(self, key)
if rv is None:
rv = storage()
setattr(self, key, rv)
return rv
def setter(self, value):
self._input_stream = None
setattr(self, key, value)
return property(getter, setter, doc=doc)
form = form_property('form', MultiDict, doc='''
A :class:`MultiDict` of form values.''')
files = form_property('files', FileMultiDict, doc='''
A :class:`FileMultiDict` of uploaded files. You can use the
:meth:`~FileMultiDict.add_file` method to add new files to the
dict.''')
del form_property
def _get_input_stream(self):
return self._input_stream
def _set_input_stream(self, value):
self._input_stream = value
self._form = self._files = None
input_stream = property(_get_input_stream, _set_input_stream, doc='''
An optional input stream. If you set this it will clear
:attr:`form` and :attr:`files`.''')
del _get_input_stream, _set_input_stream
def _get_query_string(self):
if self._query_string is None:
if self._args is not None:
return url_encode(self._args, charset=self.charset)
return ''
return self._query_string
def _set_query_string(self, value):
self._query_string = value
self._args = None
query_string = property(_get_query_string, _set_query_string, doc='''
The query string. If you set this to a string :attr:`args` will
no longer be available.''')
del _get_query_string, _set_query_string
def _get_args(self):
if self._query_string is not None:
raise AttributeError('a query string is defined')
if self._args is None:
self._args = MultiDict()
return self._args
def _set_args(self, value):
self._query_string = None
self._args = value
args = property(_get_args, _set_args, doc='''
The URL arguments as :class:`MultiDict`.''')
del _get_args, _set_args
@property
def server_name(self):
"""The server name (read-only, use :attr:`host` to set)"""
return self.host.split(':', 1)[0]
@property
def server_port(self):
"""The server port as integer (read-only, use :attr:`host` to set)"""
pieces = self.host.split(':', 1)
if len(pieces) == 2 and pieces[1].isdigit():
return int(pieces[1])
elif self.url_scheme == 'https':
return 443
return 80
def __del__(self):
try:
self.close()
except Exception:
pass
def close(self):
"""Closes all files. If you put real :class:`file` objects into the
:attr:`files` dict you can call this method to automatically close
them all in one go.
"""
if self.closed:
return
try:
files = itervalues(self.files)
except AttributeError:
files = ()
for f in files:
try:
f.close()
except Exception:
pass
self.closed = True
def get_environ(self):
"""Return the built environ."""
input_stream = self.input_stream
content_length = self.content_length
content_type = self.content_type
if input_stream is not None:
start_pos = input_stream.tell()
input_stream.seek(0, 2)
end_pos = input_stream.tell()
input_stream.seek(start_pos)
content_length = end_pos - start_pos
elif content_type == 'multipart/form-data':
values = CombinedMultiDict([self.form, self.files])
input_stream, content_length, boundary = \
stream_encode_multipart(values, charset=self.charset)
content_type += '; boundary="%s"' % boundary
elif content_type == 'application/x-www-form-urlencoded':
# XXX: py2v3 review
values = url_encode(self.form, charset=self.charset)
values = values.encode('ascii')
content_length = len(values)
input_stream = BytesIO(values)
else:
input_stream = _empty_stream
result = {}
if self.environ_base:
result.update(self.environ_base)
def _path_encode(x):
return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset)
qs = wsgi_encoding_dance(self.query_string)
result.update({
'REQUEST_METHOD': self.method,
'SCRIPT_NAME': _path_encode(self.script_root),
'PATH_INFO': _path_encode(self.path),
'QUERY_STRING': qs,
'SERVER_NAME': self.server_name,
'SERVER_PORT': str(self.server_port),
'HTTP_HOST': self.host,
'SERVER_PROTOCOL': self.server_protocol,
'CONTENT_TYPE': content_type or '',
'CONTENT_LENGTH': str(content_length or '0'),
'wsgi.version': self.wsgi_version,
'wsgi.url_scheme': self.url_scheme,
'wsgi.input': input_stream,
'wsgi.errors': self.errors_stream,
'wsgi.multithread': self.multithread,
'wsgi.multiprocess': self.multiprocess,
'wsgi.run_once': self.run_once
})
for key, value in self.headers.to_wsgi_list():
result['HTTP_%s' % key.upper().replace('-', '_')] = value
if self.environ_overrides:
result.update(self.environ_overrides)
return result
def get_request(self, cls=None):
"""Returns a request with the data. If the request class is not
specified :attr:`request_class` is used.
:param cls: The request wrapper to use.
"""
if cls is None:
cls = self.request_class
return cls(self.get_environ())
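# --- Illustrative usage sketch (added for clarity; not part of werkzeug) ---
# A minimal example of building a WSGI environ and a request object; the
# path and form values are made up.
def _environ_builder_example():
    builder = EnvironBuilder(path='/login', method='POST',
                             data={'username': 'alice', 'password': 'secret'})
    try:
        environ = builder.get_environ()   # a plain WSGI environ dict
        request = builder.get_request()   # a BaseRequest wrapping a fresh environ
    finally:
        builder.close()
    return environ, request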
class ClientRedirectError(Exception):
"""
If a redirect loop is detected when using follow_redirects=True with
    the :class:`Client`, then this exception is raised.
"""
class Client(object):
"""This class allows to send requests to a wrapped application.
The response wrapper can be a class or factory function that takes
three arguments: app_iter, status and headers. The default response
wrapper just returns a tuple.
Example::
class ClientResponse(BaseResponse):
...
client = Client(MyApplication(), response_wrapper=ClientResponse)
The use_cookies parameter indicates whether cookies should be stored and
sent for subsequent requests. This is True by default, but passing False
will disable this behaviour.
    If you want to request some subdomain of your application you may set
    `allow_subdomain_redirects` to `True`; otherwise no external redirects
    are allowed.
.. versionadded:: 0.5
`use_cookies` is new in this version. Older versions did not provide
builtin cookie support.
"""
def __init__(self, application, response_wrapper=None, use_cookies=True,
allow_subdomain_redirects=False):
self.application = application
self.response_wrapper = response_wrapper
if use_cookies:
self.cookie_jar = _TestCookieJar()
else:
self.cookie_jar = None
self.allow_subdomain_redirects = allow_subdomain_redirects
def set_cookie(self, server_name, key, value='', max_age=None,
expires=None, path='/', domain=None, secure=None,
httponly=False, charset='utf-8'):
"""Sets a cookie in the client's cookie jar. The server name
is required and has to match the one that is also passed to
the open call.
"""
assert self.cookie_jar is not None, 'cookies disabled'
header = dump_cookie(key, value, max_age, expires, path, domain,
secure, httponly, charset)
environ = create_environ(path, base_url='http://' + server_name)
headers = [('Set-Cookie', header)]
self.cookie_jar.extract_wsgi(environ, headers)
def delete_cookie(self, server_name, key, path='/', domain=None):
"""Deletes a cookie in the test client."""
self.set_cookie(server_name, key, expires=0, max_age=0,
path=path, domain=domain)
def run_wsgi_app(self, environ, buffered=False):
"""Runs the wrapped WSGI app with the given environment."""
if self.cookie_jar is not None:
self.cookie_jar.inject_wsgi(environ)
rv = run_wsgi_app(self.application, environ, buffered=buffered)
if self.cookie_jar is not None:
self.cookie_jar.extract_wsgi(environ, rv[2])
return rv
def resolve_redirect(self, response, new_location, environ, buffered=False):
"""Resolves a single redirect and triggers the request again
directly on this redirect client.
"""
scheme, netloc, script_root, qs, anchor = url_parse(new_location)
base_url = url_unparse((scheme, netloc, '', '', '')).rstrip('/') + '/'
cur_server_name = netloc.split(':', 1)[0].split('.')
real_server_name = get_host(environ).rsplit(':', 1)[0].split('.')
if self.allow_subdomain_redirects:
allowed = cur_server_name[-len(real_server_name):] == real_server_name
else:
allowed = cur_server_name == real_server_name
if not allowed:
raise RuntimeError('%r does not support redirect to '
'external targets' % self.__class__)
status_code = int(response[1].split(None, 1)[0])
if status_code == 307:
method = environ['REQUEST_METHOD']
else:
method = 'GET'
# For redirect handling we temporarily disable the response
# wrapper. This is not threadsafe but not a real concern
# since the test client must not be shared anyways.
old_response_wrapper = self.response_wrapper
self.response_wrapper = None
try:
return self.open(path=script_root, base_url=base_url,
query_string=qs, as_tuple=True,
buffered=buffered, method=method)
finally:
self.response_wrapper = old_response_wrapper
def open(self, *args, **kwargs):
"""Takes the same arguments as the :class:`EnvironBuilder` class with
some additions: You can provide a :class:`EnvironBuilder` or a WSGI
environment as only argument instead of the :class:`EnvironBuilder`
arguments and two optional keyword arguments (`as_tuple`, `buffered`)
that change the type of the return value or the way the application is
executed.
.. versionchanged:: 0.5
If a dict is provided as file in the dict for the `data` parameter
the content type has to be called `content_type` now instead of
`mimetype`. This change was made for consistency with
:class:`werkzeug.FileWrapper`.
The `follow_redirects` parameter was added to :func:`open`.
Additional parameters:
:param as_tuple: Returns a tuple in the form ``(environ, result)``
:param buffered: Set this to True to buffer the application run.
This will automatically close the application for
you as well.
:param follow_redirects: Set this to True if the `Client` should
follow HTTP redirects.
"""
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
follow_redirects = kwargs.pop('follow_redirects', False)
environ = None
if not kwargs and len(args) == 1:
if isinstance(args[0], EnvironBuilder):
environ = args[0].get_environ()
elif isinstance(args[0], dict):
environ = args[0]
if environ is None:
builder = EnvironBuilder(*args, **kwargs)
try:
environ = builder.get_environ()
finally:
builder.close()
response = self.run_wsgi_app(environ, buffered=buffered)
# handle redirects
redirect_chain = []
while 1:
status_code = int(response[1].split(None, 1)[0])
if status_code not in (301, 302, 303, 305, 307) \
or not follow_redirects:
break
new_location = response[2]['location']
new_redirect_entry = (new_location, status_code)
if new_redirect_entry in redirect_chain:
raise ClientRedirectError('loop detected')
redirect_chain.append(new_redirect_entry)
environ, response = self.resolve_redirect(response, new_location,
environ,
buffered=buffered)
if self.response_wrapper is not None:
response = self.response_wrapper(*response)
if as_tuple:
return environ, response
return response
def get(self, *args, **kw):
"""Like open but method is enforced to GET."""
kw['method'] = 'GET'
return self.open(*args, **kw)
def patch(self, *args, **kw):
"""Like open but method is enforced to PATCH."""
kw['method'] = 'PATCH'
return self.open(*args, **kw)
def post(self, *args, **kw):
"""Like open but method is enforced to POST."""
kw['method'] = 'POST'
return self.open(*args, **kw)
def head(self, *args, **kw):
"""Like open but method is enforced to HEAD."""
kw['method'] = 'HEAD'
return self.open(*args, **kw)
def put(self, *args, **kw):
"""Like open but method is enforced to PUT."""
kw['method'] = 'PUT'
return self.open(*args, **kw)
def delete(self, *args, **kw):
"""Like open but method is enforced to DELETE."""
kw['method'] = 'DELETE'
return self.open(*args, **kw)
def options(self, *args, **kw):
"""Like open but method is enforced to OPTIONS."""
kw['method'] = 'OPTIONS'
return self.open(*args, **kw)
def trace(self, *args, **kw):
"""Like open but method is enforced to TRACE."""
kw['method'] = 'TRACE'
return self.open(*args, **kw)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.application
)
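# --- Illustrative usage sketch (added for clarity; not part of werkzeug) ---
# A minimal example of exercising a trivial WSGI application with the test
# client; the application itself is made up. Without a response_wrapper the
# client returns the plain (app_iter, status, headers) tuple.
def _client_example():
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'Hello World!']
    client = Client(hello_app)
    app_iter, status, headers = client.get('/')
    return b''.join(app_iter), status, headers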
def create_environ(*args, **kwargs):
"""Create a new WSGI environ dict based on the values passed. The first
parameter should be the path of the request which defaults to '/'. The
second one can either be an absolute path (in that case the host is
    localhost:80) or a full path to the request with scheme, netloc, port and
the path to the script.
This accepts the same arguments as the :class:`EnvironBuilder`
constructor.
.. versionchanged:: 0.5
This function is now a thin wrapper over :class:`EnvironBuilder` which
was added in 0.5. The `headers`, `environ_base`, `environ_overrides`
and `charset` parameters were added.
"""
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_environ()
finally:
builder.close()
def run_wsgi_app(app, environ, buffered=False):
"""Return a tuple in the form (app_iter, status, headers) of the
application output. This works best if you pass it an application that
returns an iterator all the time.
Sometimes applications may use the `write()` callable returned
by the `start_response` function. This tries to resolve such edge
cases automatically. But if you don't get the expected output you
should set `buffered` to `True` which enforces buffering.
If passed an invalid WSGI application the behavior of this function is
undefined. Never pass non-conforming WSGI applications to this function.
:param app: the application to execute.
:param buffered: set to `True` to enforce buffering.
:return: tuple in the form ``(app_iter, status, headers)``
"""
environ = _get_environ(environ)
response = []
buffer = []
def start_response(status, headers, exc_info=None):
if exc_info is not None:
reraise(*exc_info)
response[:] = [status, headers]
return buffer.append
app_rv = app(environ, start_response)
close_func = getattr(app_rv, 'close', None)
app_iter = iter(app_rv)
# when buffering we emit the close call early and convert the
# application iterator into a regular list
if buffered:
try:
app_iter = list(app_iter)
finally:
if close_func is not None:
close_func()
# otherwise we iterate the application iter until we have a response, chain
# the already received data with the already collected data and wrap it in
# a new `ClosingIterator` if we need to restore a `close` callable from the
# original return value.
else:
while not response:
buffer.append(next(app_iter))
if buffer:
app_iter = chain(buffer, app_iter)
if close_func is not None and app_iter is not app_rv:
app_iter = ClosingIterator(app_iter, close_func)
return app_iter, response[0], Headers(response[1])
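# --- Illustrative usage sketch (added for clarity; not part of werkzeug) ---
# A minimal example combining create_environ() and run_wsgi_app(); the
# application is made up.
def _run_wsgi_app_example():
    def echo_method_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [environ['REQUEST_METHOD'].encode('ascii')]
    environ = create_environ('/ping', 'http://localhost/')
    app_iter, status, headers = run_wsgi_app(echo_method_app, environ,
                                             buffered=True)
    return b''.join(app_iter), status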
| mit |
LIS/lis-tempest | setup.py | 334 | 1028 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr>=1.8'],
pbr=True)
| apache-2.0 |
ojengwa/migrate | ibu/backends/messages.py | 2 | 2183 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Levels
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CRITICAL = 50
class CheckMessage(object):
def __init__(self, level, msg, hint=None, obj=None, id=None):
assert isinstance(level, int), "The first argument should be level."
self.level = level
self.msg = msg
self.hint = hint
self.obj = obj
self.id = id
def __eq__(self, other):
return all(getattr(self, attr) == getattr(other, attr)
for attr in ['level', 'msg', 'hint', 'obj', 'id'])
def __ne__(self, other):
return not (self == other)
def __str__(self):
from django.db import models
if self.obj is None:
obj = "?"
elif isinstance(self.obj, models.base.ModelBase):
            # We need to hardcode the ModelBase case because its __str__
            # method doesn't return "applabel.modellabel" and cannot be
            # changed.
obj = self.obj._meta.label
else:
obj = self.obj
id = "(%s) " % self.id if self.id else ""
hint = "\n\tHINT: %s" % self.hint if self.hint else ''
return "%s: %s%s%s" % (obj, id, self.msg, hint)
def __repr__(self):
return "<%s: level=%r, msg=%r, hint=%r, obj=%r, id=%r>" % \
(self.__class__.__name__, self.level,
self.msg, self.hint, self.obj, self.id)
def is_serious(self, level=ERROR):
return self.level >= level
class Debug(CheckMessage):
def __init__(self, *args, **kwargs):
super(Debug, self).__init__(DEBUG, *args, **kwargs)
class Info(CheckMessage):
def __init__(self, *args, **kwargs):
super(Info, self).__init__(INFO, *args, **kwargs)
class Warning(CheckMessage):
def __init__(self, *args, **kwargs):
super(Warning, self).__init__(WARNING, *args, **kwargs)
class Error(CheckMessage):
def __init__(self, *args, **kwargs):
super(Error, self).__init__(ERROR, *args, **kwargs)
class Critical(CheckMessage):
def __init__(self, *args, **kwargs):
super(Critical, self).__init__(CRITICAL, *args, **kwargs)
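# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The message text, hint and id below are made up.
def _example_check_message():
    err = Error("Field 'slug' has no index.",
                hint="Consider adding db_index=True.",
                id="example.E001")
    return err.is_serious(), repr(err)   # (True, "<Error: level=40, ...>")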
| mit |
MAPC/cedac | cedac/wsgi.py | 1 | 1132 | """
WSGI config for cedac project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cedac.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| bsd-3-clause |
firmlyjin/brython | www/tests/unittests/test/make_ssl_certs.py | 89 | 1886 | """Make the custom certificate and private key files used by test_ssl
and friends."""
import os
import sys
import tempfile
from subprocess import *
req_template = """
[req]
distinguished_name = req_distinguished_name
x509_extensions = req_x509_extensions
prompt = no
[req_distinguished_name]
C = XY
L = Castle Anthrax
O = Python Software Foundation
CN = {hostname}
[req_x509_extensions]
subjectAltName = DNS:{hostname}
"""
here = os.path.abspath(os.path.dirname(__file__))
def make_cert_key(hostname):
tempnames = []
for i in range(3):
with tempfile.NamedTemporaryFile(delete=False) as f:
tempnames.append(f.name)
req_file, cert_file, key_file = tempnames
try:
with open(req_file, 'w') as f:
f.write(req_template.format(hostname=hostname))
args = ['req', '-new', '-days', '3650', '-nodes', '-x509',
'-newkey', 'rsa:1024', '-keyout', key_file,
'-out', cert_file, '-config', req_file]
check_call(['openssl'] + args)
with open(cert_file, 'r') as f:
cert = f.read()
with open(key_file, 'r') as f:
key = f.read()
return cert, key
finally:
for name in tempnames:
os.remove(name)
if __name__ == '__main__':
os.chdir(here)
cert, key = make_cert_key('localhost')
with open('ssl_cert.pem', 'w') as f:
f.write(cert)
with open('ssl_key.pem', 'w') as f:
f.write(key)
with open('keycert.pem', 'w') as f:
f.write(key)
f.write(cert)
# For certificate matching tests
cert, key = make_cert_key('fakehostname')
with open('keycert2.pem', 'w') as f:
f.write(key)
f.write(cert)
| bsd-3-clause |
oaastest/azure-linux-extensions | OSPatching/test/check.py | 8 | 1497 | #!/usr/bin/python
#
# OSPatching extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.4+
import os
import sys
import datetime
def main():
intervalOfWeeks = int(sys.argv[1])
if intervalOfWeeks == 1:
sys.exit(0)
history_scheduled = os.path.join(os.getcwd(), 'scheduled/history')
today = datetime.date.today()
today_dayOfWeek = today.strftime("%a")
last_scheduled_date = None
with open(history_scheduled) as f:
lines = f.readlines()
lines.reverse()
for line in lines:
line = line.strip()
if line.endswith(today_dayOfWeek):
last_scheduled_date = datetime.datetime.strptime(line, '%Y-%m-%d %a')
break
if last_scheduled_date is not None and last_scheduled_date.date() + datetime.timedelta(days=intervalOfWeeks*7) > today:
sys.exit(1)
else:
sys.exit(0)
if __name__ == '__main__':
main()
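# --- Illustrative note (added for clarity; not part of the original script) ---
# Worked example: with intervalOfWeeks=2 and a last matching history entry of
# "2014-06-02 Mon", running on 2014-06-09 exits 1 (still inside the two-week
# window, so patching is skipped), while running on 2014-06-16 exits 0 (the
# interval has elapsed, so patching proceeds).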
| apache-2.0 |
hahalml/newfies-dialer | newfies/dialer_gateway/models.py | 7 | 5615 | #
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2012 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <[email protected]>
#
from django.db import models
from django.utils.translation import ugettext_lazy as _
from common.intermediate_model_base_class import Model
GATEWAY_STATUS = (
(1, _('ACTIVE')),
(0, _('INACTIVE')),
)
GATEWAY_PROTOCOL = (
('SIP', _('SIP')),
('LOCAL', _('LOCAL')),
('GSM', _('GSM')),
('SKINNY', _('SKINNY')),
('JINGLE', _('JINGLE')),
)
"""
class GatewayGroup(Model):
name = models.CharField(max_length=90)
description = models.TextField(null=True, blank=True,
help_text=_("Short description \
about the Gateway Group"))
created_date = models.DateTimeField(auto_now_add=True, verbose_name='Date')
updated_date = models.DateTimeField(auto_now=True)
class Meta:
db_table = u'dialer_gateway_group'
verbose_name = _("Dialer Gateway Group")
verbose_name_plural = _("Dialer Gateway Groups")
def __unicode__(self):
return u"%s" % self.name
"""
class Gateway(Model):
"""This defines the trunk to deliver the Voip Calls.
Each of the Gateways are routes that support different protocols and
sets of rules to alter the dialed number.
**Attributes**:
* ``name`` - Gateway name.
* ``description`` - Description about the Gateway.
* ``addprefix`` - Add prefix.
* ``removeprefix`` - Remove prefix.
* ``gateways`` - "user/,user/", # Gateway string to try dialing \
separated by comma. First in the list will be tried first
* ``gateway_codecs`` - "'PCMA,PCMU','PCMA,PCMU'", \
# Codec string as needed by FS for each gateway separated by comma
* ``gateway_timeouts`` - "10,10", \
# Seconds to timeout in string for each gateway separated by comma
* ``gateway_retries`` - "2,1", \
# Retry String for Gateways separated by comma, \
on how many times each gateway should be retried
* ``originate_dial_string`` - originate_dial_string
* ``secondused`` -
* ``failover`` -
* ``addparameter`` -
* ``count_call`` -
* ``count_in_use`` -
* ``maximum_call`` -
* ``status`` - Gateway status
**Name of DB table**: dialer_gateway
"""
name = models.CharField(unique=True, max_length=255, verbose_name=_('Name'),
help_text=_("Gateway name"))
status = models.IntegerField(choices=GATEWAY_STATUS, default='1',
verbose_name=_("Gateway Status"), blank=True, null=True)
description = models.TextField(verbose_name=_('Description'), blank=True,
help_text=_("Gateway provider notes"))
addprefix = models.CharField(verbose_name=_('Add prefix'),
max_length=60, blank=True)
removeprefix = models.CharField(verbose_name=_('Remove prefix'),
max_length=60, blank=True)
gateways = models.CharField(max_length=500, verbose_name=_("Gateways"),
help_text=_('Example : "sofia/gateway/myprovider/" or 2 for failover "sofia/gateway/myprovider/, user/", # Gateway string to try dialing separated by comma. First in list will be tried first'))
gateway_codecs = models.CharField(max_length=500, blank=True, verbose_name=_("Gateway codecs"),
help_text=_('"\'PCMA,PCMU\',\'PCMA,PCMU\'", # Codec string as needed by FS for each gateway separated by comma'))
gateway_timeouts = models.CharField(max_length=500, blank=True, verbose_name=_("Gateway timeouts"),
help_text=_('"10,10", # Seconds to timeout in string for each gateway separated by comma'))
gateway_retries = models.CharField(max_length=500, blank=True, verbose_name=_("Gateway retries"),
help_text=_('"2,1", # Retry String for Gateways separated by comma, on how many times each gateway should be retried'))
originate_dial_string = models.CharField(max_length=500, blank=True, verbose_name=_("Originate dial string"),
help_text=_('Add Channels Variables : http://wiki.freeswitch.org/wiki/Channel_Variables, ie: bridge_early_media=true,hangup_after_bridge=true'))
secondused = models.IntegerField(null=True, blank=True, verbose_name=_("Second used"))
created_date = models.DateTimeField(auto_now_add=True, verbose_name=_('Date'))
updated_date = models.DateTimeField(auto_now=True)
failover = models.ForeignKey('self', null=True, blank=True,
related_name="Failover Gateway", help_text=_("Select Gateway"))
addparameter = models.CharField(verbose_name=_('Add parameter'),
max_length=360, blank=True)
count_call = models.IntegerField(null=True, blank=True, verbose_name=_("Call count"))
count_in_use = models.IntegerField(null=True, blank=True, verbose_name=_("Count in use"))
maximum_call = models.IntegerField(verbose_name=_('Max concurrent calls'),
null=True, blank=True)
#gatewaygroup = models.ManyToManyField(GatewayGroup)
class Meta:
db_table = u'dialer_gateway'
verbose_name = _("Dialer Gateway")
verbose_name_plural = _("Dialer Gateways")
def __unicode__(self):
return u"%s" % self.name
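# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The provider values below are hypothetical.
def _example_gateway():
    # Build (but do not save) a Gateway pointing at a single SIP provider.
    return Gateway(
        name='my-sip-provider',
        gateways='sofia/gateway/myprovider/',
        gateway_codecs="'PCMA,PCMU'",
        gateway_timeouts='10',
        gateway_retries='2',
        maximum_call=30,
    )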
| mpl-2.0 |
DARKPOP/external_chromium_org | native_client_sdk/src/tools/create_html.py | 52 | 5938 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates simple HTML for running a NaCl module.
This script is designed to make the process of creating running
Native Client executables in the browers simple by creating
boilderplate a .html (and optionally a .nmf) file for a given
Native Client executable (.nexe).
If the script if given a .nexe file it will produce both html
the nmf files. If it is given an nmf it will only create
the html file.
"""
import optparse
import os
import sys
import subprocess
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
HTML_TEMPLATE = '''\
<!DOCTYPE html>
<!--
Sample html container for embedded NaCl module. This file was auto-generated
by the create_html tool which is part of the NaCl SDK.
The embed tag is setup with PS_STDOUT, PS_STDERR and PS_TTY_PREFIX attributes
which, for applications linked with ppapi_simple, will cause stdout and stderr
to be sent to javascript via postMessage. Also, the postMessage listener
assumes that all messages sent via postMessage are strings to be displayed in
the output textarea.
-->
<html>
<head>
<meta http-equiv="Pragma" content="no-cache">
<meta http-equiv="Expires" content="-1">
<title>%(title)s</title>
</head>
<body>
<h2>Native Client Module: %(module_name)s</h2>
<p>Status: <code id="status">Loading</code></p>
<div id="listener">
<embed id="nacl_module" name="%(module_name)s" src="%(nmf)s"
type="application/x-nacl" width=640 height=480
PS_TTY_PREFIX="tty:"
PS_STDOUT="/dev/tty"
         PS_STDERR="/dev/tty" />
</div>
<p>Standard output/error:</p>
<textarea id="stdout" rows="25" cols="80">
</textarea>
<script>
listenerDiv = document.getElementById("listener")
stdout = document.getElementById("stdout")
nacl_module = document.getElementById("nacl_module")
function updateStatus(message) {
document.getElementById("status").innerHTML = message
}
function addToStdout(message) {
stdout.value += message;
stdout.scrollTop = stdout.scrollHeight;
}
function handleMessage(message) {
var payload = message.data;
var prefix = "tty:";
if (typeof(payload) == 'string' && payload.indexOf(prefix) == 0) {
addToStdout(payload.slice(prefix.length));
}
}
function handleCrash(event) {
updateStatus("Crashed/exited with status: " + nacl_module.exitStatus)
}
function handleLoad(event) {
updateStatus("Loaded")
}
listenerDiv.addEventListener("load", handleLoad, true);
listenerDiv.addEventListener("message", handleMessage, true);
listenerDiv.addEventListener("crash", handleCrash, true);
</script>
</body>
</html>
'''
class Error(Exception):
pass
def Log(msg):
if Log.enabled:
sys.stderr.write(str(msg) + '\n')
Log.enabled = False
def CreateHTML(filenames, options):
nmf = None
for filename in filenames:
if not os.path.exists(filename):
raise Error('file not found: %s' % filename)
if not os.path.isfile(filename):
raise Error('specified input is not a file: %s' % filename)
basename, ext = os.path.splitext(filename)
if ext not in ('.nexe', '.pexe', '.nmf'):
raise Error('input file must be .nexe, .pexe or .nmf: %s' % filename)
if ext == '.nmf':
if len(filenames) > 1:
raise Error('Only one .nmf argument can be specified')
nmf = filename
elif len(filenames) > 1 and not options.output:
      raise Error('When specifying multiple input files -o must'
' also be specified.')
htmlfile = options.output
if not htmlfile:
htmlfile = basename + '.html'
basename = os.path.splitext(os.path.basename(htmlfile))[0]
if not nmf:
nmf = os.path.splitext(htmlfile)[0] + '.nmf'
Log('creating nmf: %s' % nmf)
create_nmf = os.path.join(SCRIPT_DIR, 'create_nmf.py')
staging = os.path.dirname(nmf)
if not staging:
staging = '.'
cmd = [create_nmf, '-s', staging, '-o', nmf] + filenames
if options.verbose:
cmd.append('-v')
if options.debug_libs:
cmd.append('--debug-libs')
Log(cmd)
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
raise Error('create_nmf failed')
Log('creating html: %s' % htmlfile)
with open(htmlfile, 'w') as outfile:
args = {}
args['title'] = basename
args['module_name'] = basename
args['nmf'] = os.path.basename(nmf)
outfile.write(HTML_TEMPLATE % args)
def main(argv):
usage = 'Usage: %prog [options] <.nexe/.pexe or .nmf>'
epilog = 'Example: create_html.py -o index.html my_nexe.nexe'
parser = optparse.OptionParser(usage, description=__doc__, epilog=epilog)
parser.add_option('-v', '--verbose', action='store_true',
help='Verbose output')
parser.add_option('-d', '--debug-libs', action='store_true',
                    help='When calling create_nmf request debug libraries')
parser.add_option('-o', '--output', dest='output',
help='Name of html file to write (default is '
'input name with .html extension)',
metavar='FILE')
# To enable bash completion for this command first install optcomplete
# and then add this line to your .bashrc:
# complete -F _optcomplete create_html.py
try:
import optcomplete
optcomplete.autocomplete(parser)
except ImportError:
pass
options, args = parser.parse_args(argv)
if not args:
parser.error('no input file specified')
if options.verbose:
Log.enabled = True
CreateHTML(args, options)
return 0
if __name__ == '__main__':
try:
rtn = main(sys.argv[1:])
except Error, e:
sys.stderr.write('%s: %s\n' % (os.path.basename(__file__), e))
rtn = 1
except KeyboardInterrupt:
sys.stderr.write('%s: interrupted\n' % os.path.basename(__file__))
rtn = 1
sys.exit(rtn)
| bsd-3-clause |
0x726d77/storm | storm-client/src/py/storm/DistributedRPCInvocations.py | 20 | 26118 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:utf8strings
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import logging
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def result(self, id, result):
"""
Parameters:
- id
- result
"""
pass
def fetchRequest(self, functionName):
"""
Parameters:
- functionName
"""
pass
def failRequest(self, id):
"""
Parameters:
- id
"""
pass
def failRequestV2(self, id, e):
"""
Parameters:
- id
- e
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot is not None:
self._oprot = oprot
self._seqid = 0
def result(self, id, result):
"""
Parameters:
- id
- result
"""
self.send_result(id, result)
self.recv_result()
def send_result(self, id, result):
self._oprot.writeMessageBegin('result', TMessageType.CALL, self._seqid)
args = result_args()
args.id = id
args.result = result
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_result(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = result_result()
result.read(iprot)
iprot.readMessageEnd()
if result.aze is not None:
raise result.aze
return
def fetchRequest(self, functionName):
"""
Parameters:
- functionName
"""
self.send_fetchRequest(functionName)
return self.recv_fetchRequest()
def send_fetchRequest(self, functionName):
self._oprot.writeMessageBegin('fetchRequest', TMessageType.CALL, self._seqid)
args = fetchRequest_args()
args.functionName = functionName
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_fetchRequest(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = fetchRequest_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
if result.aze is not None:
raise result.aze
raise TApplicationException(TApplicationException.MISSING_RESULT, "fetchRequest failed: unknown result")
def failRequest(self, id):
"""
Parameters:
- id
"""
self.send_failRequest(id)
self.recv_failRequest()
def send_failRequest(self, id):
self._oprot.writeMessageBegin('failRequest', TMessageType.CALL, self._seqid)
args = failRequest_args()
args.id = id
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_failRequest(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = failRequest_result()
result.read(iprot)
iprot.readMessageEnd()
if result.aze is not None:
raise result.aze
return
def failRequestV2(self, id, e):
"""
Parameters:
- id
- e
"""
self.send_failRequestV2(id, e)
self.recv_failRequestV2()
def send_failRequestV2(self, id, e):
self._oprot.writeMessageBegin('failRequestV2', TMessageType.CALL, self._seqid)
args = failRequestV2_args()
args.id = id
args.e = e
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_failRequestV2(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = failRequestV2_result()
result.read(iprot)
iprot.readMessageEnd()
if result.aze is not None:
raise result.aze
return
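# --- Illustrative usage sketch (added for clarity; not generated by Thrift) ---
# A minimal example of talking to a DRPC invocations endpoint. Host, port and
# function name are hypothetical (3773 is Storm's default invocations port),
# and no authentication plugin is assumed.
def _example_invocations_client(host='localhost', port=3773):
    from thrift.transport import TSocket
    socket = TSocket.TSocket(host, port)
    transport = TTransport.TFramedTransport(socket)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = Client(protocol)
    transport.open()
    try:
        request = client.fetchRequest('exclamation')   # a DRPCRequest
        client.result(request.request_id, 'some result!')
    finally:
        transport.close()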
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["result"] = Processor.process_result
self._processMap["fetchRequest"] = Processor.process_fetchRequest
self._processMap["failRequest"] = Processor.process_failRequest
self._processMap["failRequestV2"] = Processor.process_failRequestV2
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_result(self, seqid, iprot, oprot):
args = result_args()
args.read(iprot)
iprot.readMessageEnd()
result = result_result()
try:
self._handler.result(args.id, args.result)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except AuthorizationException as aze:
msg_type = TMessageType.REPLY
result.aze = aze
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("result", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_fetchRequest(self, seqid, iprot, oprot):
args = fetchRequest_args()
args.read(iprot)
iprot.readMessageEnd()
result = fetchRequest_result()
try:
result.success = self._handler.fetchRequest(args.functionName)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except AuthorizationException as aze:
msg_type = TMessageType.REPLY
result.aze = aze
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("fetchRequest", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_failRequest(self, seqid, iprot, oprot):
args = failRequest_args()
args.read(iprot)
iprot.readMessageEnd()
result = failRequest_result()
try:
self._handler.failRequest(args.id)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except AuthorizationException as aze:
msg_type = TMessageType.REPLY
result.aze = aze
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("failRequest", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_failRequestV2(self, seqid, iprot, oprot):
args = failRequestV2_args()
args.read(iprot)
iprot.readMessageEnd()
result = failRequestV2_result()
try:
self._handler.failRequestV2(args.id, args.e)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except AuthorizationException as aze:
msg_type = TMessageType.REPLY
result.aze = aze
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("failRequestV2", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class result_args:
"""
Attributes:
- id
- result
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
(2, TType.STRING, 'result', None, None, ), # 2
)
def __init__(self, id=None, result=None,):
self.id = id
self.result = result
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.result = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('result_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id.encode('utf-8'))
oprot.writeFieldEnd()
if self.result is not None:
oprot.writeFieldBegin('result', TType.STRING, 2)
oprot.writeString(self.result.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.id)
value = (value * 31) ^ hash(self.result)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class result_result:
"""
Attributes:
- aze
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
)
def __init__(self, aze=None,):
self.aze = aze
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('result_result')
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 1)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.aze)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class fetchRequest_args:
"""
Attributes:
- functionName
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'functionName', None, None, ), # 1
)
def __init__(self, functionName=None,):
self.functionName = functionName
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.functionName = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('fetchRequest_args')
if self.functionName is not None:
oprot.writeFieldBegin('functionName', TType.STRING, 1)
oprot.writeString(self.functionName.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.functionName)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class fetchRequest_result:
"""
Attributes:
- success
- aze
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (DRPCRequest, DRPCRequest.thrift_spec), None, ), # 0
(1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
)
def __init__(self, success=None, aze=None,):
self.success = success
self.aze = aze
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = DRPCRequest()
self.success.read(iprot)
else:
iprot.skip(ftype)
elif fid == 1:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('fetchRequest_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 1)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.success)
value = (value * 31) ^ hash(self.aze)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class failRequest_args:
"""
Attributes:
- id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
)
def __init__(self, id=None,):
self.id = id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('failRequest_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id.encode('utf-8'))
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.id)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class failRequest_result:
"""
Attributes:
- aze
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
)
def __init__(self, aze=None,):
self.aze = aze
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('failRequest_result')
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 1)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.aze)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class failRequestV2_args:
"""
Attributes:
- id
- e
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'id', None, None, ), # 1
(2, TType.STRUCT, 'e', (DRPCExecutionException, DRPCExecutionException.thrift_spec), None, ), # 2
)
def __init__(self, id=None, e=None,):
self.id = id
self.e = e
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.id = iprot.readString().decode('utf-8')
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.e = DRPCExecutionException()
self.e.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('failRequestV2_args')
if self.id is not None:
oprot.writeFieldBegin('id', TType.STRING, 1)
oprot.writeString(self.id.encode('utf-8'))
oprot.writeFieldEnd()
if self.e is not None:
oprot.writeFieldBegin('e', TType.STRUCT, 2)
self.e.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.id)
value = (value * 31) ^ hash(self.e)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class failRequestV2_result:
"""
Attributes:
- aze
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'aze', (AuthorizationException, AuthorizationException.thrift_spec), None, ), # 1
)
def __init__(self, aze=None,):
self.aze = aze
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.aze = AuthorizationException()
self.aze.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('failRequestV2_result')
if self.aze is not None:
oprot.writeFieldBegin('aze', TType.STRUCT, 1)
self.aze.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.aze)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
| apache-2.0 |
nikhilprathapani/python-for-android | python-modules/zope/zope/interface/tests/test_sorting.py | 50 | 1398 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test interface sorting
$Id: test_sorting.py 110699 2010-04-09 08:16:17Z regebro $
"""
from unittest import TestCase, TestSuite, main, makeSuite
from zope.interface import Interface
class I1(Interface): pass
class I2(I1): pass
class I3(I1): pass
class I4(Interface): pass
class I5(I4): pass
class I6(I2): pass
class Test(TestCase):
def test(self):
l = [I1, I3, I5, I6, I4, I2]
l.sort()
self.assertEqual(l, [I1, I2, I3, I4, I5, I6])
def test_w_None(self):
l = [I1, None, I3, I5, I6, I4, I2]
l.sort()
self.assertEqual(l, [I1, I2, I3, I4, I5, I6, None])
def test_suite():
return TestSuite((
makeSuite(Test),
))
if __name__=='__main__':
main(defaultTest='test_suite')
| apache-2.0 |
jkthompson/nupic | py/regions/ImageSensorFilters/GaborFilter.py | 2 | 63268 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file defines GaborFilter, an ImageSensor filter that applies
one or more gabor filter masks to incoming images.
"""
import os
import shutil
import math
import numpy
from PIL import (Image,
ImageChops)
from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter
from nupic.math import GetNTAReal
# Separable convolution is not supported in the public release
try:
from nupic.bindings.algorithms import (Float32SeparableConvolution2D,
Float32Rotation45)
except:
pass
dtype = GetNTAReal()
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# GaborFilter
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
class GaborFilter(BaseFilter):
"""
Apply a bank of Gabor filters to the original image, and
return one or more images (same dimensions as the original)
containing the Gabor responses.
"""
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Public API methods
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
def __init__(self, gaborBankParams=None,
debugMode=False,
debugOutputDir='gabor.d',
suppressLobes=False,
wipeOutsideMask=False,
convolutionMethod='1D'):
"""
@param gaborBankParams -- A list of sub-lists that specify the parameters of
the individual Gabor filters that comprise the filter bank.
Each sub-list contains one or more dicts; each such dict specifies
a particular Gabor filter. The responses of each Gabor filter in
a particular sub-list are combined together additively (using an
L2 norm) to generate a single response image.
Thus, the total number of response images generated by GaborFilter
will equal the length of the main 'gaborBankParams' list. The total
number of individual Gabor filters used to generate these responses
will equal to the total number of dicts contained in all of the
sub-lists of 'gaborBankParams'.
Each of the dicts is expected to contain the following
required keys:
'scale': an integer between 3 and 18 that specifies the 'radius'
of the gabor filter. For example, if 'scale' is set to 5,
then the resulting filter will have size 11x11 pixels.
'orient': the orientation of the filter, in degrees, with zero
corresponding to vertical, and increasing in a clockwise
manner (e.g., a value of 90 produces a horizontally-oriented
filter; a value of 45 degrees produces a diagonally-oriented
filter along the southwest-to-northeast axis, etc.)
Legal values are: 0, 45, 90, and 135.
'phase': the phase of the filter, in degrees. Legal values are: 0,
90, 180, and 270. Phases of 0 and 180 produce single-lobed
symmetric gabor filters that are generally selective to lines.
Phases of 90 and 270 produce dual-lobed (asymmetric)
gabor filters that are generally selective to edges.
'mode': specifies the post-processing mode to apply to the raw
filter output. Legal values are:
'raw' Perform no post-processing. Outputs will be in (-1.0, +1.0)
'abs' Output is absolute value of raw response. Output will
lie in range (0.0, +1.0)
'positive' Clip negative raw response values to zero. Output will lie
in range (0.0, +1.0)
'rectified' Output is square of input. Output will lie in the
range (0.0, +1.0)
'power' Positive raw responses are squared; negative raw responses
are clipped to zero. Output will be in (0.0, +1.0)
'hardFull' Apply a hard threshold to the input; if raw response is
>= 'threshold', then output is 1.0, else output is -1.0.
Output lies in (-1.0, +1.0)
'hardHalf' Apply a hard threshold to the input; if raw response is
>= 'threshold', then output is 1.0, else output is 0.0.
Output lies in (0.0, +1.0)
'sigmoidFull': Apply a sigmoidal threshold function to the input using
'threshold' and 'steepness'. Output lies in (-1.0, +1.0).
'sigmoidHalf': Apply a sigmoidal threshold function to the input using
'threshold' and 'steepness'. Output lies in (0.0, +1.0).
'name': a human-meaningful name (primarily used for debugging purposes.)
In addition, the following key is required if 'mode' is set to 'hardFull',
'hardHalf', 'sigmoidFull', or 'sigmoidHalf'
'threshold': the threshold to use for either the hard ('hardFull' or 'hardHalf')
or soft ('sigmoidFull' or 'sigmoidHalf') thresholding post-processing modes.
In addition, the following key is required if 'mode' is set to 'sigmoidFull'
or 'sigmoidHalf':
'steepness': controls the steepness of the sigmoidal function that performs
post-processing on the raw response.
If 'gaborBankParams' is None, then a default set of filter
bank parameters will be used. This default set will consist of 36
separate responses, each computed from a single underlying Gabor filter.
@param debugMode -- A boolean flag indicating whether or not the filter
should output debugging information to a file (default is False.)
@param debugOutputDir -- The name of the output directory that will be
created to store gabor response images in the event that the
debugging flag 'debugMode' is set to True. Defaults to 'gabor.d'
@param suppressLobes -- A boolean flag indicating whether or not the secondary
lobes of the gabor filters should be zeroed out and thus removed
from the mask. Defaults to False.
@param convolutionMethod -- Method to use for convolving filters with images.
'2D' is straightforward convolution of 2D filter with image. Other options
are 'FFT' for convolution by multiplication in Fourier space, and '1D' for
convolution with two 1D filters formed from a separable 2D filter. '1D'
convolution is not yet available in our public release.
"""
BaseFilter.__init__(self)
# Debugging
self._debugMode = debugMode
self._debugOutputDir = debugOutputDir
self._suppressLobes = suppressLobes
self._wipeOutsideMask = wipeOutsideMask
self._convolutionMethod = convolutionMethod
# Separable convolution is not supported in the public release
if self._convolutionMethod == '1D':
try:
Float32SeparableConvolution2D()
Float32Rotation45()
except:
self._convolutionMethod = 'FFT'
if self._convolutionMethod == '1D':
# Create an instance of the rotation class
self._rotation = Float32Rotation45()
self._debugCompositeDir = os.path.join(self._debugOutputDir, 'composite')
# Store the parameter set
if gaborBankParams is None:
gaborBankParams = self.getDefaultParamSet()
self._gaborBankParams = gaborBankParams
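    # Illustrative sketch (not part of the original code): a minimal
    # 'gaborBankParams' with one response map built from a single vertical,
    # edge-selective filter could look like the following (values are
    # hypothetical but satisfy the constraints documented above):
    #
    #   gaborBankParams = [
    #     [
    #       {'scale': 5, 'orient': 0, 'phase': 90, 'mode': 'sigmoidHalf',
    #        'threshold': 0.4, 'steepness': 8, 'name': 'vert-edge-example'},
    #     ],
    #   ]
    #   sensorFilter = GaborFilter(gaborBankParams=gaborBankParams)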
self._defAspectRatio = 1.30
if self._debugMode:
print 'Using Gabor bank parameters:'
print self._gaborBankParams
# Prepare the filters
self._gaborBank = self._makeGaborBank()
if self._debugMode:
print 'Gabor Bank:'
for f in self._gaborBank:
print '+=+=+=+=+=+=+=+=+=+=+=+=+=+=+'
print 'Scale: %d' % f[0][1]['scale']
print 'Orient: %d' % f[0][1]['orient']
print 'Phase: %d' % int(f[0][1]['phase'])
self._printGaborFilter(f[0][0])
if self._debugMode:
print 'Gabor bank generated with %d filter pairs.' % len(self._gaborBankParams)
# Initialize
self._imageCounter = 0
def getOutputCount(self):
"""
Return the number of images returned by each call to process().
If the filter creates multiple simultaneous outputs, return a tuple:
(outputCount, simultaneousOutputCount).
"""
return (1, len(self._gaborBank))
def process(self, origImage):
"""
Perform Gabor filtering on the input image and return one or more
response maps having dimension identical to the input image.
@param origImage -- The image to process.
Returns a list containing a list of the response maps.
"""
if self.mode != 'gray':
raise RuntimeError("GaborFilter only supports grayscale images.")
BaseFilter.process(self, origImage)
responses = []
if self._debugMode:
print 'GaborFilter: process()'
print 'GaborFilter: origImage size:', origImage.size
# Convert image data from PIL to numpy array
imageData = numpy.asarray(origImage.split()[0], dtype=dtype)
# Save the mask to put back later
mask = origImage.split()[1]
if self._wipeOutsideMask:
maskData = numpy.asarray(mask, dtype=dtype)
else:
maskData = None
# Perform the actual Gabor filtering
responseSet = self._doProcessing(imageData, maskData)
if self._debugMode:
print 'Responses generated: %d' % len(responseSet)
# Convert from numpy to PIL
imageSet = self._convertToPIL(responseSet)
# Dump the gabor responses to disk
if self._debugMode:
self._saveDebugImages(imageSet)
# Add the mask back
for image in imageSet:
image.putalpha(mask)
self._imageCounter += 1
return [imageSet]
def processArray(self, origImageData):
"""
Perform Gabor filtering on the input array and return one or more
response maps having dimension identical to the input image array.
@param origImageData -- two-dimensional numpy array representing the image.
Returns a list containing the response maps. Each map is a 2D numpy array.
"""
if self._debugMode:
print 'GaborFilter: processArray()'
# Perform the actual Gabor filtering
responseSet = self._doProcessing(origImageData)
if self._debugMode:
print 'Responses generated: %d' % len(responseSet)
self._imageCounter += 1
return responseSet
def getDefaultParamSet(self):
"""
Provide a default set of Gabor filter bank parameters in the event
that none were provided by the application.
Note: this method is classified as a public method so as to allow
the application to query the default parameter set.
"""
postProcMode = 'sigmoidHalf'
scale = {
'small': 3,
'medium': 6,
'large': 12,
}
orient = {
'vert': 0,
'swne': 45,
'horz': 90,
'nwse': 135,
}
phase = {
'line-bright': 0,
'edge-bright-to-dark': 90,
'line-dark': 180,
'edge-dark-to-bright': 270,
}
return [
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Small vertical lines and edges
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Small vertical lines
[
# Phase-0 (detects bright line against dark background)
{
'scale': scale['small'],
'orient': orient['vert'],
'phase': phase['line-bright'],
'mode': postProcMode,
'threshold': 0.400, #0.300,
'steepness': 8,
'name': 'small-vert-line-bright',
},
],
# [
# # Phase-180 (detects dark line against bright background)
# {
# 'scale': scale['small'],
# 'orient': orient['vert'],
# 'phase': phase['line-dark'],
# 'mode': postProcMode,
# 'threshold': 0.400, #0.300,
# 'steepness': 8,
# 'name': 'small-vert-line-dark',
# },
# ],
# Small vertical edges (bright to dark)
[
# Phase-90 (detects bright to dark edges)
{
'scale': scale['small'],
'orient': orient['vert'],
'phase': phase['edge-bright-to-dark'],
'mode': postProcMode,
'threshold': 0.400, #0.300,
'steepness': 8,
'name': 'small-vert-edge-bright',
},
],
# Small vertical edges (dark to bright)
[
# Phase-270 (detects dark to bright edges)
{
'scale': scale['small'],
'orient': orient['vert'],
'phase': phase['edge-dark-to-bright'],
'mode': postProcMode,
'threshold': 0.400, #0.300,
'steepness': 8,
'name': 'small-vert-edge-dark',
},
],
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Small SW-NE lines and edges
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Small SW-NE lines
[
# Phase-0 (detects bright line against dark background)
{
'scale': scale['small'],
'orient': orient['swne'],
'phase': phase['line-bright'],
'mode': postProcMode,
'threshold': 0.400,
'steepness': 8,
'name': 'small-swne-line-bright',
},
],
# [
# # Phase-180 (detects dark line against bright background)
# {
# 'scale': scale['small'],
# 'orient': orient['swne'],
# 'phase': phase['line-dark'],
# 'mode': postProcMode,
# 'threshold': 0.400,
# 'steepness': 8,
# 'name': 'small-swne-line-dark',
# },
# ],
# Small SW-NE edges (bright to dark)
[
# Phase-90 (detects bright to dark edges)
{
'scale': scale['small'],
'orient': orient['swne'],
'phase': phase['edge-bright-to-dark'],
'mode': postProcMode,
'threshold': 0.400,
'steepness': 8,
'name': 'small-swne-edge-bright',
},
],
# Small SW-NE edges (dark to bright)
[
# Phase-270 (detects dark to bright edges)
{
'scale': scale['small'],
'orient': orient['swne'],
'phase': phase['edge-dark-to-bright'],
'mode': postProcMode,
'threshold': 0.400,
'steepness': 8,
'name': 'small-swne-edge-dark',
},
],
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Small horizontal lines and edges
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Small horizontal lines
[
# Phase-0 (detects bright line against dark background)
{
'scale': scale['small'],
'orient': orient['horz'],
'phase': phase['line-bright'],
'mode': postProcMode,
'threshold': 0.400, #0.300,
'steepness': 8,
'name': 'small-horz-line-bright',
},
],
# [
# # Phase-180 (detects dark line against bright background)
# {
# 'scale': scale['small'],
# 'orient': orient['horz'],
# 'phase': phase['line-dark'],
# 'mode': postProcMode,
# 'threshold': 0.400,
# 'steepness': 8,
# 'name': 'small-horz-line-dark',
# },
# ],
# Small horizontal edges (bright to dark)
[
# Phase-90 (detects bright to dark edges)
{
'scale': scale['small'],
'orient': orient['horz'],
'phase': phase['edge-bright-to-dark'],
'mode': postProcMode,
'threshold': 0.400, #0.300,
'steepness': 8,
'name': 'small-horz-edge-bright',
},
],
# Small horizontal edges (dark to bright)
[
# Phase-270 (detects dark to bright edges)
{
'scale': scale['small'],
'orient': orient['horz'],
'phase': phase['edge-dark-to-bright'],
'mode': postProcMode,
'threshold': 0.400, #0.300,
'steepness': 8,
'name': 'small-horz-edge-dark',
},
],
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Small SE-NW lines and edges
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Small SE-NW lines
[
# Phase-0 (detects bright line against dark background)
{
'scale': scale['small'],
'orient': orient['nwse'],
'phase': phase['line-bright'],
'mode': postProcMode,
'threshold': 0.400,
'steepness': 8,
'name': 'small-nwse-line-bright',
},
],
# [
# # Phase-180 (detects dark line against bright background)
# {
# 'scale': scale['small'],
# 'orient': orient['nwse'],
# 'phase': phase['line-dark'],
# 'mode': postProcMode,
# 'threshold': 0.400,
# 'steepness': 8,
# 'name': 'small-nwse-line-dark',
# },
# ],
# Small SE-NW edges (bright to dark)
[
# Phase-90 (detects bright to dark edges)
{
'scale': scale['small'],
'orient': orient['nwse'],
'phase': phase['edge-bright-to-dark'],
'mode': postProcMode,
'threshold': 0.400,
'steepness': 8,
'name': 'small-nwse-edge-bright',
},
],
# Small SE-NW edges (dark to bright)
[
# Phase-270 (detects dark to bright edges)
{
'scale': scale['small'],
'orient': orient['nwse'],
'phase': phase['edge-dark-to-bright'],
'mode': postProcMode,
'threshold': 0.400,
'steepness': 8,
'name': 'small-nwse-edge-dark',
},
],
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Medium vertical lines and edges
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Medium vertical lines
[
# Phase-0 (detects bright line against dark background)
{
'scale': scale['medium'],
'orient': orient['vert'],
'phase': phase['line-bright'],
'mode': postProcMode,
'threshold': 0.500, #0.450,
'steepness': 6,
'name': 'medium-vert-line-bright',
},
],
# [
# # Phase-180 (detects dark line against bright background)
# {
# 'scale': scale['medium'],
# 'orient': orient['vert'],
# 'phase': phase['line-dark'],
# 'mode': postProcMode,
# 'threshold': 0.500,
# 'steepness': 6,
# 'name': 'medium-vert-line-dark',
# },
# ],
# Medium vertical edges (bright to dark)
[
# Phase-90 (detects bright to dark edges)
{
'scale': scale['medium'],
'orient': orient['vert'],
'phase': phase['edge-bright-to-dark'],
'mode': postProcMode,
'threshold': 0.500, #0.450,
'steepness': 6,
'name': 'medium-vert-edge-bright',
},
],
# Medium vertical edges (dark to bright)
[
# Phase-270 (detects dark to bright edges)
{
'scale': scale['medium'],
'orient': orient['vert'],
'phase': phase['edge-dark-to-bright'],
'mode': postProcMode,
'threshold': 0.500, #0.450,
'steepness': 6,
'name': 'medium-vert-edge-dark',
},
],
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Medium SW-NE lines and edges
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Medium SW-NE lines
[
# Phase-0 (detects bright line against dark background)
{
'scale': scale['medium'],
'orient': orient['swne'],
'phase': phase['line-bright'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'medium-swne-line-bright',
},
],
# [
# # Phase-180 (detects dark line against bright background)
# {
# 'scale': scale['medium'],
# 'orient': orient['swne'],
# 'phase': phase['line-dark'],
# 'mode': postProcMode,
# 'threshold': 0.600,
# 'steepness': 6,
# 'name': 'medium-swne-line-dark',
# },
# ],
# Medium SW-NE edges (bright to dark)
[
# Phase-90 (detects bright to dark edges)
{
'scale': scale['medium'],
'orient': orient['swne'],
'phase': phase['edge-bright-to-dark'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'medium-swne-edge-bright',
},
],
# Medium SW-NE edges (dark to bright)
[
# Phase-270 (detects dark to bright edges)
{
'scale': scale['medium'],
'orient': orient['swne'],
'phase': phase['edge-dark-to-bright'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'medium-swne-edge-dark',
},
],
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Medium horizontal lines and edges
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
[
# Phase-0 (detects bright line against dark background)
{
'scale': scale['medium'],
'orient': orient['horz'],
'phase': phase['line-bright'],
'mode': postProcMode,
'threshold': 0.500, #0.450,
'steepness': 6,
'name': 'medium-horz-line-bright',
},
],
# [
# # Phase-180 (detects dark line against bright background)
# {
# 'scale': scale['medium'],
# 'orient': orient['horz'],
# 'phase': phase['line-dark'],
# 'mode': postProcMode,
# 'threshold': 0.500,
# 'steepness': 6,
# 'name': 'medium-horz-line-dark',
# },
# ],
# Medium horizontal edges (bright to dark)
[
# Phase-90 (detects bright to dark edges)
{
'scale': scale['medium'],
'orient': orient['horz'],
'phase': phase['edge-bright-to-dark'],
'mode': postProcMode,
'threshold': 0.500, #0.450,
'steepness': 6,
'name': 'medium-horz-edge-bright',
},
],
# Medium horizontal edges (dark to bright)
[
# Phase-270 (detects dark to bright edges)
{
'scale': scale['medium'],
'orient': orient['horz'],
'phase': phase['edge-dark-to-bright'],
'mode': postProcMode,
'threshold': 0.500, #0.450,
'steepness': 6,
'name': 'medium-horz-edge-dark',
},
],
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Medium SE-NW lines and edges
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Medium SE-NW lines
[
# Phase-0 (detects bright line against dark background)
{
'scale': scale['medium'],
'orient': orient['nwse'],
'phase': phase['line-bright'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'medium-nwse-line-bright',
},
],
# [
# # Phase-180 (detects dark line against bright background)
# {
# 'scale': scale['medium'],
# 'orient': orient['nwse'],
# 'phase': phase['line-dark'],
# 'mode': postProcMode,
# 'threshold': 0.600,
# 'steepness': 6,
# 'name': 'medium-nwse-line-dark',
# },
# ],
# Medium SE-NW edges (bright to dark)
[
# Phase-90 (detects bright to dark edges)
{
'scale': scale['medium'],
'orient': orient['nwse'],
'phase': phase['edge-bright-to-dark'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'medium-nwse-edge-bright',
},
],
# Medium SE-NW edges (dark to bright)
[
# Phase-270 (detects dark to bright edges)
{
'scale': scale['medium'],
'orient': orient['nwse'],
'phase': phase['edge-dark-to-bright'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'medium-nwse-edge-dark',
},
],
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Large vertical lines and edges
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Large vertical lines
[
# Phase-0 (detects bright line against dark background)
{
'scale': scale['large'],
'orient': orient['vert'],
'phase': phase['line-bright'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'large-vert-line-bright',
},
],
# [
# # Phase-180 (detects dark line against bright background)
# {
# 'scale': scale['large'],
# 'orient': orient['vert'],
# 'phase': phase['line-dark'],
# 'mode': postProcMode,
# 'threshold': 0.600,
# 'steepness': 6,
# 'name': 'large-vert-line-dark',
# },
# ],
# Large vertical edges (bright to dark)
[
# Phase-90 (detects bright to dark edges)
{
'scale': scale['large'],
'orient': orient['vert'],
'phase': phase['edge-bright-to-dark'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'large-vert-edge-bright',
},
],
# Large vertical edges (dark to bright)
[
# Phase-270 (detects dark to bright edges)
{
'scale': scale['large'],
'orient': orient['vert'],
'phase': phase['edge-dark-to-bright'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'large-vert-edge-dark',
},
],
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Large SW-NE lines and edges
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Large SW-NE lines
[
# Phase-0 (detects bright line against dark background)
{
'scale': scale['large'],
'orient': orient['swne'],
'phase': phase['line-bright'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'large-swne-line-bright',
},
],
# [
# # Phase-180 (detects dark line against bright background)
# {
# 'scale': scale['large'],
# 'orient': orient['swne'],
# 'phase': phase['line-dark'],
# 'mode': postProcMode,
# 'threshold': 0.600,
# 'steepness': 6,
# 'name': 'large-swne-line-dark',
# },
# ],
# Large SW-NE edges (bright to dark)
[
# Phase-90 (detects bright to dark edges)
{
'scale': scale['large'],
'orient': orient['swne'],
'phase': phase['edge-bright-to-dark'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'large-swne-edge-bright',
},
],
# Large SW-NE edges (dark to bright)
[
# Phase-270 (detects dark to bright edges)
{
'scale': scale['large'],
'orient': orient['swne'],
'phase': phase['edge-dark-to-bright'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'large-swne-edge-dark',
},
],
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Large horizontal lines and edges
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Large horizontal lines
[
# Phase-0 (detects bright line against dark background)
{
'scale': scale['large'],
'orient': orient['horz'],
'phase': phase['line-bright'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'large-horz-line-bright',
},
],
# [
# # Phase-180 (detects dark line against bright background)
# {
# 'scale': scale['large'],
# 'orient': orient['horz'],
# 'phase': phase['line-dark'],
# 'mode': postProcMode,
# 'threshold': 0.600,
# 'steepness': 6,
# 'name': 'large-horz-line-dark',
# },
# ],
# Large horizontal edges (bright to dark)
[
# Phase-90 (detects bright to dark edges)
{
'scale': scale['large'],
'orient': orient['horz'],
'phase': phase['edge-bright-to-dark'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'large-horz-edge-bright',
},
],
# Large horizontal edges (dark to bright)
[
# Phase-270 (detects dark to bright edges)
{
'scale': scale['large'],
'orient': orient['horz'],
'phase': phase['edge-dark-to-bright'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'large-horz-edge-dark',
},
],
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Large SE-NW lines and edges
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Large SE-NW lines
[
# Phase-0 (detects bright line against dark background)
{
'scale': scale['large'],
'orient': orient['nwse'],
'phase': phase['line-bright'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'large-nwse-line-bright',
},
],
# [
# # Phase-180 (detects dark line against bright background)
# {
# 'scale': scale['large'],
# 'orient': orient['nwse'],
# 'phase': phase['line-dark'],
# 'mode': postProcMode,
# 'threshold': 0.600,
# 'steepness': 6,
# 'name': 'large-nwse-line-dark',
# },
# ],
# Large SW-NE edges (bright to dark)
[
# Phase-90 (detects bright to dark edges)
{
'scale': scale['large'],
'orient': orient['nwse'],
'phase': phase['edge-bright-to-dark'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'large-nwse-edge-bright',
},
],
# Large SW-NE edges (dark to bright)
[
# Phase-270 (detects dark to bright edges)
{
'scale': scale['large'],
'orient': orient['nwse'],
'phase': phase['edge-dark-to-bright'],
'mode': postProcMode,
'threshold': 0.600,
'steepness': 6,
'name': 'large-nwse-edge-dark',
},
],
]
def createDebugImages(self, imageSet):
"""
Organize and return Gabor response images and composite images, given the
filter responses in imageSet.
Returns a list of dictionaries, one per filter, where each
dictionary item contains:
'scale' - scale
'orient' - orientation
'image' - raw response image for this scale and orientation
'name' - filter name
'counter' - internal image counter, incremented for each new source image
Returns a list of dictionaries, one per scale, where each
dictionary item contains:
'scale' - scale
'image' - composite image for this scale, over all orientations
'counter' - internal image counter, incremented for each new source image
"""
# Collect the raw responses
rawResponses = []
scaleDict = {}
for responseIndex in range(len(imageSet)):
r = {}
r['image'] = imageSet[responseIndex].split()[0]
r['name'] = self._gaborBankParams[responseIndex][0]['name']
r['scale'] = self._gaborBankParams[responseIndex][0]['scale']
r['orient'] = self._gaborBankParams[responseIndex][0]['orient']
r['counter'] = self._imageCounter
rawResponses.append(r)
# Accounting for composite image creation
if self._gaborBankParams[responseIndex][0]['scale'] not in scaleDict:
scaleDict[self._gaborBankParams[responseIndex][0]['scale']] = []
scaleDict[self._gaborBankParams[responseIndex][0]['scale']] += [(responseIndex,
self._gaborBankParams[responseIndex][0]['orient'])]
# Create the composite responses
compositeResponses = []
for scale in scaleDict.keys():
# Accumulate image indices for each orientation in this scale
composites = {'red': [], 'green': [], 'blue': []}
for responseIndex, orient in scaleDict[scale]:
if orient == 90:
composites['red'] += [responseIndex]
elif orient == 0:
composites['green'] += [responseIndex]
else:
composites['blue'] += [responseIndex]
# Generate RGB composite images for this scale
images = {'red': None, 'green': None, 'blue': None}
bands = []
for color in composites.keys():
imgList = composites[color]
im = Image.new('L',imageSet[0].size)
for indx in imgList:
im2 = rawResponses[indx]['image']
im = ImageChops.add(im, im2)
images[color] = im
bands += [images[color]]
# Make final composite for this scale
m = Image.merge(mode='RGB', bands=bands)
r = {}
r['scale'] = scale
r['counter'] = self._imageCounter
r['image'] = m
compositeResponses.append(r)
return rawResponses, compositeResponses
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
# Private (internal) methods
#+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
def _createDir(self, directory):
"""
Create a directory for writing debug images
"""
print 'Wiping directory tree: [%s]...' % directory
if os.path.exists(directory):
shutil.rmtree(directory)
print 'Creating directory: [%s]...' % directory
os.makedirs(directory)
def _saveDebugImages(self, imageSet, amplification=1.0):
"""
Save the Gabor responses to disk.
"""
# Create composites and organize response images
rawResponses, compositeResponses = self.createDebugImages(imageSet)
# Create output directory if needed
if self._imageCounter == 0:
self._createDir(self._debugOutputDir)
self._createDir(self._debugCompositeDir)
# Save the responses
for r in rawResponses:
dstPath = os.path.join(self._debugOutputDir,
'image-%04d.%s.png' % (r['counter'], r['name']))
r['image'].save(dstPath)
# Save each composite image
for c in compositeResponses:
dstPath = os.path.join(self._debugCompositeDir,
'image-%04d.composite-%02d.png' % (c['counter'], c['scale']))
      fp = open(dstPath, 'wb')
c['image'].save(fp)
fp.close()
def _makeGaborBank(self):
"""
Generate a complete bank of Gabor filters to the specification
contained in self._gaborBankParams.
Return value: a list of sub-lists, each sub-list containing a
2-tuple of the form (filter, filterParams), where filter is
a numpy array instantiating the Gabor filter, and 'filterParams'
is a reference to the relevant filter description from
'self._gaborBankParams'.
"""
gaborBank = []
for responseSet in self._gaborBankParams:
filterSet = []
for filterParams in responseSet:
filter = self._makeGaborFilter(filterParams)
self._normalizeGain(filter, filterParams)
filterSet += [(filter, filterParams)]
gaborBank += [filterSet]
if self._debugMode:
print '_makeGaborBank: %d filters generated' % len(gaborBank)
return gaborBank
def _normalizeGain(self, filter, filterParams):
"""
This method normalizes the gain of the filter by adding a DC offset. On the
two orientations used by the convolve1D function, it noramlizes rows and
columns individually to ensure that the filter remains separable.
Previously, this method always shifted the mean of the entire 2D filter,
which produced a filter that was no longer separable. This method performs
differently on the 0 and 90 degree orientations. It normalizes rows when
rows hold the sinusoidal component of the Gabor filter (orientation 0) and
columns otherwise (orientation 90). It falls back to normalizing the entire
filter together for the other two orientations, because they are not used
by the convolve1D function and are not separable.
It is only necessary on even-symmetric filters (phase 0 and 180), as the
sinusoidal components of the odd-symmetric filters always have 0 mean.
The responses created using this method and those created using the old
version of _normalizeGain differ slightly when orientation is 0 or 90 and
phase is 0 or 180, but not significantly.
"""
# if filterParams['orient'] in (0, 90):
if filterParams['phase'] in (0, 180):
# Only necessary on the even-symmetric filters
if filterParams['orient'] in (0, 45):
for row in xrange(filter.shape[0]):
filter[row,:] -= filter[row,:].mean()
else:
for col in xrange(filter.shape[1]):
filter[:,col] -= filter[:,col].mean()
# else:
# # Old normalizeGain method
# filter -= filter.mean()
def _applyDefaults(self, filterParams):
"""
Instantiate filter parameters 'aspect', 'width', and 'wavelength'
(if they are not already explicitly specified in the filter
description) by deriving values based upon existing values.
"""
if 'aspect' not in filterParams:
filterParams['aspect'] = self._defAspectRatio
defWidth, defWavelength = self._chooseParams(filterParams['scale'])
if 'width' not in filterParams:
filterParams['width'] = defWidth
if 'wavelength' not in filterParams:
filterParams['wavelength'] = defWavelength
def _makeGaborFilter(self, filterParams):
"""
Generate a single Gabor filter.
@param filterParams -- a dict containing a description of the
desired filter.
"""
self._applyDefaults(filterParams)
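    # The mask computed below follows the standard Gabor form: each lattice
    # point (x, y) is rotated by 'orient' into (x0, y0), then
    #   g = exp(-(x0**2 + aspect**2 * y0**2) / (2 * width**2))
    #       * cos(2 * pi * x0 / wavelength + phase)
    # i.e. a Gaussian envelope modulating a cosine carrier.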
if self._debugMode:
print '_makeGaborFilter():'
print ' maskRadius:', filterParams['scale']
print ' effectiveWidth:', filterParams['width']
print ' aspectRatio:', filterParams['aspect']
print ' orientation:', filterParams['orient']
print ' wavelength:', filterParams['wavelength']
print ' phase:', filterParams['phase']
# Deg-to-rad
orientation = filterParams['orient']
if self._convolutionMethod == '1D' and orientation in (45, 135):
# Rotate the filter 45 degrees counterclockwise for separable convolution
# Filter will be vertical or horizontal, and image will be rotated
orientation -= 45
orientation = self._deg2rad(orientation)
phase = self._deg2rad(filterParams['phase'])
# Create the mask lattice
maskDim = filterParams['scale'] * 2 + 1
# if filterParams['phase'] in [0, 180]:
# maskDim += 1
g = numpy.zeros((maskDim, maskDim), dtype=dtype)
x = numpy.zeros((maskDim, maskDim), dtype=dtype)
y = numpy.zeros((maskDim, maskDim), dtype=dtype)
    # Create the lattice points
halfWidth = 0.5 * float(maskDim - 1)
for j in range(maskDim):
for i in range(maskDim):
x[j,i] = float(i-halfWidth)
y[j,i] = float(j-halfWidth)
# Generate gabor mask
cosTheta = math.cos(orientation)
sinTheta = math.sin(orientation)
sinusoidalConstant = 2.0 * math.pi / filterParams['wavelength']
exponentialConstant = -0.5 / (filterParams['width']* filterParams['width'])
aspectConstant = filterParams['aspect'] * filterParams['aspect']
for j in range(maskDim):
for i in range(maskDim):
x0 = x[j,i] * cosTheta + y[j,i] * sinTheta
y0 = y[j,i] * cosTheta - x[j,i] * sinTheta
sinusoidalTerm = math.cos((sinusoidalConstant * x0) + phase)
exponentialTerm = math.exp(exponentialConstant * (x0*x0 + aspectConstant*y0*y0))
g[j,i] = exponentialTerm * sinusoidalTerm
# Suppress lobes (optional)
if self._suppressLobes:
g = self._doLobeSuppression(g, orientation, phase)
return g
def _deg2rad(self, degrees):
"""
Utility macro for converting from degrees to radians.
"""
return degrees * math.pi / 180.0
def _rad2deg(self, radians):
"""
Utility macro for converting from radians to degrees.
"""
return radians * 180.0 / math.pi
def _chooseParams(self, scale):
"""
Returns a 2-tuple of (width, wavelength) containing reasonable
default values for a particular 'scale'.
"""
paramTable = [
(1, 0.8, 1.3),
(2, 1.7, 2.4),
(3, 2.8, 3.5),
(4, 3.6, 4.6),
(5, 4.5, 5.6),
(6, 5.4, 6.8),
(7, 6.3, 7.9),
(8, 7.3, 9.1),
(9, 8.2, 10.3),
(10, 9.2, 11.5),
(11, 10.2, 12.7),
(12, 11.3, 14.1),
(13, 12.3, 15.4),
(14, 13.4, 16.8),
(15, 14.6, 18.2),
(16, 15.8, 19.7),
(17, 17.0, 21.2),
(18, 18.2, 22.8),
]
for paramSet in paramTable:
if paramSet[0] == scale:
return paramSet[1], paramSet[2]
def _suppressionRules(self):
"""
Return a table of algorithmic parameters used in lobe suppression.
The table contains a list of 2-tuples of the form:
(orientation, phase), (numPosZones, numNegZones, deltaX, deltaY)
"""
return [
# Verticals
((0, 0), (1, 1, 1, 0)),
((0, 90), (0, 1, 1, 0)),
((0, 180), (1, 1, 1, 0)),
((0, 270), (1, 0, 1, 0)),
# SW-NE
((45, 0), (1, 1, 1, 1)),
((45, 90), (0, 1, 1, 1)),
((45, 180), (1, 1, 1, 1)),
((45, 270), (1, 0, 1, 1)),
# Horizontals
((90, 0), (1, 1, 0, 1)),
((90, 90), (0, 1, 0, 1)),
((90, 180), (1, 1, 0, 1)),
((90, 270), (1, 0, 0, 1)),
# SE-NW
((135, 0), (1, 1, -1, 1)),
((135, 90), (0, 1, -1, 1)),
((135, 180), (1, 1, -1, 1)),
((135, 270), (1, 0, -1, 1)),
]
def _findSuppressionRules(self, orientation, phase):
"""
Return a set of algorithmic parameters for performing lobe suppression
given the specified values of 'orientation' and 'phase'.
"""
# (orientation, phase) ==> (numPosZones, numNegZones, deltaX, deltaY)
phase = int(self._rad2deg(phase))
orientation = int(self._rad2deg(orientation))
return [x[1] for x in self._suppressionRules() if x[0][0] == orientation and x[0][1] == phase][0]
def _doLobeSuppression(self, filter, orientation, phase):
"""
Suppress (set to zero) the filter values outside of the main (primary)
lobes for particular filter.
Returns a copy of the filter with lobes suppressed.
"""
# Obtain rules for scanning
(numPosZones, numNegZones, deltaX, deltaY) = self._findSuppressionRules(orientation, phase)
# Do the actual lobe-scanning
sideLen = filter.shape[0]
lobeFound = self._scanForLobes(filter, numPosZones, numNegZones, deltaX, deltaY,
sideLen, self._rad2deg(phase))
# Zero out the lobes found
if lobeFound is not None:
filter = self._zeroLobes(filter, lobeFound, (deltaX, deltaY))
return filter
def _zeroLobes(self, filter, (lobeX, lobeY), (deltaX, deltaY)):
"""
Perform the actual suppression of the secondary lobes of a
filter, assuming these secondary lobes have already been located
and are identified by the 2-tuple (lobeX, lobeY).
    @param (lobeX, lobeY) -- The position (relative to the filter's own
local coordinate frame) where the lobe suppression should
commence.
@param (deltaX, deltaY) -- The direction (from filter center) in
which the search for secondary lobes proceeded.
"""
wipes = []
sideLen = filter.shape[0]
# Vertical scan ==> horizontal wipes
if deltaX == 0 and deltaY > 0:
for y in range(lobeY, sideLen):
wipes += [(lobeX, y, 1, 0)]
wipes += [(lobeX, y, -1, 0)]
for y in range(sideLen - lobeY - 1, -1, -1):
wipes += [(lobeX, y, 1, 0)]
wipes += [(lobeX, y, -1, 0)]
# Horizontal scan ==> vertical wipes
elif deltaX > 0 and deltaY == 0:
for x in range(lobeX, sideLen):
wipes += [(x, lobeY, 0, 1)]
wipes += [(x, lobeY, 0, -1)]
for x in range(sideLen - lobeX - 1, -1, -1):
wipes += [(x, lobeY, 0, 1)]
wipes += [(x, lobeY, 0, -1)]
# SW-bound scan ==> NW-SE wipes
elif deltaX < 0 and deltaY > 0:
for k in range(lobeY, sideLen):
wipes += [(sideLen-k-1, k, -1, -1)]
wipes += [(sideLen-k-1, k, 1, 1)]
wipes += [(max(0, sideLen-k-2), k, -1, -1)]
wipes += [(max(0, sideLen-k-2), k, 1, 1)]
for k in range(sideLen - lobeY - 1, -1, -1):
wipes += [(sideLen-k-1, k, -1, -1)]
wipes += [(sideLen-k-1, k, 1, 1)]
wipes += [(min(sideLen-1, sideLen-k), k, -1, -1)]
wipes += [(min(sideLen-1, sideLen-k), k, 1, 1)]
# SE-bound scan ==> SW-NE wipes
elif deltaX > 0 and deltaY > 0:
for k in range(lobeY, sideLen):
wipes += [(k, k, -1, 1)]
wipes += [(k, k, 1, -1)]
wipes += [(min(sideLen-1, k+1), k, -1, 1)]
wipes += [(min(sideLen-1, k+1), k, 1, -1)]
for k in range(sideLen - lobeY - 1, -1, -1):
wipes += [(k, k, -1, 1)]
wipes += [(k, k, 1, -1)]
wipes += [(max(0, k-1), k, -1, 1)]
wipes += [(max(0, k-1), k, 1, -1)]
# Do the wipes
for wipe in wipes:
filter = self._wipeStripe(filter, wipe)
return filter
def _wipeStripe(self, filter, (x, y, dX, dY)):
"""
Zero out a particular row, column, or diagonal within the filter.
@param filter -- The filter to be modified (a numpy array).
@param x, y -- The starting point (in filter coordinate frame)
of a stripe that is to be zeroed out.
@param dX, dY -- The direction to proceed (and zero) until
the edge of the filter is encountered.
"""
sideLen = filter.shape[0]
while True:
filter[y, x] = 0.0
x += dX
y += dY
if min(x, y) < 0 or max(x, y) >= sideLen:
return filter
def _scanForLobes(self, filter, numPosZones, numNegZones, deltaX, deltaY,
sideLen, phase):
"""
    Search a filter for the location of its secondary lobes, starting from
the center of the filter.
@param filter -- The filter to be searched for secondary lobe(s).
@param numPosZones -- The number of regions of positive filter values that
are contained within the primary lobes.
@param numNegZones -- The number of regions of negative filter values that
are contained within the primary lobes.
@param deltaX, deltaY -- The direction in which to proceed (either vertically,
horizontally, or diagonally) during the search.
@param sideLen -- The length of one side of the filter (in pixels.)
@param phase -- Phase of the filter (in degrees).
"""
# Choose starting point
x = sideLen/2
y = sideLen/2
if sideLen % 2 == 0:
x += min(deltaX, 0)
y += min(deltaY, 0)
elif phase in (90, 270):
# Odd-symmetric filters of odd dimension are 0 in the center
# Skip the center point in order to not over-count lobes
x += deltaX
y += deltaY
posZonesFound = 0
negZonesFound = 0
curZone = None
while True:
# Determine zone type
maskVal = filter[y,x]
      if maskVal > 0.0 and curZone != 'positive':
curZone = 'positive'
posZonesFound += 1
      elif maskVal < 0.0 and curZone != 'negative':
curZone = 'negative'
negZonesFound += 1
# Are we done?
if posZonesFound > numPosZones or negZonesFound > numNegZones:
return (x, y)
else:
x += deltaX
y += deltaY
# Hit the edge?
if x == sideLen or x < 0 or y == sideLen or y < 0:
return None
def _doProcessing(self, imageData, maskData=None):
"""
Apply the bank of pre-computed Gabor filters against a submitted image.
@param imageData -- The image to be filtered.
@param maskData -- Used if _wipeOutsideMask is True.
"""
if self._debugMode:
print 'GaborFilter._doProcessing(): imageData:'
print imageData
imageData *= (1.0/255.0)
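    # Remaining pipeline (illustrative summary): build a 45-degree-rotated copy
    # of the image if any diagonal filter will use separable '1D' convolution,
    # pre-compute an eroded mask per scale when _wipeOutsideMask is set, then
    # for each filter set: convolve, normalize the peak response to 1.0, apply
    # the configured post-processing, and combine multi-filter responses with
    # an L2 norm.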
filterIndex = 0
responseSet = []
if self._convolutionMethod == '1D':
# Create rotated version of image if necessary
createRotated = False
imageDataRotated = None
for filterSet in self._gaborBank:
for (filter, filterSpecs) in filterSet:
if filterSpecs['orient'] in (45, 135):
createRotated = True
break
if createRotated:
break
if createRotated:
y, x = imageData.shape
z = int(round((x+y) * 1/(2**.5)))
imageDataRotated = numpy.zeros((z,z), dtype)
self._rotation.rotate(imageData, imageDataRotated, y, x, z)
if self._wipeOutsideMask:
# Get all the scales
scales = []
for filter in self._gaborBank:
if filter[0][1]['scale'] not in scales:
scales.append(filter[0][1]['scale'])
# Create an eroded mask for each scale
erodedMaskData = {}
for scale in scales:
# Create a uniform filter at the specified scale
filter = numpy.ones((scale*2 + 1), dtype)
# Convolve the filter with the mask
convolution = Float32SeparableConvolution2D()
convolution.init(maskData.shape[0], maskData.shape[1],
filter.shape[0], filter.shape[0], filter, filter)
erodedMaskData[scale] = numpy.zeros(maskData.shape, dtype)
convolution.compute(maskData, erodedMaskData[scale])
for filterSet in self._gaborBank:
filterResponse = []
for (filter, filterSpecs) in filterSet:
if self._debugMode:
print 'Applying filter: phase=%f scale=%f orient=%d' % (filterSpecs['phase'],
filterSpecs['scale'],
filterSpecs['orient'])
# Perform the convolution
if self._convolutionMethod == '2D':
response = convolve2D(imageData, filter)
elif self._convolutionMethod == '1D':
response = convolve1D(imageData, imageDataRotated, filter,
phase=filterSpecs['phase'], orientation=filterSpecs['orient'],
rotation=self._rotation)
elif self._convolutionMethod == 'FFT':
response = convolveFFT(imageData, filter)
else:
raise RuntimeError("Unknown convolution method: "
+ self._convolutionMethod)
if self._wipeOutsideMask:
# Zero the response outside the mask
mask = erodedMaskData[filterSpecs['scale']]
maskMax = 255 * (filterSpecs['scale'] * 2 + 1) ** 2
response[mask < maskMax] = 0
maxResponse = response.max()
if maxResponse > 0.0:
response *= (1.0 / maxResponse)
postProcessingMode = filterSpecs['mode']
threshold = filterSpecs['threshold']
steepness = filterSpecs['steepness']
# Perform post-processing
if postProcessingMode != 'raw':
response = self._postProcess(response, postProcessingMode, threshold, steepness)
filterResponse += [response]
# Combine sequential filters to compute energy
if len(filterResponse) > 1:
if self._debugMode:
print 'Computing combined energy...'
combinedResponse = self._combineResponses(filterResponse)
else:
combinedResponse = filterResponse[0]
responseSet += [combinedResponse]
return responseSet
def _combineResponses(self, responseSet):
"""
Combine a list of one or more individual Gabor response maps
into a single combined response map.
Uses L2 norm to combine the responses.
"""
combinedResponse = numpy.zeros(responseSet[0].shape)
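    # For example, two response maps r1 and r2 are merged element-wise as
    # sqrt(clip(r1**2 + r2**2, 0.0, 1.0)).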
for response in responseSet:
combinedResponse += (response * response)
combinedResponse = numpy.sqrt(combinedResponse.clip(min=0.0, max=1.0))
return combinedResponse
def _postProcess(self, preResponse, postProcessingMode, threshold=0.0, steepness=500.0):
"""performs post-processing on the raw Gabor responses.
Modes are as follows:
'raw' Perform no post-processing. Outputs will be in (-1.0, +1.0)
'abs' Output is absolute value of raw response. Output will
lie in range (0.0, +1.0)
'positive' Clip negative raw response values to zero. Output will lie
in range (0.0, +1.0)
'rectified' Output is square of input. Output will lie in the
range (0.0, +1.0)
'power' Positive raw responses are squared; negative raw responses
are clipped to zero. Output will be in (0.0, +1.0)
'hardFull' Apply a hard threshold to the input; if raw response is
>= 'threshold', then output is 1.0, else output is -1.0.
Output lies in (-1.0, +1.0)
'hardHalf' Apply a hard threshold to the input; if raw response is
>= 'threshold', then output is 1.0, else output is 0.0.
Output lies in (0.0, +1.0)
'sigmoidFull': Apply a sigmoidal threshold function to the input using
'threshold' and 'steepness'. Output lies in (-1.0, +1.0).
'sigmoidHalf': Apply a sigmoidal threshold function to the input using
'threshold' and 'steepness'. Output lies in (0.0, +1.0).
"""
# No processing
if postProcessingMode == 'raw':
postResponse = preResponse
# Compute absolute value
elif postProcessingMode == 'abs':
postResponse = abs(preResponse)
# Negative values set to 0.0
elif postProcessingMode == 'positive':
postResponse = preResponse.clip(min=0.0, max=1.0)
# Compute square of response
elif postProcessingMode == 'rectified':
postResponse = preResponse * preResponse
# Compute square of response for positive values
elif postProcessingMode == 'power':
intResponse = preResponse.clip(min=0.0, max=1.0)
postResponse = intResponse * intResponse
# Compute polynomial response for positive values
elif postProcessingMode == 'polynomial':
#intResponse = preResponse.clip(min=0.0, max=1.0)
#postResponse = intResponse ** int(steepness)
gain = 1.0 / ((1.0 - threshold) ** steepness)
intResponse = (preResponse - threshold).clip(min=0.0, max=1.0)
postResponse = gain * (intResponse ** steepness)
# If output is > threshold, set to 1.0, else -1.0
elif postProcessingMode == 'hardFull':
postResponse = (preResponse > threshold).astype(dtype) * 2.0 - 1.0
# If output is > threshold, set to 1.0, else 0.0
elif postProcessingMode == 'hardHalf':
postResponse = (preResponse > threshold).astype(dtype)
# Sigmoid
elif postProcessingMode == 'sigmoidHalf':
postResponse = 1.0 / (numpy.exp(numpy.clip(steepness * (threshold - preResponse), -40.0, 40.0)) + 1.0)
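      # Worked example (illustrative): with threshold=0.4 and steepness=8, a
      # raw response of 0.4 maps to exactly 0.5, 1.0 maps to roughly 0.99, and
      # 0.0 maps to roughly 0.04.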
# Sigmoid
elif postProcessingMode == 'sigmoidFull':
postResponse = (2.0 / (numpy.exp(numpy.clip(steepness * (threshold - preResponse), -40.0, 40.0)) + 1.0)) - 1.0
return postResponse
def _printGaborFilter(self, g):
"""
Print a Gabor filter mask in reasonably pretty format.
@param g -- numpy array embodying the filter to be printed.
"""
for j in range(g.shape[0]):
for i in range(g.shape[1]):
print '%7.4f' % g[j,i],
print
def _convertToPIL(self, responseSet, amplification=1.0):
"""
Convert a list of gabor responses (represented as numpy arrays)
into a list of PIL images.
"""
imageSet = []
for responseIndex in range(len(responseSet)):
response = responseSet[responseIndex]
maxPixelVal = 255.0
halfMaxPixelVal = 0.5 * maxPixelVal
# im = Image.new('L', (response.shape[1], response.shape[0]))
# im.putdata(response.flatten(), scale=maxPixelVal * amplification, offset=0.0)
im = Image.fromarray(((response*maxPixelVal*amplification).clip(min=0, max=255.0)).astype(numpy.uint8))
imageSet += [im]
return imageSet
def convolve2D(image, filter):
"""
Convolve 2D filter with 2D image.
"""
filterDim = filter.shape[0]
# filterRadius = (filterDim - 1) / 2
filterRadius = filterDim / 2
flatFilter = filter.flatten()
numPosnX = image.shape[1] - filterDim + 1
numPosnY = image.shape[0] - filterDim + 1
# response = scipy.signal.convolve(image, filter, mode='same')
response = numpy.zeros(image.shape, dtype=dtype)
for j in range(numPosnY):
for i in range(numPosnX):
response[j+filterRadius, i+filterRadius] = numpy.inner(flatFilter,
image[j:j+filterDim,
i:i+filterDim].flatten())
return response
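# Note: the loop above computes a sliding inner product of the (unflipped)
# filter with each image patch, and rows/columns where the filter would
# extend past the image border are left at zero. The cost per output pixel
# grows with filterDim**2, which is what convolve1D and convolveFFT below
# try to avoid.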
def convolve1D(image, imageRotated, filter, phase, orientation, rotation):
"""
Convolve 2D filter with 2D image by approximating the filter as the outer
product of two 1D filters and performing two separate convolutions.
Results nearly match convolve2D if the filter is separable, with an average
pixel intensity difference of about 1 / 1,000,000.
"""
# Separate the 2D filter into two 1D filters
if orientation in (0, 45):
filterX = filter[filter.shape[0]/2,:].copy()
if phase in (0, 180):
filterY = filter[:,filter.shape[1]/2].copy()
else:
# Sinusoid is zero in the center
filterY = filter[:,filter.shape[1]/2-1].copy()
elif orientation in (90, 135):
filterY = filter[:,filter.shape[1]/2].copy()
if phase in (0, 180):
filterX = filter[filter.shape[1]/2,:].copy()
else:
# Sinusoid is zero in the center
filterX = filter[filter.shape[1]/2-1,:].copy()
else:
raise RuntimeError("convolve1D cannot do orientation %d" % orientation)
if phase in (180, 270):
if orientation in (0, 45):
filterY *= -1
elif orientation in (90, 135):
filterX *= -1
if orientation in (45, 135):
imageToUse = imageRotated
else:
imageToUse = image
# Process the 2D convolution as two 1D convolutions
convolution = Float32SeparableConvolution2D()
convolution.init(imageToUse.shape[0], imageToUse.shape[1],
filterX.shape[0], filterY.shape[0], filterX, filterY)
response = numpy.zeros(imageToUse.shape, dtype)
convolution.compute(imageToUse, response)
if orientation in (45, 135):
# The image has been rotated
# Unrotate the image
y, x = image.shape
z = response.shape[0]
unrotatedResponse = numpy.zeros((y, x), dtype)
rotation.unrotate(unrotatedResponse, response, y, x, z)
response = unrotatedResponse
# Zero the edges
response[:filter.shape[0]/2,:] = 0.0
response[:,:filter.shape[1]/2] = 0.0
response[-filter.shape[0]/2+1:,:] = 0.0
response[:,-filter.shape[1]/2+1:] = 0.0
return response
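# Separability note (illustrative, not part of the original code): a 2D
# kernel that factors as an outer product, e.g.
#   filter2D = numpy.outer(filterY, filterX)
# can be applied as a column pass with filterY followed by a row pass with
# filterX, which is what the Float32SeparableConvolution2D call above does;
# this reduces the per-pixel cost from filterDim**2 multiplies to roughly
# 2 * filterDim.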
def convolveFFT(image, filter):
"""
Convolve 2D filter with 2D image using FFT -> multiply -> IFFT.
Results nearly match convolve2D, with an average pixel intensity difference
of about 1 / 1,000,000.
"""
size = [
pow(2,int(math.ceil(math.log(image.shape[0] + filter.shape[0] - 1, 2)))),
pow(2,int(math.ceil(math.log(image.shape[1] + filter.shape[1] - 1, 2))))
]
image2 = numpy.zeros(size)
image2[0:image.shape[0], 0:image.shape[1]] = image
image2 = numpy.fft.fft2(image2)
filter = numpy.fliplr(numpy.flipud(filter))
filter2 = numpy.zeros(size)
filter2[0:filter.shape[0], 0:filter.shape[1]] = filter
filter2 = numpy.fft.fft2(filter2)
response = numpy.fft.ifft2(image2 * filter2)
x = (filter.shape[0] - 1) / 2
y = (filter.shape[1] - 1) / 2
response = response[x:x+image.shape[0], y:y+image.shape[1]]
response[:filter.shape[0]/2,:] = 0.0
response[:,:filter.shape[1]/2] = 0.0
response[-filter.shape[0]/2+1:,:] = 0.0
response[:,-filter.shape[1]/2+1:] = 0.0
return response
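# Note: both operands are zero-padded to the next power of two of at least
# (image + filter - 1) in each dimension so the circular convolution implied
# by the FFT does not wrap around; the filter is flipped beforehand so the
# result matches the correlation-style output of convolve2D, and the same
# border rows/columns are zeroed at the end.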
| gpl-3.0 |
kustodian/ansible | lib/ansible/modules/network/f5/bigip_profile_http.py | 23 | 61362 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_profile_http
short_description: Manage HTTP profiles on a BIG-IP
description:
- Manage HTTP profiles on a BIG-IP.
version_added: 2.7
options:
name:
description:
- Specifies the name of the profile.
type: str
required: True
parent:
description:
- Specifies the profile from which this profile inherits settings.
- When creating a new profile, if this parameter is not specified, the default
is the system-supplied C(http) profile.
type: str
default: /Common/http
description:
description:
- Description of the profile.
type: str
proxy_type:
description:
- Specifies the proxy mode for the profile.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
type: str
choices:
- reverse
- transparent
- explicit
dns_resolver:
description:
- Specifies the name of a configured DNS resolver; this option is mandatory when C(proxy_type)
is set to C(explicit).
- Format of the name can be either be prepended by partition (C(/Common/foo)), or specified
just as an object name (C(foo)).
- To remove the entry, a value of C(none) or C('') can be set; however, the profile C(proxy_type)
must not be set as C(explicit).
type: str
insert_xforwarded_for:
description:
- When specified, the system inserts an X-Forwarded-For header in an HTTP request
with the client IP address, to use with connection pooling.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
type: bool
redirect_rewrite:
description:
- Specifies whether the system rewrites the URIs that are part of HTTP
redirect (3XX) responses.
- When set to C(none) the system will not rewrite the URI in any
HTTP redirect responses.
- When set to C(all) the system rewrites the URI in all HTTP redirect responses.
- When set to C(matching) the system rewrites the URI in any
HTTP redirect responses that match the request URI.
- When set to C(nodes) if the URI contains a node IP address instead of a host name,
the system changes it to the virtual server address.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
type: str
choices:
- none
- all
- matching
- nodes
encrypt_cookies:
description:
- Cookie names for the system to encrypt.
- To remove the entry completely a value of C(none) or C('') should be set.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
type: list
encrypt_cookie_secret:
description:
- Passphrase for cookie encryption.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
type: str
update_password:
description:
- C(always) will update passwords if the C(encrypt_cookie_secret) is specified.
- C(on_create) will only set the password for newly created profiles.
type: str
choices:
- always
- on_create
default: always
header_erase:
description:
- The name of a header, in an HTTP request, which the system removes from the request.
- To remove the entry completely a value of C(none) or C('') should be set.
- The format of the header must be in C(KEY:VALUE) format, otherwise error is raised.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
type: str
version_added: 2.8
header_insert:
description:
- A string that the system inserts as a header in an HTTP request.
- To remove the entry completely a value of C(none) or C('') should be set.
- The format of the header must be in C(KEY:VALUE) format, otherwise error is raised.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
type: str
version_added: 2.8
server_agent_name:
description:
- Specifies the string used as the server name in traffic generated by BIG-IP.
- To remove the entry completely a value of C(none) or C('') should be set.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
type: str
version_added: 2.8
include_subdomains:
description:
- When set to C(yes), applies the HSTS policy to the HSTS host and its sub-domains.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
type: bool
version_added: 2.8
maximum_age:
description:
- Specifies the maximum length of time, in seconds, that HSTS functionality
requests that clients only use HTTPS to connect to the current host and
any sub-domains of the current host's domain name.
- The accepted value range is C(0 - 4294967295) seconds; a value of C(0) seconds
re-enables plaintext HTTP access, while specifying C(indefinite) will set it to the maximum value.
- When creating a new profile, if this parameter is not specified, the
default is provided by the parent profile.
type: str
version_added: 2.8
hsts_mode:
description:
- When set to C(yes), enables the HSTS settings.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: bool
version_added: 2.8
accept_xff:
description:
- Enables or disables trusting the client IP address, and statistics from the client IP address,
based on the request's XFF (X-forwarded-for) headers, if they exist.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: bool
version_added: 2.9
xff_alternative_names:
description:
- Specifies alternative XFF headers instead of the default X-forwarded-for header.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: list
version_added: 2.9
fallback_host:
description:
- Specifies an HTTP fallback host.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: str
version_added: 2.9
fallback_status_codes:
description:
- Specifies one or more HTTP error codes from server responses that should trigger
a redirection to the fallback host.
- The accepted valid error codes are as defined by rfc2616.
- The codes can be specified as individual items or as valid ranges e.g. C(400-417) or C(500-505).
- Mixing response code range across error types is invalid e.g. defining C(400-505) will raise an error.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: list
version_added: 2.9
oneconnect_transformations:
description:
- Enables the system to perform HTTP header transformations for the purpose of keeping server-side
connections open. This feature requires configuration of a OneConnect profile.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: bool
version_added: 2.9
request_chunking:
description:
- Specifies how to handle chunked and unchunked requests.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: str
choices:
- rechunk
- selective
- preserve
version_added: 2.9
response_chunking:
description:
- Specifies how to handle chunked and unchunked responses.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
type: str
choices:
- rechunk
- selective
- preserve
version_added: 2.9
enforcement:
description:
- Specifies protocol enforcement settings for the HTTP profile.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
suboptions:
truncated_redirects:
description:
- Specifies what happens if a truncated redirect is seen from a server.
- If C(yes), the redirect will be forwarded to the client, otherwise the malformed HTTP
will be silently ignored.
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: bool
excess_client_headers:
description:
- Specifies the behavior when too many client headers are received.
- If set to C(pass-through), the system will switch to pass-through mode; when set to C(reject), the connection will be rejected.
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: str
choices:
- reject
- pass-through
excess_server_headers:
description:
- Specifies the behavior when too many server headers are received.
- If set to C(pass-through), the system will switch to pass-through mode; when set to C(reject), the connection will be rejected.
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: str
choices:
- reject
- pass-through
oversize_client_headers:
description:
- Specifies the behavior when too-large client headers are received.
- If set to C(pass-through), the system will switch to pass-through mode; when set to C(reject), the connection will be rejected.
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: str
choices:
- reject
- pass-through
oversize_server_headers:
description:
- Specifies the behavior when too-large server headers are received.
- If set to C(pass-through), the system will switch to pass-through mode; when set to C(reject), the connection will be rejected.
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: str
choices:
- reject
- pass-through
pipeline:
description:
- Enables HTTP/1.1 pipelining, allowing clients to make requests even when prior requests have not received
a response.
- In order for this to succeed, however, destination servers must include support for pipelining.
- If set to C(pass-through), pipelined data will cause the BIG-IP to immediately switch to pass-through mode
and disable the HTTP filter.
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: str
choices:
- allow
- reject
- pass-through
unknown_method:
description:
- Specifies whether to allow, reject or switch to pass-through mode when an unknown HTTP method is parsed.
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: str
choices:
- allow
- reject
- pass-through
max_header_count:
description:
- Specifies the maximum number of headers allowed in HTTP request/response.
- The valid value range is between 16 and 4096 inclusive.
- When set to C(default) the value of this parameter will be C(64).
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: str
max_header_size:
description:
- Specifies the maximum header size specified in bytes.
- The valid value range is between 0 and 4294967295 inclusive.
- When set to C(default) the value of this parameter will be C(32768) bytes.
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: str
max_requests:
description:
- Specifies the number of requests that the system accepts on a per-connection basis.
- The valid value range is between 0 and 4294967295 inclusive.
- When set to C(default) the value of this parameter will be C(0), which means the system
will not limit the number of requests per connection.
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: str
known_methods:
description:
- Specifies which HTTP methods count as being known; removing RFC-defined methods from this list
will cause the HTTP filter to not recognize them.
- "The default list provided with the system include: C(CONNECT), C(DELETE), C(GET),
C(HEAD), C(LOCK), C(OPTIONS), C(POST), C(PROPFIND), C(PUT), C(TRACE) ,C(UNLOCK). The list can be appended by
by specifying C(default) keyword as one of the list elements."
- The C(default) keyword can also be used to restore the default C(known_methods) on the system.
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: list
type: dict
version_added: 2.9
sflow:
description:
- Specifies sFlow settings for the HTTP profile.
- When creating a new profile, if this parameter is not specified, the default is provided by the parent profile.
suboptions:
poll_interval:
description:
- Specifies the maximum interval in seconds between two pollings.
- The valid value range is between 0 and 4294967295 seconds inclusive.
- For this setting to take effect the C(poll_interval_global) parameter must be set to C(no).
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: int
poll_interval_global:
description:
- Specifies whether the global HTTP poll-interval setting overrides the object-level C(poll_interval) setting.
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: bool
sampling_rate:
description:
- Specifies the ratio of packets observed to the samples generated. For example, a sampling rate of C(2000)
specifies that 1 sample will be randomly generated for every 2000 packets observed.
- The valid value range is between 0 and 4294967295 packets inclusive.
- For this setting to take effect the C(sampling_rate_global) parameter must be set to C(no).
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: int
sampling_rate_global:
description:
- Specifies whether the global HTTP sampling-rate setting overrides the object-level sampling-rate setting.
- When creating a new profile, if this parameter is not specified, the default is provided
by the parent profile.
type: bool
type: dict
version_added: 2.9
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures that the profile exists.
- When C(absent), ensures the profile is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create HTTP profile
bigip_profile_http:
name: my_profile
insert_xforwarded_for: yes
redirect_rewrite: all
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Remove HTTP profile
bigip_profile_http:
name: my_profile
state: absent
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add HTTP profile for transparent proxy
bigip_profile_http:
name: my_profile
proxy_type: transparent
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
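# Additional illustrative example (not part of the original module
# documentation); the values below are placeholders showing the enforcement
# and sflow sub-options in use.
- name: Create HTTP profile with enforcement and sFlow settings
  bigip_profile_http:
    name: my_enforced_profile
    enforcement:
      max_header_count: 128
      unknown_method: reject
      truncated_redirects: yes
    sflow:
      poll_interval: 30
      poll_interval_global: no
    state: present
    provider:
      server: lb.mydomain.com
      user: admin
      password: secret
  delegate_to: localhost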
'''
RETURN = r'''
parent:
description: Specifies the profile from which this profile inherits settings.
returned: changed
type: str
sample: /Common/http
description:
description: Description of the profile.
returned: changed
type: str
sample: My profile
proxy_type:
description: Specifies the proxy mode of the profile.
returned: changed
type: str
sample: explicit
hsts_mode:
description: Enables the HSTS settings.
returned: changed
type: bool
sample: no
maximum_age:
description: The maximum length of time, in seconds, that HSTS functionality requests that clients only use HTTPS.
returned: changed
type: str
sample: indefinite
include_subdomains:
description: Applies the HSTS policy to the HSTS host and its sub-domains.
returned: changed
type: bool
sample: yes
server_agent_name:
description: The string used as the server name in traffic generated by BIG-IP.
returned: changed
type: str
sample: foobar
header_erase:
description: The name of a header, in an HTTP request, which the system removes from the request.
returned: changed
type: str
sample: FOO:BAR
header_insert:
description: The string that the system inserts as a header in an HTTP request.
returned: changed
type: str
sample: FOO:BAR
insert_xforwarded_for:
description: Insert X-Forwarded-For header.
returned: changed
type: bool
sample: yes
redirect_rewrite:
description: Rewrite URIs that are part of 3xx responses.
returned: changed
type: str
sample: all
encrypt_cookies:
description: Cookie names to encrypt.
returned: changed
type: list
sample: ['MyCookie1', 'MyCookie2']
dns_resolver:
description: Configured dns resolver.
returned: changed
type: str
sample: '/Common/FooBar'
accept_xff:
description: Enables or disables trusting the client IP address, and statistics from the client IP address.
returned: changed
type: bool
sample: yes
xff_alternative_names:
description: Specifies alternative XFF headers instead of the default X-forwarded-for header.
returned: changed
type: list
sample: ['FooBar', 'client1']
fallback_host:
description: Specifies an HTTP fallback host.
returned: changed
type: str
sample: 'foobar.com'
fallback_status_codes:
description: HTTP error codes from server responses that should trigger a redirection to the fallback host.
returned: changed
type: list
sample: ['400-404', '500', '501']
oneconnect_transformations:
description: Enables or disables HTTP header transformations.
returned: changed
type: bool
sample: no
request_chunking:
description: Specifies how to handle chunked and unchunked requests.
returned: changed
type: str
sample: rechunk
response_chunking:
description: Specifies how to handle chunked and unchunked responses.
returned: changed
type: str
sample: rechunk
enforcement:
description: Specifies protocol enforcement settings for the HTTP profile.
type: complex
returned: changed
contains:
truncated_redirects:
description: Specifies what happens if a truncated redirect is seen from a server.
returned: changed
type: bool
sample: yes
excess_server_headers:
description: Specifies the behavior when too many server headers are received.
returned: changed
type: str
sample: pass-through
oversize_client_headers:
description: Specifies the behavior when too-large client headers are received.
returned: changed
type: str
sample: reject
oversize_server_headers:
description: Specifies the behavior when too-large server headers are received.
returned: changed
type: str
sample: reject
pipeline:
description: Allows, rejects or switches to pass-through mode when dealing with pipelined data.
returned: changed
type: str
sample: allow
unknown_method:
description: Allows, rejects or switches to pass-through mode when an unknown HTTP method is parsed.
returned: changed
type: str
sample: allow
max_header_count:
description: The maximum number of headers allowed in HTTP request/response.
returned: changed
type: str
sample: 4096
max_header_size:
description: The maximum header size specified in bytes.
returned: changed
type: str
sample: default
max_requests:
description: The number of requests that the system accepts on a per-connection basis.
returned: changed
type: str
sample: default
known_methods:
description: The list of known HTTP methods.
returned: changed
type: list
sample: ['default', 'FOO', 'BAR']
sample: hash/dictionary of values
sflow:
description: Specifies sFlow settings for the HTTP profile.
type: complex
returned: changed
contains:
poll_interval:
description: Specifies the maximum interval in seconds between two pollings.
returned: changed
type: int
sample: 30
poll_interval_global:
description: Enables/Disables overriding HTTP poll-interval setting.
returned: changed
type: bool
sample: yes
sampling_rate:
description: Specifies the ratio of packets observed to the samples generated.
returned: changed
type: int
sample: 2000
sampling_rate_global:
description: Enables/Disables overriding HTTP sampling-rate setting.
returned: changed
type: bool
sample: yes
sample: hash/dictionary of values
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.compare import cmp_simple_list
from library.module_utils.network.f5.urls import check_header_validity
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.compare import cmp_simple_list
from ansible.module_utils.network.f5.urls import check_header_validity
class Parameters(AnsibleF5Parameters):
api_map = {
'defaultsFrom': 'parent',
'insertXforwardedFor': 'insert_xforwarded_for',
'redirectRewrite': 'redirect_rewrite',
'encryptCookies': 'encrypt_cookies',
'encryptCookieSecret': 'encrypt_cookie_secret',
'proxyType': 'proxy_type',
'explicitProxy': 'explicit_proxy',
'headerErase': 'header_erase',
'headerInsert': 'header_insert',
'serverAgentName': 'server_agent_name',
'includeSubdomains': 'include_subdomains',
'maximumAge': 'maximum_age',
'mode': 'hsts_mode',
'acceptXff': 'accept_xff',
'xffAlternativeNames': 'xff_alternative_names',
'fallbackHost': 'fallback_host',
'fallbackStatusCodes': 'fallback_status_codes',
'oneconnectTransformations': 'oneconnect_transformations',
'requestChunking': 'request_chunking',
'responseChunking': 'response_chunking',
}
api_attributes = [
'insertXforwardedFor',
'description',
'defaultsFrom',
'redirectRewrite',
'encryptCookies',
'encryptCookieSecret',
'proxyType',
'explicitProxy',
'headerErase',
'headerInsert',
'hsts',
'serverAgentName',
'acceptXff',
'xffAlternativeNames',
'fallbackHost',
'fallbackStatusCodes',
'oneconnectTransformations',
'requestChunking',
'responseChunking',
'enforcement',
'sflow',
]
returnables = [
'parent',
'description',
'insert_xforwarded_for',
'redirect_rewrite',
'encrypt_cookies',
'proxy_type',
'explicit_proxy',
'dns_resolver',
'hsts_mode',
'maximum_age',
'include_subdomains',
'server_agent_name',
'header_erase',
'header_insert',
'accept_xff',
'xff_alternative_names',
'fallback_host',
'fallback_status_codes',
'oneconnect_transformations',
'request_chunking',
'response_chunking',
'truncated_redirects',
'excess_client_headers',
'excess_server_headers',
'oversize_client_headers',
'oversize_server_headers',
'pipeline',
'unknown_method',
'max_header_count',
'max_header_size',
'max_requests',
'known_methods',
'poll_interval',
'poll_interval_global',
'sampling_rate',
'sampling_rate_global',
]
updatables = [
'description',
'insert_xforwarded_for',
'redirect_rewrite',
'encrypt_cookies',
'encrypt_cookie_secret',
'proxy_type',
'dns_resolver',
'hsts_mode',
'maximum_age',
'include_subdomains',
'server_agent_name',
'header_erase',
'header_insert',
'accept_xff',
'xff_alternative_names',
'fallback_host',
'fallback_status_codes',
'oneconnect_transformations',
'request_chunking',
'response_chunking',
'truncated_redirects',
'excess_client_headers',
'excess_server_headers',
'oversize_client_headers',
'oversize_server_headers',
'pipeline',
'unknown_method',
'max_header_count',
'max_header_size',
'max_requests',
'known_methods',
'poll_interval',
'poll_interval_global',
'sampling_rate',
'sampling_rate_global',
]
class ApiParameters(Parameters):
@property
def poll_interval(self):
return self._values['sflow']['pollInterval']
@property
def poll_interval_global(self):
return self._values['sflow']['pollIntervalGlobal']
@property
def sampling_rate(self):
return self._values['sflow']['samplingRate']
@property
def sampling_rate_global(self):
return self._values['sflow']['samplingRateGlobal']
@property
def truncated_redirects(self):
return self._values['enforcement']['truncatedRedirects']
@property
def excess_client_headers(self):
return self._values['enforcement']['excessClientHeaders']
@property
def excess_server_headers(self):
return self._values['enforcement']['excessServerHeaders']
@property
def oversize_client_headers(self):
return self._values['enforcement']['oversizeClientHeaders']
@property
def oversize_server_headers(self):
return self._values['enforcement']['oversizeServerHeaders']
@property
def pipeline(self):
return self._values['enforcement']['pipeline']
@property
def unknown_method(self):
return self._values['enforcement']['unknownMethod']
@property
def max_header_count(self):
return self._values['enforcement']['maxHeaderCount']
@property
def max_header_size(self):
return self._values['enforcement']['maxHeaderSize']
@property
def max_requests(self):
return self._values['enforcement']['maxRequests']
@property
def known_methods(self):
return self._values['enforcement'].get('knownMethods', None)
@property
def dns_resolver(self):
if self._values['explicit_proxy'] is None:
return None
if 'dnsResolver' in self._values['explicit_proxy']:
return self._values['explicit_proxy']['dnsResolver']
@property
def dns_resolver_address(self):
if self._values['explicit_proxy'] is None:
return None
if 'dnsResolverReference' in self._values['explicit_proxy']:
return self._values['explicit_proxy']['dnsResolverReference']
@property
def include_subdomains(self):
if self._values['hsts'] is None:
return None
return self._values['hsts']['includeSubdomains']
@property
def hsts_mode(self):
if self._values['hsts'] is None:
return None
return self._values['hsts']['mode']
@property
def maximum_age(self):
if self._values['hsts'] is None:
return None
return self._values['hsts']['maximumAge']
class ModuleParameters(Parameters):
@property
def accept_xff(self):
result = flatten_boolean(self._values['accept_xff'])
if result is None:
return None
if result == 'yes':
return 'enabled'
return 'disabled'
@property
def fallback_status_codes(self):
if self._values['fallback_status_codes'] is None:
return None
p1 = r'(?!([4][0-1][0-7]))\d{3}'
p2 = r'(?!(50[0-5]))\d{3}'
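# p1 finds any three-digit token outside the valid 4xx range for this
# option (400-417); p2 finds any three-digit token outside the valid 5xx
# range (500-505). If both searches succeed, the value is neither made up
# purely of 4xx codes nor purely of 5xx codes, i.e. it is an invalid code
# or a range mixing error classes, and is rejected below.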
for code in self._values['fallback_status_codes']:
match_4xx = re.search(p1, code)
if match_4xx:
match_5xx = re.search(p2, code)
if match_5xx:
raise F5ModuleError(
'Invalid HTTP error code or error code range specified.'
)
return self._values['fallback_status_codes']
@property
def oneconnect_transformations(self):
result = flatten_boolean(self._values['oneconnect_transformations'])
if result is None:
return None
if result == 'yes':
return 'enabled'
return 'disabled'
@property
def proxy_type(self):
if self._values['proxy_type'] is None:
return None
if self._values['proxy_type'] == 'explicit':
if self.dns_resolver is None or self.dns_resolver == '':
raise F5ModuleError(
'A proxy type cannot be set to {0} without providing DNS resolver.'.format(self._values['proxy_type'])
)
return self._values['proxy_type']
@property
def dns_resolver(self):
if self._values['dns_resolver'] is None:
return None
if self._values['dns_resolver'] == '' or self._values['dns_resolver'] == 'none':
return ''
result = fq_name(self.partition, self._values['dns_resolver'])
return result
@property
def dns_resolver_address(self):
resolver = self.dns_resolver
if resolver is None:
return None
tmp = resolver.split('/')
link = dict(link='https://localhost/mgmt/tm/net/dns-resolver/~{0}~{1}'.format(tmp[1], tmp[2]))
return link
@property
def insert_xforwarded_for(self):
result = flatten_boolean(self._values['insert_xforwarded_for'])
if result is None:
return None
if result == 'yes':
return 'enabled'
return 'disabled'
@property
def parent(self):
if self._values['parent'] is None:
return None
result = fq_name(self.partition, self._values['parent'])
return result
@property
def encrypt_cookies(self):
if self._values['encrypt_cookies'] is None:
return None
if self._values['encrypt_cookies'] == [''] or self._values['encrypt_cookies'] == ['none']:
return list()
return self._values['encrypt_cookies']
@property
def explicit_proxy(self):
if self.dns_resolver is None:
return None
result = dict(
dnsResolver=self.dns_resolver,
dnsResolverReference=self.dns_resolver_address
)
return result
@property
def include_subdomains(self):
result = flatten_boolean(self._values['include_subdomains'])
if result is None:
return None
if result == 'yes':
return 'enabled'
return 'disabled'
@property
def maximum_age(self):
if self._values['maximum_age'] is None:
return None
if self._values['maximum_age'] == 'indefinite':
return 4294967295
if 0 <= int(self._values['maximum_age']) <= 4294967295:
return int(self._values['maximum_age'])
raise F5ModuleError(
"Valid 'maximum_age' must be in range 0 - 4294967295, or 'indefinite'."
)
@property
def hsts_mode(self):
result = flatten_boolean(self._values['hsts_mode'])
if result is None:
return None
if result == 'yes':
return 'enabled'
return 'disabled'
@property
def header_erase(self):
header_erase = self._values['header_erase']
if header_erase is None:
return None
if header_erase in ['none', '']:
return self._values['header_erase']
check_header_validity(header_erase)
return header_erase
@property
def header_insert(self):
header_insert = self._values['header_insert']
if header_insert is None:
return None
if header_insert in ['none', '']:
return self._values['header_insert']
check_header_validity(header_insert)
return header_insert
@property
def excess_client_headers(self):
if self._values['enforcement'] is None:
return None
return self._values['enforcement']['excess_client_headers']
@property
def excess_server_headers(self):
if self._values['enforcement'] is None:
return None
return self._values['enforcement']['excess_server_headers']
@property
def oversize_client_headers(self):
if self._values['enforcement'] is None:
return None
return self._values['enforcement']['oversize_client_headers']
@property
def oversize_server_headers(self):
if self._values['enforcement'] is None:
return None
return self._values['enforcement']['oversize_server_headers']
@property
def pipeline(self):
if self._values['enforcement'] is None:
return None
return self._values['enforcement']['pipeline']
@property
def unknown_method(self):
if self._values['enforcement'] is None:
return None
return self._values['enforcement']['unknown_method']
@property
def truncated_redirects(self):
if self._values['enforcement'] is None:
return None
result = flatten_boolean(self._values['enforcement']['truncated_redirects'])
if result is None:
return None
if result == 'yes':
return 'enabled'
return 'disabled'
@property
def max_header_count(self):
if self._values['enforcement'] is None:
return None
if self._values['enforcement']['max_header_count'] is None:
return None
if self._values['enforcement']['max_header_count'] == 'default':
return 64
if 16 <= int(self._values['enforcement']['max_header_count']) <= 4096:
return int(self._values['enforcement']['max_header_count'])
raise F5ModuleError(
"Valid 'max_header_count' must be in range 16 - 4096, or 'default'."
)
@property
def max_header_size(self):
if self._values['enforcement'] is None:
return None
if self._values['enforcement']['max_header_size'] is None:
return None
if self._values['enforcement']['max_header_size'] == 'default':
return 32768
if 0 <= int(self._values['enforcement']['max_header_size']) <= 4294967295:
return int(self._values['enforcement']['max_header_size'])
raise F5ModuleError(
"Valid 'max_header_size' must be in range 0 - 4294967295, or 'default'."
)
@property
def max_requests(self):
if self._values['enforcement'] is None:
return None
if self._values['enforcement']['max_requests'] is None:
return None
if self._values['enforcement']['max_requests'] == 'default':
return 0
if 0 <= int(self._values['enforcement']['max_requests']) <= 4294967295:
return int(self._values['enforcement']['max_requests'])
raise F5ModuleError(
"Valid 'max_requests' must be in range 0 - 4294967295, or 'default'."
)
@property
def known_methods(self):
if self._values['enforcement'] is None:
return None
defaults = ['CONNECT', 'DELETE', 'GET', 'HEAD', 'LOCK', 'OPTIONS', 'POST', 'PROPFIND', 'PUT', 'TRACE', 'UNLOCK']
known = self._values['enforcement']['known_methods']
if known is None:
return None
if len(known) == 1:
if known[0] == 'default':
return defaults
if known[0] == '':
return []
if 'default' in known:
to_return = [method for method in known if method != 'default']
to_return.extend(defaults)
return to_return
result = [method for method in known]
return result
@property
def poll_interval(self):
if self._values['sflow'] is None:
return None
if self._values['sflow']['poll_interval'] is None:
return None
if 0 <= self._values['sflow']['poll_interval'] <= 4294967295:
return self._values['sflow']['poll_interval']
raise F5ModuleError(
"Valid 'poll_interval' must be in range 0 - 4294967295 seconds."
)
@property
def sampling_rate(self):
if self._values['sflow'] is None:
return None
if self._values['sflow']['sampling_rate'] is None:
return None
if 0 <= self._values['sflow']['sampling_rate'] <= 4294967295:
return self._values['sflow']['sampling_rate']
raise F5ModuleError(
"Valid 'sampling_rate' must be in range 0 - 4294967295 packets."
)
@property
def poll_interval_global(self):
if self._values['sflow'] is None:
return None
result = flatten_boolean(self._values['sflow']['poll_interval_global'])
return result
@property
def sampling_rate_global(self):
if self._values['sflow'] is None:
return None
result = flatten_boolean(self._values['sflow']['sampling_rate_global'])
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def explicit_proxy(self):
result = dict()
if self._values['dns_resolver'] is not None:
result['dnsResolver'] = self._values['dns_resolver']
if self._values['dns_resolver_address'] is not None:
result['dnsResolverReference'] = self._values['dns_resolver_address']
if not result:
return None
return result
@property
def hsts(self):
result = dict()
if self._values['hsts_mode'] is not None:
result['mode'] = self._values['hsts_mode']
if self._values['maximum_age'] is not None:
result['maximumAge'] = self._values['maximum_age']
if self._values['include_subdomains'] is not None:
result['includeSubdomains'] = self._values['include_subdomains']
if not result:
return None
return result
@property
def enforcement(self):
to_filter = dict(
excessClientHeaders=self._values['excess_client_headers'],
excessServerHeaders=self._values['excess_server_headers'],
knownMethods=self._values['known_methods'],
maxHeaderCount=self._values['max_header_count'],
maxHeaderSize=self._values['max_header_size'],
maxRequests=self._values['max_requests'],
oversizeClientHeaders=self._values['oversize_client_headers'],
oversizeServerHeaders=self._values['oversize_server_headers'],
pipeline=self._values['pipeline'],
truncatedRedirects=self._values['truncated_redirects'],
unknownMethod=self._values['unknown_method']
)
result = self._filter_params(to_filter)
if result:
return result
@property
def sflow(self):
to_filter = dict(
pollInterval=self._values['poll_interval'],
pollIntervalGlobal=self._values['poll_interval_global'],
samplingRate=self._values['sampling_rate'],
samplingRateGlobal=self._values['sampling_rate_global'],
)
result = self._filter_params(to_filter)
if result:
return result
class ReportableChanges(Changes):
returnables = [
'parent',
'description',
'insert_xforwarded_for',
'redirect_rewrite',
'encrypt_cookies',
'proxy_type',
'explicit_proxy',
'dns_resolver',
'hsts_mode',
'maximum_age',
'include_subdomains',
'server_agent_name',
'header_erase',
'header_insert',
'accept_xff',
'xff_alternative_names',
'fallback_host',
'fallback_status_codes',
'oneconnect_transformations',
'request_chunking',
'response_chunking',
'enforcement',
'sflow'
]
@property
def insert_xforwarded_for(self):
if self._values['insert_xforwarded_for'] is None:
return None
elif self._values['insert_xforwarded_for'] == 'enabled':
return 'yes'
return 'no'
@property
def hsts_mode(self):
if self._values['hsts_mode'] is None:
return None
elif self._values['hsts_mode'] == 'enabled':
return 'yes'
return 'no'
@property
def include_subdomains(self):
if self._values['include_subdomains'] is None:
return None
elif self._values['include_subdomains'] == 'enabled':
return 'yes'
return 'no'
@property
def maximum_age(self):
if self._values['maximum_age'] is None:
return None
if self._values['maximum_age'] == 4294967295:
return 'indefinite'
return int(self._values['maximum_age'])
@property
def truncated_redirects(self):
result = flatten_boolean(self._values['truncated_redirects'])
return result
@property
def max_header_count(self):
if self._values['max_header_count'] is None:
return None
if self._values['max_header_count'] == 64:
return 'default'
return str(self._values['max_header_count'])
@property
def max_header_size(self):
if self._values['max_header_size'] is None:
return None
if self._values['max_header_size'] == 32768:
return 'default'
return str(self._values['max_header_size'])
@property
def max_requests(self):
if self._values['max_requests'] is None:
return None
if self._values['max_requests'] == 0:
return 'default'
return str(self._values['max_requests'])
@property
def known_methods(self):
defaults = ['CONNECT', 'DELETE', 'GET', 'HEAD', 'LOCK', 'OPTIONS', 'POST', 'PROPFIND', 'PUT', 'TRACE', 'UNLOCK']
known = self._values['known_methods']
if known is None:
return None
if not known:
return ['']
if set(known) == set(defaults):
return ['default']
if set(known).issuperset(set(defaults)):
result = [item for item in known if item not in defaults]
result.append('default')
return result
return known
@property
def enforcement(self):
to_filter = dict(
excess_client_headers=self._values['excess_client_headers'],
excess_server_headers=self._values['excess_server_headers'],
known_methods=self.known_methods,
max_header_count=self.max_header_count,
max_header_size=self.max_header_size,
max_requests=self.max_requests,
oversize_client_headers=self._values['oversize_client_headers'],
oversize_server_headers=self._values['oversize_server_headers'],
pipeline=self._values['pipeline'],
truncated_redirects=self.truncated_redirects,
unknown_method=self._values['unknown_method']
)
result = self._filter_params(to_filter)
if result:
return result
@property
def accept_xff(self):
result = flatten_boolean(self._values['accept_xff'])
return result
@property
def oneconnect_transformations(self):
result = flatten_boolean(self._values['oneconnect_transformations'])
return result
@property
def sflow(self):
to_filter = dict(
poll_interval=self._values['poll_interval'],
poll_interval_global=self._values['poll_interval_global'],
sampling_rate=self._values['sampling_rate'],
sampling_rate_global=self._values['sampling_rate_global'],
)
result = self._filter_params(to_filter)
if result:
return result
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
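# compare() first looks for a property on this class named after the
# parameter being diffed (e.g. dns_resolver, header_erase) and delegates to
# it; __default() is the fallback, returning the wanted value whenever it
# differs from what is currently configured on the device.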
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent http profile cannot be changed"
)
@property
def dns_resolver(self):
if self.want.dns_resolver is None:
return None
if self.want.dns_resolver == '':
if self.have.dns_resolver is None or self.have.dns_resolver == 'none':
return None
elif self.have.proxy_type == 'explicit' and self.want.proxy_type is None:
raise F5ModuleError(
"DNS resolver cannot be empty or 'none' if an existing profile proxy type is set to {0}.".format(self.have.proxy_type)
)
elif self.have.dns_resolver is not None:
return self.want.dns_resolver
if self.have.dns_resolver is None:
return self.want.dns_resolver
@property
def header_erase(self):
if self.want.header_erase is None:
return None
if self.want.header_erase in ['none', '']:
if self.have.header_erase in [None, 'none']:
return None
if self.want.header_erase != self.have.header_erase:
return self.want.header_erase
@property
def header_insert(self):
if self.want.header_insert is None:
return None
if self.want.header_insert in ['none', '']:
if self.have.header_insert in [None, 'none']:
return None
if self.want.header_insert != self.have.header_insert:
return self.want.header_insert
@property
def server_agent_name(self):
if self.want.server_agent_name is None:
return None
if self.want.server_agent_name in ['none', '']:
if self.have.server_agent_name in [None, 'none']:
return None
if self.want.server_agent_name != self.have.server_agent_name:
return self.want.server_agent_name
@property
def encrypt_cookies(self):
if self.want.encrypt_cookies is None:
return None
if self.have.encrypt_cookies in [None, []]:
if not self.want.encrypt_cookies:
return None
else:
return self.want.encrypt_cookies
if set(self.want.encrypt_cookies) != set(self.have.encrypt_cookies):
return self.want.encrypt_cookies
@property
def encrypt_cookie_secret(self):
if self.want.encrypt_cookie_secret != self.have.encrypt_cookie_secret:
if self.want.update_password == 'always':
result = self.want.encrypt_cookie_secret
return result
@property
def xff_alternative_names(self):
result = cmp_simple_list(self.want.xff_alternative_names, self.have.xff_alternative_names)
return result
@property
def fallback_status_codes(self):
result = cmp_simple_list(self.want.fallback_status_codes, self.have.fallback_status_codes)
return result
@property
def known_methods(self):
result = cmp_simple_list(self.want.known_methods, self.have.known_methods)
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403, 404]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['selfLink']
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 404]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/profile/http/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.chunk = ['rechunk', 'selective', 'preserve']
self.choices = ['pass-through', 'reject']
self.select = ['allow', 'pass-through', 'reject']
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/http'),
description=dict(),
accept_xff=dict(type='bool'),
xff_alternative_names=dict(type='list'),
fallback_host=dict(),
fallback_status_codes=dict(type='list'),
oneconnect_transformations=dict(type='bool'),
request_chunking=dict(choices=self.chunk),
response_chunking=dict(choices=self.chunk),
proxy_type=dict(
choices=[
'reverse',
'transparent',
'explicit'
]
),
dns_resolver=dict(),
insert_xforwarded_for=dict(type='bool'),
redirect_rewrite=dict(
choices=[
'none',
'all',
'matching',
'nodes'
]
),
encrypt_cookies=dict(type='list'),
encrypt_cookie_secret=dict(no_log=True),
update_password=dict(
default='always',
choices=['always', 'on_create']
),
header_erase=dict(),
header_insert=dict(),
server_agent_name=dict(),
hsts_mode=dict(type='bool'),
maximum_age=dict(),
include_subdomains=dict(type='bool'),
enforcement=dict(
type='dict',
options=dict(
truncated_redirects=dict(type='bool'),
excess_client_headers=dict(choices=self.choices),
excess_server_headers=dict(choices=self.choices),
oversize_client_headers=dict(choices=self.choices),
oversize_server_headers=dict(choices=self.choices),
pipeline=dict(choices=self.select),
unknown_method=dict(choices=self.select),
max_header_count=dict(),
max_header_size=dict(),
max_requests=dict(),
known_methods=dict(type='list'),
)
),
sflow=dict(
type='dict',
options=dict(
poll_interval=dict(type='int'),
poll_interval_global=dict(type='bool'),
sampling_rate=dict(type='int'),
sampling_rate_global=dict(type='bool'),
)
),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
leeseulstack/openstack | neutron/tests/unit/ml2/drivers/brocade/test_brocade_mechanism_driver.py | 8 | 3928 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo.utils import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import config as ml2_config
from neutron.plugins.ml2.drivers.brocade import (mechanism_brocade
as brocademechanism)
from neutron.tests.unit.ml2 import test_ml2_plugin
LOG = logging.getLogger(__name__)
MECHANISM_NAME = ('neutron.plugins.ml2.'
'drivers.brocade.mechanism_brocade.BrocadeMechanism')
class TestBrocadeMechDriverV2(test_ml2_plugin.Ml2PluginV2TestCase):
"""Test Brocade VCS/VDX mechanism driver.
"""
_mechanism_name = MECHANISM_NAME
def setUp(self):
_mechanism_name = MECHANISM_NAME
ml2_opts = {
'mechanism_drivers': ['brocade'],
'tenant_network_types': ['vlan']}
for opt, val in ml2_opts.items():
ml2_config.cfg.CONF.set_override(opt, val, 'ml2')
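# Patch BrocadeMechanism.brocade_init with a stub so that building the ML2
# plugin in these tests never tries to reach a real VCS/VDX switch; the
# NETCONF driver is replaced by a MagicMock instead.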
def mocked_brocade_init(self):
self._driver = mock.MagicMock()
with mock.patch.object(brocademechanism.BrocadeMechanism,
'brocade_init', new=mocked_brocade_init):
super(TestBrocadeMechDriverV2, self).setUp()
self.mechanism_driver = importutils.import_object(_mechanism_name)
class TestBrocadeMechDriverNetworksV2(test_ml2_plugin.TestMl2NetworksV2,
TestBrocadeMechDriverV2):
pass
class TestBrocadeMechDriverPortsV2(test_ml2_plugin.TestMl2PortsV2,
TestBrocadeMechDriverV2):
pass
class TestBrocadeMechDriverSubnetsV2(test_ml2_plugin.TestMl2SubnetsV2,
TestBrocadeMechDriverV2):
pass
class TestBrocadeMechDriverFeaturesEnabledTestCase(TestBrocadeMechDriverV2):
def setUp(self):
super(TestBrocadeMechDriverFeaturesEnabledTestCase, self).setUp()
def test_version_features(self):
vf = True
# Test for NOS version 4.0.3
self.mechanism_driver.set_features_enabled("4.0.3", vf)
# Verify
pp_domain_support, virtual_fabric_enabled = (
self.mechanism_driver.get_features_enabled()
)
self.assertFalse(pp_domain_support)
self.assertTrue(virtual_fabric_enabled)
# Test for NOS version 4.1.0
vf = True
self.mechanism_driver.set_features_enabled("4.1.0", vf)
# Verify
pp_domain_support, virtual_fabric_enabled = (
self.mechanism_driver.get_features_enabled()
)
self.assertTrue(pp_domain_support)
self.assertTrue(virtual_fabric_enabled)
# Test for NOS version 4.1.3
vf = False
self.mechanism_driver.set_features_enabled("4.1.3", vf)
# Verify
pp_domain_support, virtual_fabric_enabled = (
self.mechanism_driver.get_features_enabled()
)
self.assertTrue(pp_domain_support)
self.assertFalse(virtual_fabric_enabled)
# Test for NOS version 5.0.0
vf = True
self.mechanism_driver.set_features_enabled("5.0.0", vf)
# Verify
pp_domain_support, virtual_fabric_enabled = (
self.mechanism_driver.get_features_enabled()
)
self.assertTrue(pp_domain_support)
self.assertTrue(virtual_fabric_enabled)
| apache-2.0 |
jonsmirl/buildroot | support/scripts/kconfiglib.py | 87 | 137745 | # This is Kconfiglib, a Python library for scripting, debugging, and extracting
# information from Kconfig-based configuration systems. To view the
# documentation, run
#
# $ pydoc kconfiglib
#
# or, if you prefer HTML,
#
# $ pydoc -w kconfiglib
#
# The examples/ subdirectory contains examples, to be run with e.g.
#
# $ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
#
# Look in testsuite.py for the test suite.
"""
Kconfiglib is a Python library for scripting and extracting information from
Kconfig-based configuration systems. Features include the following:
- Symbol values and properties can be looked up and values assigned
programmatically.
- .config files can be read and written.
- Expressions can be evaluated in the context of a Kconfig configuration.
- Relations between symbols can be quickly determined, such as finding all
symbols that reference a particular symbol.
- Highly compatible with the scripts/kconfig/*conf utilities. The test suite
automatically compares outputs between Kconfiglib and the C implementation
for a large number of cases.
For the Linux kernel, scripts are run using
$ make scriptconfig [ARCH=<architecture>] SCRIPT=<path to script> [SCRIPT_ARG=<arg>]
Using the 'scriptconfig' target ensures that required environment variables
(SRCARCH, ARCH, srctree, KERNELVERSION, etc.) are set up correctly.
Scripts receive the name of the Kconfig file to load in sys.argv[1]. As of
Linux 4.1.0-rc5, this is always "Kconfig" from the kernel top-level directory.
If an argument is provided with SCRIPT_ARG, it appears as sys.argv[2].
To get an interactive Python prompt with Kconfiglib preloaded and a Config
object 'c' created, run
$ make iscriptconfig [ARCH=<architecture>]
Kconfiglib works with both Python 2 and Python 3. For (i)scriptconfig, the Python interpreter
to use can be passed in PYTHONCMD. It defaults to 'python', but PyPy works too
and might be faster for long-running jobs.
The examples/ directory contains short example scripts, which can be run with
e.g.
$ make scriptconfig SCRIPT=Kconfiglib/examples/print_tree.py
or
$ make scriptconfig SCRIPT=Kconfiglib/examples/help_grep.py SCRIPT_ARG=kernel
testsuite.py contains the test suite. See the top of the script for how to run
it.
Credits: Written by Ulf "Ulfalizer" Magnusson
Send bug reports, suggestions and other feedback to ulfalizer a.t Google's
email service. Don't wrestle with internal APIs. Tell me what you need and I
might add it in a safe way as a client API instead."""
import os
import re
import sys
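# An illustrative usage sketch (not part of the library itself). A script run
# via 'make scriptconfig' might do something roughly like the following; the
# symbol name "MODULES" is only used as an example here:
#
#   import sys
#   import kconfiglib
#
#   conf = kconfiglib.Config(sys.argv[1])  # sys.argv[1] holds the base Kconfig
#   conf.load_config(".config")            # read an existing .config
#   print(conf["MODULES"].get_value())     # look up a symbol's value
#   conf.write_config(".config.new")       # write the configuration back out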
# File layout:
#
# Public classes
# Public functions
# Internal classes
# Internal functions
# Internal global constants
#
# Public classes
#
class Config(object):
"""Represents a Kconfig configuration, e.g. for i386 or ARM. This is the
set of symbols and other items appearing in the configuration together with
their values. Creating any number of Config objects -- including for
different architectures -- is safe; Kconfiglib has no global state."""
#
# Public interface
#
def __init__(self, filename = "Kconfig", base_dir = None,
print_warnings = True, print_undef_assign = False):
"""Creates a new Config object, representing a Kconfig configuration.
Raises Kconfig_Syntax_Error on syntax errors.
filename (default: "Kconfig") -- The base Kconfig file of the
configuration. For the Linux kernel, you'll probably want
"Kconfig" from the top-level directory, as environment
variables will make sure the right Kconfig is included from
there (arch/<architecture>/Kconfig). If you are using
kconfiglib via 'make scriptconfig', the filename of the base
Kconfig file will be in sys.argv[1].
base_dir (default: None) -- The base directory relative to which
'source' statements within Kconfig files will work. For the
Linux kernel this should be the top-level directory of the
kernel tree. $-references to existing environment variables
will be expanded.
If None (the default), the environment variable 'srctree' will
be used if set, and the current directory otherwise. 'srctree'
is set by the Linux makefiles to the top-level kernel
directory. A default of "." would not work with an alternative
build directory.
print_warnings (default: True) -- Set to True if warnings related to
this configuration should be printed to stderr. This can
be changed later with Config.set_print_warnings(). It is
provided as a constructor argument since warnings might
be generated during parsing.
print_undef_assign (default: False) -- Set to True if informational
messages related to assignments to undefined symbols
should be printed to stderr for this configuration.
Can be changed later with
Config.set_print_undef_assign()."""
# The set of all symbols, indexed by name (a string)
self.syms = {}
# Python 2/3 compatibility hack. This is the only one needed.
if sys.version_info[0] >= 3:
self.syms_iter = self.syms.values
else:
self.syms_iter = self.syms.itervalues
# The set of all defined symbols in the configuration in the order they
# appear in the Kconfig files. This excludes the special symbols n, m,
# and y as well as symbols that are referenced but never defined.
self.kconfig_syms = []
# The set of all named choices (yes, choices can have names), indexed
# by name (a string)
self.named_choices = {}
def register_special_symbol(type_, name, val):
sym = Symbol()
sym.is_special_ = True
sym.is_defined_ = True
sym.config = self
sym.name = name
sym.type = type_
sym.cached_val = val
self.syms[name] = sym
return sym
# The special symbols n, m and y, used as shorthand for "n", "m" and
# "y"
self.n = register_special_symbol(TRISTATE, "n", "n")
self.m = register_special_symbol(TRISTATE, "m", "m")
self.y = register_special_symbol(TRISTATE, "y", "y")
# DEFCONFIG_LIST uses this
register_special_symbol(STRING, "UNAME_RELEASE", os.uname()[2])
# The symbol with "option defconfig_list" set, containing a list of
# default .config files
self.defconfig_sym = None
# See Symbol.get_(src)arch()
self.arch = os.environ.get("ARCH")
self.srcarch = os.environ.get("SRCARCH")
# See Config.__init__(). We need this for get_defconfig_filename().
self.srctree = os.environ.get("srctree")
if self.srctree is None:
self.srctree = "."
self.filename = filename
if base_dir is None:
self.base_dir = self.srctree
else:
self.base_dir = os.path.expandvars(base_dir)
# The 'mainmenu' text
self.mainmenu_text = None
# The filename of the most recently loaded .config file
self.config_filename = None
# The textual header of the most recently loaded .config, uncommented
self.config_header = None
self.print_warnings = print_warnings
self.print_undef_assign = print_undef_assign
# Lists containing all choices, menus and comments in the configuration
self.choices = []
self.menus = []
self.comments = []
# For parsing routines that stop when finding a line belonging to a
# different construct, these hold that line and the tokenized version
# of that line. The purpose is to avoid having to re-tokenize the line,
# which is inefficient and causes problems when recording references to
# symbols.
self.end_line = None
self.end_line_tokens = None
# See the comment in _parse_expr().
self.parse_expr_cur_sym_or_choice = None
self.parse_expr_line = None
self.parse_expr_filename = None
self.parse_expr_linenr = None
self.parse_expr_transform_m = None
# Parse the Kconfig files
self.top_block = self._parse_file(filename, None, None, None)
# Build Symbol.dep for all symbols
self._build_dep()
def load_config(self, filename, replace = True):
"""Loads symbol values from a file in the familiar .config format.
Equivalent to calling Symbol.set_user_value() to set each of the
values.
"# CONFIG_FOO is not set" within a .config file is treated specially
and sets the user value of FOO to 'n'. The C implementation works
the same way.
filename -- The .config file to load. $-references to environment
variables will be expanded. For scripts to work even
when an alternative build directory is used with the
Linux kernel, you need to refer to the top-level kernel
directory with "$srctree".
replace (default: True) -- True if the configuration should replace
the old configuration; False if it should add to it."""
# Put this first so that a missing file doesn't screw up our state
filename = os.path.expandvars(filename)
line_feeder = _FileFeed(filename)
self.config_filename = filename
#
# Read header
#
def is_header_line(line):
return line is not None and line.startswith("#") and \
not _unset_re_match(line)
self.config_header = None
line = line_feeder.peek_next()
if is_header_line(line):
self.config_header = ""
while is_header_line(line_feeder.peek_next()):
self.config_header += line_feeder.get_next()[1:]
# Remove trailing newline
if self.config_header.endswith("\n"):
self.config_header = self.config_header[:-1]
#
# Read assignments. Hotspot for some workloads.
#
def warn_override(filename, linenr, name, old_user_val, new_user_val):
self._warn('overriding the value of {0}. '
'Old value: "{1}", new value: "{2}".'
.format(name, old_user_val, new_user_val),
filename, linenr)
# Invalidate everything to keep things simple. It might be possible to
# improve performance for the case where multiple configurations are
# loaded by only invalidating a symbol (and its dependent symbols) if
# the new user value differs from the old. One complication would be
# that symbols not mentioned in the .config must lose their user value
# when replace = True, which is the usual case.
if replace:
self.unset_user_values()
else:
self._invalidate_all()
while 1:
line = line_feeder.get_next()
if line is None:
return
line = line.rstrip()
set_match = _set_re_match(line)
if set_match:
name, val = set_match.groups()
if val.startswith(('"', "'")):
if len(val) < 2 or val[-1] != val[0]:
_parse_error(line, "malformed string literal",
line_feeder.get_filename(),
line_feeder.get_linenr())
# Strip quotes and remove escapings. The unescaping
# procedure should be safe since " can only appear as \"
# inside the string.
val = val[1:-1].replace('\\"', '"').replace("\\\\", "\\")
if name in self.syms:
sym = self.syms[name]
if sym.user_val is not None:
warn_override(line_feeder.get_filename(),
line_feeder.get_linenr(),
name, sym.user_val, val)
if sym.is_choice_symbol_:
user_mode = sym.parent.user_mode
if user_mode is not None and user_mode != val:
self._warn("assignment to {0} changes mode of containing "
'choice from "{1}" to "{2}".'
.format(name, val, user_mode),
line_feeder.get_filename(),
line_feeder.get_linenr())
sym._set_user_value_no_invalidate(val, True)
else:
if self.print_undef_assign:
_stderr_msg('note: attempt to assign the value "{0}" to the '
"undefined symbol {1}.".format(val, name),
line_feeder.get_filename(),
line_feeder.get_linenr())
else:
unset_match = _unset_re_match(line)
if unset_match:
name = unset_match.group(1)
if name in self.syms:
sym = self.syms[name]
if sym.user_val is not None:
warn_override(line_feeder.get_filename(),
line_feeder.get_linenr(),
name, sym.user_val, "n")
sym._set_user_value_no_invalidate("n", True)
def write_config(self, filename, header = None):
"""Writes out symbol values in the familiar .config format.
Kconfiglib makes sure the format matches what the C implementation
would generate, down to whitespace. This eases testing.
filename -- The filename under which to save the configuration.
header (default: None) -- A textual header that will appear at the
beginning of the file, with each line commented out
automatically. None means no header."""
# already_written is set when _make_conf() is called on a symbol, so
# that symbols defined in multiple locations only get one entry in the
# .config. We need to reset it prior to writing out a new .config.
for sym in self.syms_iter():
sym.already_written = False
with open(filename, "w") as f:
# Write header
if header is not None:
f.write(_comment(header))
f.write("\n")
# Write configuration.
# Passing a list around to all the nodes and appending to it to
# avoid copying was surprisingly a lot slower with PyPy, and about
# as fast with Python. Passing the file around was slower too. Been
# a while since I last measured though.
f.write("\n".join(_make_block_conf(self.top_block)))
f.write("\n")
def get_kconfig_filename(self):
"""Returns the name of the (base) kconfig file this configuration was
loaded from."""
return self.filename
def get_arch(self):
"""Returns the value the environment variable ARCH had at the time the
Config instance was created, or None if ARCH was not set. For the
kernel, this corresponds to the architecture being built for, with
values such as "i386" or "mips"."""
return self.arch
def get_srcarch(self):
"""Returns the value the environment variable SRCARCH had at the time
the Config instance was created, or None if SRCARCH was not set. For
the kernel, this corresponds to the particular arch/ subdirectory
containing architecture-specific code."""
return self.srcarch
def get_srctree(self):
"""Returns the value the environment variable srctree had at the time
the Config instance was created, or None if srctree was not defined.
This variable points to the source directory and is used when building
in a separate directory."""
return self.srctree
def get_config_filename(self):
"""Returns the filename of the most recently loaded configuration file,
or None if no configuration has been loaded."""
return self.config_filename
def get_mainmenu_text(self):
"""Returns the text of the 'mainmenu' statement (with $-references to
symbols replaced by symbol values), or None if the configuration has no
'mainmenu' statement."""
return None if self.mainmenu_text is None else \
self._expand_sym_refs(self.mainmenu_text)
def get_defconfig_filename(self):
"""Returns the name of the defconfig file, which is the first existing
file in the list given in a symbol having 'option defconfig_list' set.
$-references to symbols will be expanded ("$FOO bar" -> "foo bar" if
FOO has the value "foo"). Returns None in case of no defconfig file.
Setting 'option defconfig_list' on multiple symbols currently results
in undefined behavior.
If the environment variable 'srctree' was set when the Config was
created, get_defconfig_filename() will first look relative to that
directory before looking in the current directory; see
Config.__init__().
WARNING: A wart here is that scripts/kconfig/Makefile sometimes uses the
--defconfig=<defconfig> option when calling the C implementation of e.g.
'make defconfig'. This option overrides the 'option defconfig_list'
symbol, meaning the result from get_defconfig_filename() might not
match what 'make defconfig' would use. That probably ought to be worked
around somehow, so that this function always gives the "expected"
result."""
if self.defconfig_sym is None:
return None
for filename, cond_expr in self.defconfig_sym.def_exprs:
if self._eval_expr(cond_expr) == "y":
filename = self._expand_sym_refs(filename)
# We first look in $srctree. os.path.join() won't work here as
# an absolute path in filename would override $srctree.
srctree_filename = os.path.normpath(self.srctree + "/" + filename)
if os.path.exists(srctree_filename):
return srctree_filename
if os.path.exists(filename):
return filename
return None
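# Purely illustrative example of the lookup above (the names are
# hypothetical): if the defconfig_list symbol has an entry
# "arch/$ARCH/defconfig" and the symbol ARCH evaluates to "x86", then
# arch/x86/defconfig is tried first relative to $srctree and then relative to
# the current directory.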
def get_symbol(self, name):
"""Returns the symbol with name 'name', or None if no such symbol
appears in the configuration. An alternative shorthand is conf[name],
where conf is a Config instance, though that will instead raise
KeyError if the symbol does not exist."""
return self.syms.get(name)
def get_top_level_items(self):
"""Returns a list containing the items (symbols, menus, choice
statements and comments) at the top level of the configuration -- that
is, all items that do not appear within a menu or choice. The items
appear in the same order as within the configuration."""
return self.top_block
def get_symbols(self, all_symbols = True):
"""Returns a list of symbols from the configuration. An alternative for
iterating over all defined symbols (in the order of definition) is
for sym in config:
...
which relies on Config implementing __iter__() and is equivalent to
for sym in config.get_symbols(False):
...
all_symbols (default: True) -- If True, all symbols -- including special
and undefined symbols -- will be included in the result, in
an undefined order. If False, only symbols actually defined
and not merely referred to in the configuration will be
included in the result, and will appear in the order that
they are defined within the Kconfig configuration files."""
return self.syms.values() if all_symbols else self.kconfig_syms
def get_choices(self):
"""Returns a list containing all choice statements in the
configuration, in the order they appear in the Kconfig files."""
return self.choices
def get_menus(self):
"""Returns a list containing all menus in the configuration, in the
order they appear in the Kconfig files."""
return self.menus
def get_comments(self):
"""Returns a list containing all comments in the configuration, in the
order they appear in the Kconfig files."""
return self.comments
def eval(self, s):
"""Returns the value of the expression 's' -- where 's' is represented
as a string -- in the context of the configuration. Raises
Kconfig_Syntax_Error if syntax errors are detected in 's'.
For example, if FOO and BAR are tristate symbols at least one of which
has the value "y", then config.eval("y && (FOO || BAR)") => "y"
This function always yields a tristate value. To get the value of
non-bool, non-tristate symbols, use Symbol.get_value().
The result of this function is consistent with how evaluation works for
conditional expressions in the configuration as well as in the C
implementation. "m" and m are rewritten as '"m" && MODULES' and 'm &&
MODULES', respectively, and a result of "m" will get promoted to "y" if
we're running without modules.
Syntax checking is somewhat lax, partly to be compatible with lax
parsing in the C implementation."""
return self._eval_expr(self._parse_expr(self._tokenize(s, True), # Feed
None, # Current symbol or choice
s)) # line
def get_config_header(self):
"""Returns the (uncommented) textual header of the .config file most
recently loaded with load_config(). Returns None if no .config file has
been loaded or if the most recently loaded .config file has no header.
The header comprises all lines up to but not including the first line
that either
1. Does not start with "#"
2. Has the form "# CONFIG_FOO is not set."
"""
return self.config_header
def get_base_dir(self):
"""Returns the base directory relative to which 'source' statements
will work, passed as an argument to Config.__init__()."""
return self.base_dir
def set_print_warnings(self, print_warnings):
"""Determines whether warnings related to this configuration (for
things like attempting to assign illegal values to symbols with
Symbol.set_user_value()) should be printed to stderr.
print_warnings -- True if warnings should be printed."""
self.print_warnings = print_warnings
def set_print_undef_assign(self, print_undef_assign):
"""Determines whether informational messages related to assignments to
undefined symbols should be printed to stderr for this configuration.
print_undef_assign -- If True, such messages will be printed."""
self.print_undef_assign = print_undef_assign
def __getitem__(self, key):
"""Returns the symbol with name 'name'. Raises KeyError if the symbol
does not appear in the configuration."""
return self.syms[key]
def __iter__(self):
"""Convenience function for iterating over the set of all defined
symbols in the configuration, used like
for sym in conf:
...
The iteration happens in the order of definition within the Kconfig
configuration files. Symbols only referred to but not defined will not
be included, nor will the special symbols n, m, and y. If you want to
include such symbols as well, see config.get_symbols()."""
return iter(self.kconfig_syms)
def unset_user_values(self):
"""Resets the values of all symbols, as if Config.load_config() or
Symbol.set_user_value() had never been called."""
for sym in self.syms_iter():
sym._unset_user_value_no_recursive_invalidate()
def __str__(self):
"""Returns a string containing various information about the Config."""
return _sep_lines("Configuration",
"File : " + self.filename,
"Base directory : " + self.base_dir,
"Value of $ARCH at creation time : " +
("(not set)" if self.arch is None else self.arch),
"Value of $SRCARCH at creation time : " +
("(not set)" if self.srcarch is None else self.srcarch),
"Source tree (derived from $srctree;",
"defaults to '.' if $srctree isn't set) : " + self.srctree,
"Most recently loaded .config : " +
("(no .config loaded)" if self.config_filename is None else
self.config_filename),
"Print warnings : " +
bool_str[self.print_warnings],
"Print assignments to undefined symbols : " +
bool_str[self.print_undef_assign])
#
# Private methods
#
def _invalidate_all(self):
for sym in self.syms_iter():
sym._invalidate()
def _tokenize(self, s, for_eval = False, filename = None, linenr = None):
"""Returns a _Feed instance containing tokens derived from the string
's'. Registers any new symbols encountered (via _sym_lookup()).
(I experimented with a pure regular expression implementation, but it
came out slower, less readable, and wouldn't have been as flexible.)
for_eval -- True when parsing an expression for a call to
Config.eval(), in which case we should not treat the first
token specially nor register new symbols."""
# lstrip() would work here too, but removing the '\n' at the end leads
# to earlier termination in the 'while' loop below, saving lots of
# calls
s = s.strip()
if s == "" or s[0] == "#":
return _Feed([])
if for_eval:
i = 0 # The current index in the string being tokenized
previous = None # The previous token seen
tokens = []
else:
# The initial word on a line is parsed specially. Let
# command_chars = [A-Za-z0-9_]. Then
# - leading non-command_chars characters on the line are ignored, and
# - the first token consists of the following one or more command_chars
# characters.
# This is why things like "----help--" are accepted.
initial_token_match = _initial_token_re_match(s)
if initial_token_match is None:
return _Feed([])
# The current index in the string being tokenized
i = initial_token_match.end()
keyword = _get_keyword(initial_token_match.group(1))
if keyword is None:
# We expect a keyword as the first token
_tokenization_error(s, filename, linenr)
if keyword == T_HELP:
# Avoid junk after "help", e.g. "---", being registered as a
# symbol
return _Feed([T_HELP])
tokens = [keyword]
previous = keyword
# _tokenize() is a hotspot during parsing, and this speeds things up a
# bit
strlen = len(s)
append = tokens.append
# Main tokenization loop. (Handles tokens past the first one.)
while i < strlen:
# Test for an identifier/keyword preceded by whitespace first; this
# is the most common case.
id_keyword_match = _id_keyword_re_match(s, i)
if id_keyword_match:
# We have an identifier or keyword. The above also stripped any
# whitespace for us.
name = id_keyword_match.group(1)
# Jump past it
i = id_keyword_match.end()
# Keyword?
keyword = _get_keyword(name)
if keyword is not None:
append(keyword)
# What would ordinarily be considered a name is treated as a
# string after certain tokens.
elif previous in string_lex:
append(name)
else:
# We're dealing with a symbol. _sym_lookup() will take care
# of allocating a new Symbol instance if it's the first
# time we see it.
sym = self._sym_lookup(name, not for_eval)
if previous == T_CONFIG or previous == T_MENUCONFIG:
# If the previous token is T_(MENU)CONFIG
# ("(menu)config"), we're tokenizing the first line of
# a symbol definition, and should remember this as a
# location where the symbol is defined.
sym.def_locations.append((filename, linenr))
else:
# Otherwise, it's a reference to the symbol
sym.ref_locations.append((filename, linenr))
append(sym)
else:
# This restrips whitespace that could have been stripped in the
# regex above, but it's worth it since identifiers/keywords are
# more common
s = s[i:].lstrip()
if s == "":
break
c = s[0]
i = 1
# String literal (constant symbol)
if c == '"' or c == "'":
if "\\" in s:
# Slow path: This could probably be sped up, but it's a
# very unusual case anyway.
quote = c
val = ""
while 1:
if i >= len(s):
_tokenization_error(s, filename, linenr)
c = s[i]
if c == quote:
break
if c == "\\":
if i + 1 >= len(s):
_tokenization_error(s, filename, linenr)
val += s[i + 1]
i += 2
else:
val += c
i += 1
i += 1
append(val)
else:
# Fast path: If the string contains no backslashes (almost
# always) we can simply look for the matching quote.
end = s.find(c, i)
if end == -1:
_tokenization_error(s, filename, linenr)
append(s[i:end])
i = end + 1
elif c == "&":
# Invalid characters are ignored
if i >= len(s) or s[i] != "&": continue
append(T_AND)
i += 1
elif c == "|":
# Invalid characters are ignored
if i >= len(s) or s[i] != "|": continue
append(T_OR)
i += 1
elif c == "!":
if i < len(s) and s[i] == "=":
append(T_UNEQUAL)
i += 1
else:
append(T_NOT)
elif c == "=": append(T_EQUAL)
elif c == "(": append(T_OPEN_PAREN)
elif c == ")": append(T_CLOSE_PAREN)
elif c == "#": break # Comment
else: continue # Invalid characters are ignored
previous = tokens[-1]
return _Feed(tokens)
#
# Parsing
#
# Expression grammar:
#
# <expr> -> <symbol>
# <symbol> '=' <symbol>
# <symbol> '!=' <symbol>
# '(' <expr> ')'
# '!' <expr>
# <expr> '&&' <expr>
# <expr> '||' <expr>
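# As an informal illustration of the resulting representation (the symbol
# names are just examples): FOO && (BAR || !BAZ) parses to roughly
# (AND, [FOO, (OR, [BAR, (NOT, BAZ)])]), where FOO, BAR, and BAZ are Symbol
# instances, while FOO = "bar" parses to (EQUAL, FOO, "bar").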
def _parse_expr(self, feed, cur_sym_or_choice, line, filename = None,
linenr = None, transform_m = True):
"""Parse an expression from the tokens in 'feed' using a simple
top-down approach. The result has the form (<operator>, <list
containing parsed operands>).
feed -- _Feed instance containing the tokens for the expression.
cur_sym_or_choice -- The symbol or choice currently being parsed, or
None if we're not parsing a symbol or choice.
Used for recording references to symbols.
line -- The line containing the expression being parsed.
filename (default: None) -- The file containing the expression.
linenr (default: None) -- The line number containing the expression.
transform_m (default: True) -- Determines if 'm' should be rewritten to
'm && MODULES' -- see
parse_val_and_cond()."""
# Use instance variables to avoid having to pass these as arguments
# through the top-down parser in _parse_expr_2(), which is tedious and
# obfuscates the code. A profiler run shows no noticeable performance
# difference.
self.parse_expr_cur_sym_or_choice = cur_sym_or_choice
self.parse_expr_line = line
self.parse_expr_filename = filename
self.parse_expr_linenr = linenr
self.parse_expr_transform_m = transform_m
return self._parse_expr_2(feed)
def _parse_expr_2(self, feed):
or_terms = [self._parse_or_term(feed)]
# Keep parsing additional terms while the lookahead is '||'
while feed.check(T_OR):
or_terms.append(self._parse_or_term(feed))
return or_terms[0] if len(or_terms) == 1 else (OR, or_terms)
def _parse_or_term(self, feed):
and_terms = [self._parse_factor(feed)]
# Keep parsing additional terms while the lookahead is '&&'
while feed.check(T_AND):
and_terms.append(self._parse_factor(feed))
return and_terms[0] if len(and_terms) == 1 else (AND, and_terms)
def _parse_factor(self, feed):
if feed.check(T_OPEN_PAREN):
expr_parse = self._parse_expr_2(feed)
if not feed.check(T_CLOSE_PAREN):
_parse_error(self.parse_expr_line,
"missing end parenthesis.",
self.parse_expr_filename,
self.parse_expr_linenr)
return expr_parse
if feed.check(T_NOT):
return (NOT, self._parse_factor(feed))
sym_or_string = feed.get_next()
if not isinstance(sym_or_string, (Symbol, str)):
_parse_error(self.parse_expr_line,
"malformed expression.",
self.parse_expr_filename,
self.parse_expr_linenr)
if self.parse_expr_cur_sym_or_choice is not None and \
isinstance(sym_or_string, Symbol):
self.parse_expr_cur_sym_or_choice.referenced_syms.add(sym_or_string)
next_token = feed.peek_next()
# For conditional expressions ('depends on <expr>', '... if <expr>',
# etc.), "m" and m are rewritten to "m" && MODULES.
if next_token != T_EQUAL and next_token != T_UNEQUAL:
if self.parse_expr_transform_m and (sym_or_string is self.m or
sym_or_string == "m"):
return (AND, ["m", self._sym_lookup("MODULES")])
return sym_or_string
relation = EQUAL if (feed.get_next() == T_EQUAL) else UNEQUAL
sym_or_string_2 = feed.get_next()
if self.parse_expr_cur_sym_or_choice is not None and \
isinstance(sym_or_string_2, Symbol):
self.parse_expr_cur_sym_or_choice.referenced_syms.add(sym_or_string_2)
if sym_or_string is self.m:
sym_or_string = "m"
if sym_or_string_2 is self.m:
sym_or_string_2 = "m"
return (relation, sym_or_string, sym_or_string_2)
def _parse_file(self, filename, parent, deps, visible_if_deps, res = None):
"""Parses the Kconfig file 'filename'. Returns a list with the Items in
the file. See _parse_block() for the meaning of the parameters."""
return self._parse_block(_FileFeed(filename), None, parent, deps,
visible_if_deps, res)
def _parse_block(self, line_feeder, end_marker, parent, deps,
visible_if_deps = None, res = None):
"""Parses a block, which is the contents of either a file or an if,
menu, or choice statement. Returns a list with the Items in the block.
end_marker -- The token that ends the block, e.g. T_ENDIF ("endif") for
if's. None for files.
parent -- The enclosing menu, choice or if, or None if we're at the top
level.
deps -- Dependencies from enclosing menus, choices and if's.
visible_if_deps (default: None) -- 'visible if' dependencies from
enclosing menus.
res (default: None) -- The list to add items to. If None, a new list is
created to hold the items."""
block = [] if res is None else res
while 1:
# Do we already have a tokenized line that we determined wasn't
# part of whatever we were parsing earlier? See comment in
# Config.__init__().
if self.end_line is not None:
line = self.end_line
tokens = self.end_line_tokens
tokens.go_to_start()
self.end_line = None
self.end_line_tokens = None
else:
line = line_feeder.get_next()
if line is None:
if end_marker is not None:
raise Kconfig_Syntax_Error(
"Unexpected end of file {0}."
.format(line_feeder.get_filename()))
return block
tokens = self._tokenize(line, False,
line_feeder.get_filename(),
line_feeder.get_linenr())
t0 = tokens.get_next()
if t0 is None:
continue
# Cases are ordered roughly by frequency, which speeds things up a
# bit
if t0 == T_CONFIG or t0 == T_MENUCONFIG:
# The tokenizer will automatically allocate a new Symbol object
# for any new names it encounters, so we don't need to worry
# about that here.
sym = tokens.get_next()
# Symbols defined in multiple places get the parent of their
# first definition. However, for symbols whose parents are choice
# statements, the choice statement takes precedence.
if not sym.is_defined_ or isinstance(parent, Choice):
sym.parent = parent
sym.is_defined_ = True
self.kconfig_syms.append(sym)
block.append(sym)
self._parse_properties(line_feeder, sym, deps, visible_if_deps)
elif t0 == T_SOURCE:
kconfig_file = tokens.get_next()
exp_kconfig_file = self._expand_sym_refs(kconfig_file)
f = os.path.join(self.base_dir, exp_kconfig_file)
if not os.path.exists(f):
raise IOError('{0}:{1}: sourced file "{2}" (expands to '
'"{3}") not found. Perhaps base_dir '
'(argument to Config.__init__(), currently '
'"{4}") is set to the wrong value.'
.format(line_feeder.get_filename(),
line_feeder.get_linenr(),
kconfig_file, exp_kconfig_file,
self.base_dir))
# Add items to the same block
self._parse_file(f, parent, deps, visible_if_deps, block)
elif t0 == end_marker:
# We have reached the end of the block
return block
elif t0 == T_IF:
# If statements are treated as syntactic sugar for adding
# dependencies to enclosed items and do not have an explicit
# object representation.
dep_expr = self._parse_expr(tokens, None, line,
line_feeder.get_filename(),
line_feeder.get_linenr())
self._parse_block(line_feeder,
T_ENDIF,
parent,
_make_and(dep_expr, deps),
visible_if_deps,
block) # Add items to the same block
elif t0 == T_COMMENT:
comment = Comment()
comment.config = self
comment.parent = parent
comment.filename = line_feeder.get_filename()
comment.linenr = line_feeder.get_linenr()
comment.text = tokens.get_next()
self.comments.append(comment)
block.append(comment)
self._parse_properties(line_feeder, comment, deps, visible_if_deps)
elif t0 == T_MENU:
menu = Menu()
menu.config = self
menu.parent = parent
menu.filename = line_feeder.get_filename()
menu.linenr = line_feeder.get_linenr()
menu.title = tokens.get_next()
self.menus.append(menu)
block.append(menu)
# Parse properties and contents
self._parse_properties(line_feeder, menu, deps, visible_if_deps)
menu.block = self._parse_block(line_feeder,
T_ENDMENU,
menu,
menu.dep_expr,
_make_and(visible_if_deps,
menu.visible_if_expr))
elif t0 == T_CHOICE:
name = tokens.get_next()
if name is None:
choice = Choice()
self.choices.append(choice)
else:
# Named choice
choice = self.named_choices.get(name)
if choice is None:
choice = Choice()
choice.name = name
self.named_choices[name] = choice
self.choices.append(choice)
choice.config = self
choice.parent = parent
choice.def_locations.append((line_feeder.get_filename(),
line_feeder.get_linenr()))
# Parse properties and contents
self._parse_properties(line_feeder, choice, deps, visible_if_deps)
choice.block = self._parse_block(line_feeder,
T_ENDCHOICE,
choice,
deps,
visible_if_deps)
choice._determine_actual_symbols()
# If no type is set for the choice, its type is that of the first
# choice item
if choice.type == UNKNOWN:
for item in choice.get_symbols():
if item.type != UNKNOWN:
choice.type = item.type
break
# Each choice item of UNKNOWN type gets the type of the choice
for item in choice.get_symbols():
if item.type == UNKNOWN:
item.type = choice.type
block.append(choice)
elif t0 == T_MAINMENU:
text = tokens.get_next()
if self.mainmenu_text is not None:
self._warn("overriding 'mainmenu' text. "
'Old value: "{0}", new value: "{1}".'
.format(self.mainmenu_text, text),
line_feeder.get_filename(),
line_feeder.get_linenr())
self.mainmenu_text = text
else:
_parse_error(line, "unrecognized construct.",
line_feeder.get_filename(),
line_feeder.get_linenr())
def _parse_properties(self, line_feeder, stmt, deps, visible_if_deps):
"""Parsing of properties for symbols, menus, choices, and comments."""
def parse_val_and_cond(tokens, line, filename, linenr):
"""Parses '<expr1> if <expr2>' constructs, where the 'if' part is
optional. Returns a tuple containing the parsed expressions, with
None as the second element if the 'if' part is missing."""
val = self._parse_expr(tokens, stmt, line, filename, linenr, False)
if tokens.check(T_IF):
return (val, self._parse_expr(tokens, stmt, line, filename, linenr))
return (val, None)
# In case the symbol is defined in multiple locations, we need to
# remember what prompts, defaults, and selects are new for this
# definition, as "depends on" should only apply to the local
# definition.
new_prompt = None
new_def_exprs = []
new_selects = []
# Dependencies from 'depends on' statements
depends_on_expr = None
while 1:
line = line_feeder.get_next()
if line is None:
break
filename = line_feeder.get_filename()
linenr = line_feeder.get_linenr()
tokens = self._tokenize(line, False, filename, linenr)
t0 = tokens.get_next()
if t0 is None:
continue
# Cases are ordered roughly by frequency, which speeds things up a
# bit
if t0 == T_DEPENDS:
if not tokens.check(T_ON):
_parse_error(line, 'expected "on" after "depends".', filename, linenr)
parsed_deps = self._parse_expr(tokens, stmt, line, filename, linenr)
if isinstance(stmt, (Menu, Comment)):
stmt.dep_expr = _make_and(stmt.dep_expr, parsed_deps)
else:
depends_on_expr = _make_and(depends_on_expr, parsed_deps)
elif t0 == T_HELP:
# Find first non-blank (not all-space) line and get its
# indentation
line_feeder.remove_blank()
line = line_feeder.get_next()
if line is None:
stmt.help = ""
break
indent = _indentation(line)
if indent == 0:
# If the first non-empty line has zero indent, there is no
# help text
stmt.help = ""
line_feeder.go_back()
break
# The help text goes on till the first non-empty line with less
# indent
help_lines = [_deindent(line, indent)]
while 1:
line = line_feeder.get_next()
if line is None or \
(not line.isspace() and _indentation(line) < indent):
stmt.help = "".join(help_lines)
break
help_lines.append(_deindent(line, indent))
if line is None:
break
line_feeder.go_back()
elif t0 == T_SELECT:
target = tokens.get_next()
stmt.referenced_syms.add(target)
stmt.selected_syms.add(target)
if tokens.check(T_IF):
new_selects.append((target,
self._parse_expr(tokens, stmt, line, filename, linenr)))
else:
new_selects.append((target, None))
elif t0 in (T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING):
stmt.type = token_to_type[t0]
if len(tokens) > 1:
new_prompt = parse_val_and_cond(tokens, line, filename, linenr)
elif t0 == T_DEFAULT:
new_def_exprs.append(parse_val_and_cond(tokens, line, filename, linenr))
elif t0 == T_DEF_BOOL:
stmt.type = BOOL
if len(tokens) > 1:
new_def_exprs.append(parse_val_and_cond(tokens, line, filename, linenr))
elif t0 == T_PROMPT:
# 'prompt' properties override each other within a single
# definition of a symbol, but additional prompts can be added
# by defining the symbol multiple times; hence 'new_prompt'
# instead of 'prompt'.
new_prompt = parse_val_and_cond(tokens, line, filename, linenr)
elif t0 == T_RANGE:
lower = tokens.get_next()
upper = tokens.get_next()
stmt.referenced_syms.add(lower)
stmt.referenced_syms.add(upper)
if tokens.check(T_IF):
stmt.ranges.append((lower, upper,
self._parse_expr(tokens, stmt, line, filename, linenr)))
else:
stmt.ranges.append((lower, upper, None))
elif t0 == T_DEF_TRISTATE:
stmt.type = TRISTATE
if len(tokens) > 1:
new_def_exprs.append(parse_val_and_cond(tokens, line, filename, linenr))
elif t0 == T_OPTION:
if tokens.check(T_ENV) and tokens.check(T_EQUAL):
env_var = tokens.get_next()
stmt.is_special_ = True
stmt.is_from_env = True
if env_var not in os.environ:
self._warn("The symbol {0} references the "
"non-existent environment variable {1} and "
"will get the empty string as its value. "
"If you're using kconfiglib via "
"'make (i)scriptconfig', it should have "
"set up the environment correctly for you. "
"If you still got this message, that "
"might be an error, and you should email "
"ulfalizer a.t Google's email service."""
.format(stmt.name, env_var),
filename, linenr)
stmt.cached_val = ""
else:
stmt.cached_val = os.environ[env_var]
elif tokens.check(T_DEFCONFIG_LIST):
self.defconfig_sym = stmt
elif tokens.check(T_MODULES):
# To reduce warning spam, only warn if 'option modules' is
# set on some symbol that isn't MODULES, which should be
# safe. I haven't run into any projects that make use of
# modules besides the kernel yet, and there it's likely to
# keep being called "MODULES".
if stmt.name != "MODULES":
self._warn("the 'modules' option is not supported. "
"Let me know if this is a problem for you; "
"it shouldn't be that hard to implement. "
"(Note that modules are still supported -- "
"Kconfiglib just assumes the symbol name "
"MODULES, like older versions of the C "
"implementation did when 'option modules' "
"wasn't used.)",
filename, linenr)
elif tokens.check(T_ALLNOCONFIG_Y):
if not isinstance(stmt, Symbol):
_parse_error(line,
"the 'allnoconfig_y' option is only valid for symbols.",
filename,
linenr)
stmt.allnoconfig_y = True
else:
_parse_error(line, "unrecognized option.", filename, linenr)
elif t0 == T_VISIBLE:
if not tokens.check(T_IF):
_parse_error(line, 'expected "if" after "visible".', filename, linenr)
if not isinstance(stmt, Menu):
_parse_error(line,
"'visible if' is only valid for menus.",
filename,
linenr)
parsed_deps = self._parse_expr(tokens, stmt, line, filename, linenr)
stmt.visible_if_expr = _make_and(stmt.visible_if_expr, parsed_deps)
elif t0 == T_OPTIONAL:
if not isinstance(stmt, Choice):
_parse_error(line,
'"optional" is only valid for choices.',
filename,
linenr)
stmt.optional = True
else:
# See comment in Config.__init__()
self.end_line = line
self.end_line_tokens = tokens
break
# Propagate dependencies from enclosing menus and if's.
# For menus and comments..
if isinstance(stmt, (Menu, Comment)):
stmt.orig_deps = stmt.dep_expr
stmt.deps_from_containing = deps
stmt.dep_expr = _make_and(stmt.dep_expr, deps)
stmt.all_referenced_syms = \
stmt.referenced_syms | _get_expr_syms(deps)
# For symbols and choices..
else:
# See comment for 'menu_dep'
stmt.menu_dep = depends_on_expr
# Propagate dependencies specified with 'depends on' to any new
# default expressions, prompts, and selections. ("New" since a
# symbol might be defined in multiple places and the dependencies
# should only apply to the local definition.)
new_def_exprs = [(val_expr, _make_and(cond_expr, depends_on_expr))
for val_expr, cond_expr in new_def_exprs]
new_selects = [(target, _make_and(cond_expr, depends_on_expr))
for target, cond_expr in new_selects]
if new_prompt is not None:
prompt, cond_expr = new_prompt
# 'visible if' dependencies from enclosing menus get propagated
# to prompts
if visible_if_deps is not None:
cond_expr = _make_and(cond_expr, visible_if_deps)
new_prompt = (prompt, _make_and(cond_expr, depends_on_expr))
# We save the original expressions -- before any menu and if
# conditions have been propagated -- so these can be retrieved
# later.
stmt.orig_def_exprs.extend(new_def_exprs)
if new_prompt is not None:
stmt.orig_prompts.append(new_prompt)
# Only symbols can select
if isinstance(stmt, Symbol):
stmt.orig_selects.extend(new_selects)
# Save dependencies from enclosing menus and if's
stmt.deps_from_containing = deps
# The set of symbols referenced directly by the symbol/choice plus
# all symbols referenced by enclosing menus and if's.
stmt.all_referenced_syms = \
stmt.referenced_syms | _get_expr_syms(deps)
# Propagate dependencies from enclosing menus and if's
stmt.def_exprs.extend([(val_expr, _make_and(cond_expr, deps))
for val_expr, cond_expr in new_def_exprs])
for target, cond in new_selects:
target.rev_dep = _make_or(target.rev_dep,
_make_and(stmt,
_make_and(cond, deps)))
if new_prompt is not None:
prompt, cond_expr = new_prompt
stmt.prompts.append((prompt, _make_and(cond_expr, deps)))
#
# Symbol table manipulation
#
def _sym_lookup(self, name, add_sym_if_not_exists = True):
"""Fetches the symbol 'name' from the symbol table, optionally adding
it if it does not exist (this is usually what we want)."""
if name in self.syms:
return self.syms[name]
new_sym = Symbol()
new_sym.config = self
new_sym.name = name
if add_sym_if_not_exists:
self.syms[name] = new_sym
else:
# This warning is generated while evaluating an expression
# containing undefined symbols using Config.eval()
self._warn("no symbol {0} in configuration".format(name))
return new_sym
#
# Evaluation of symbols and expressions
#
def _eval_expr(self, expr):
"""Evaluates an expression and returns one of the tristate values "n",
"m" or "y"."""
res = self._eval_expr_2(expr)
# Promote "m" to "y" if we're running without modules. Internally, "m"
# is often rewritten to "m" && MODULES by both the C implementation and
# kconfiglib, which takes care of cases where "m" should be false if
# we're running without modules.
if res == "m" and not self._has_modules():
return "y"
return res
def _eval_expr_2(self, expr):
if expr is None:
return "y"
if isinstance(expr, Symbol):
# Non-bool/tristate symbols are always "n" in a tristate sense,
# regardless of their value
if expr.type != BOOL and expr.type != TRISTATE:
return "n"
return expr.get_value()
if isinstance(expr, str):
return expr if (expr == "y" or expr == "m") else "n"
first_expr = expr[0]
if first_expr == AND:
res = "y"
for subexpr in expr[1]:
ev = self._eval_expr_2(subexpr)
# Return immediately upon discovering an "n" term
if ev == "n":
return "n"
if ev == "m":
res = "m"
# 'res' is either "m" or "y" here; we already handled the
# short-circuiting "n" case in the loop.
return res
if first_expr == OR:
res = "n"
for subexpr in expr[1]:
ev = self._eval_expr_2(subexpr)
# Return immediately upon discovering a "y" term
if ev == "y":
return "y"
if ev == "m":
res = "m"
# 'res' is either "n" or "m" here; we already handled the
# short-circuiting "y" case in the loop.
return res
if first_expr == NOT:
ev = self._eval_expr_2(expr[1])
if ev == "y":
return "n"
return "y" if (ev == "n") else "m"
if first_expr == EQUAL:
return "y" if (_str_val(expr[1]) == _str_val(expr[2])) else "n"
if first_expr == UNEQUAL:
return "y" if (_str_val(expr[1]) != _str_val(expr[2])) else "n"
_internal_error("Internal error while evaluating expression: "
"unknown operation {0}.".format(first_expr))
def _eval_min(self, e1, e2):
"""Returns the minimum value of the two expressions. Equates None with
'y'."""
e1_eval = self._eval_expr(e1)
e2_eval = self._eval_expr(e2)
return e1_eval if tri_less(e1_eval, e2_eval) else e2_eval
def _eval_max(self, e1, e2):
"""Returns the maximum value of the two expressions. Equates None with
'y'."""
e1_eval = self._eval_expr(e1)
e2_eval = self._eval_expr(e2)
return e1_eval if tri_greater(e1_eval, e2_eval) else e2_eval
#
# Methods related to the MODULES symbol
#
def _has_modules(self):
modules_sym = self.syms.get("MODULES")
return (modules_sym is not None) and (modules_sym.get_value() == "y")
#
# Dependency tracking
#
def _build_dep(self):
"""Populates the Symbol.dep sets, linking the symbol to the symbols
that immediately depend on it in the sense that changing the value of
the symbol might affect the values of those other symbols. This is used
for caching/invalidation purposes. The calculated sets might be larger
than necessary as we don't do any complicated analysis of the
expressions."""
# Adds 'sym' as a directly dependent symbol to all symbols that appear
# in the expression 'e'
def add_expr_deps(e, sym):
for s in _get_expr_syms(e):
s.dep.add(sym)
# The directly dependent symbols of a symbol are:
# - Any symbols whose prompts, default values, rev_dep (select
# condition), or ranges depend on the symbol
# - Any symbols that belong to the same choice statement as the symbol
# (these won't be included in 'dep' as that makes the dependency
# graph unwieldy, but Symbol._get_dependent() will include them)
# - Any symbols in a choice statement that depends on the symbol
for sym in self.syms_iter():
for _, e in sym.prompts:
add_expr_deps(e, sym)
for v, e in sym.def_exprs:
add_expr_deps(v, sym)
add_expr_deps(e, sym)
add_expr_deps(sym.rev_dep, sym)
for l, u, e in sym.ranges:
add_expr_deps(l, sym)
add_expr_deps(u, sym)
add_expr_deps(e, sym)
if sym.is_choice_symbol_:
choice = sym.parent
for _, e in choice.prompts:
add_expr_deps(e, sym)
for _, e in choice.def_exprs:
add_expr_deps(e, sym)
def _expr_val_str(self, expr, no_value_str = "(none)", get_val_instead_of_eval = False):
# Since values are valid expressions, _expr_to_str() will get a nice
# string representation for those as well.
if expr is None:
return no_value_str
if get_val_instead_of_eval:
if isinstance(expr, str):
return _expr_to_str(expr)
val = expr.get_value()
else:
val = self._eval_expr(expr)
return "{0} (value: {1})".format(_expr_to_str(expr), _expr_to_str(val))
def _expand_sym_refs(self, s):
"""Expands $-references to symbols in 's' to symbol values, or to the
empty string for undefined symbols."""
while 1:
sym_ref_match = _sym_ref_re_search(s)
if sym_ref_match is None:
return s
sym_name = sym_ref_match.group(0)[1:]
sym = self.syms.get(sym_name)
expansion = "" if sym is None else sym.get_value()
s = s[:sym_ref_match.start()] + \
expansion + \
s[sym_ref_match.end():]
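# For instance, if the symbol FOO has the value "foo", then
# _expand_sym_refs("$FOO bar") returns "foo bar"; references to undefined
# symbols simply expand to the empty string, as noted in the docstring.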
def _get_sym_or_choice_str(self, sc):
"""Symbols and choices have many properties in common, so we factor out
common __str__() stuff here. "sc" is short for "symbol or choice"."""
# As we deal a lot with string representations here, use some
# convenient shorthand:
s = _expr_to_str
#
# Common symbol/choice properties
#
user_val_str = "(no user value)" if sc.user_val is None else s(sc.user_val)
# Build prompts string
if sc.prompts == []:
prompts_str = " (no prompts)"
else:
prompts_str_rows = []
for prompt, cond_expr in sc.orig_prompts:
if cond_expr is None:
prompts_str_rows.append(' "{0}"'.format(prompt))
else:
prompts_str_rows.append(' "{0}" if {1}'
.format(prompt, self._expr_val_str(cond_expr)))
prompts_str = "\n".join(prompts_str_rows)
# Build locations string
if sc.def_locations == []:
locations_str = "(no locations)"
else:
locations_str = " ".join(["{0}:{1}".format(filename, linenr) for
(filename, linenr) in sc.def_locations])
# Build additional-dependencies-from-menus-and-if's string
additional_deps_str = " " + self._expr_val_str(sc.deps_from_containing,
"(no additional dependencies)")
#
# Symbol-specific stuff
#
if isinstance(sc, Symbol):
# Build ranges string
if isinstance(sc, Symbol):
if sc.ranges == []:
ranges_str = " (no ranges)"
else:
ranges_str_rows = []
for l, u, cond_expr in sc.ranges:
if cond_expr is None:
ranges_str_rows.append(" [{0}, {1}]".format(s(l), s(u)))
else:
ranges_str_rows.append(" [{0}, {1}] if {2}"
.format(s(l), s(u), self._expr_val_str(cond_expr)))
ranges_str = "\n".join(ranges_str_rows)
# Build default values string
if sc.def_exprs == []:
defaults_str = " (no default values)"
else:
defaults_str_rows = []
for val_expr, cond_expr in sc.orig_def_exprs:
row_str = " " + self._expr_val_str(val_expr, "(none)", sc.type == STRING)
defaults_str_rows.append(row_str)
defaults_str_rows.append(" Condition: " + self._expr_val_str(cond_expr))
defaults_str = "\n".join(defaults_str_rows)
# Build selects string
if sc.orig_selects == []:
selects_str = " (no selects)"
else:
selects_str_rows = []
for target, cond_expr in sc.orig_selects:
if cond_expr is None:
selects_str_rows.append(" {0}".format(target.name))
else:
selects_str_rows.append(" {0} if ".format(target.name) +
self._expr_val_str(cond_expr))
selects_str = "\n".join(selects_str_rows)
res = _sep_lines("Symbol " +
("(no name)" if sc.name is None else sc.name),
"Type : " + typename[sc.type],
"Value : " + s(sc.get_value()),
"User value : " + user_val_str,
"Visibility : " + s(sc.get_visibility()),
"Is choice item : " + bool_str[sc.is_choice_symbol_],
"Is defined : " + bool_str[sc.is_defined_],
"Is from env. : " + bool_str[sc.is_from_env],
"Is special : " + bool_str[sc.is_special_] + "\n")
if sc.ranges != []:
res += _sep_lines("Ranges:",
ranges_str + "\n")
res += _sep_lines("Prompts:",
prompts_str,
"Default values:",
defaults_str,
"Selects:",
selects_str,
"Reverse (select-related) dependencies:",
" (no reverse dependencies)" if sc.rev_dep == "n"
else " " + self._expr_val_str(sc.rev_dep),
"Additional dependencies from enclosing menus and if's:",
additional_deps_str,
"Locations: " + locations_str)
return res
#
# Choice-specific stuff
#
# Build selected symbol string
sel = sc.get_selection()
sel_str = "(no selection)" if sel is None else sel.name
# Build default values string
if sc.def_exprs == []:
defaults_str = " (no default values)"
else:
defaults_str_rows = []
for sym, cond_expr in sc.orig_def_exprs:
if cond_expr is None:
defaults_str_rows.append(" {0}".format(sym.name))
else:
defaults_str_rows.append(" {0} if ".format(sym.name) +
self._expr_val_str(cond_expr))
defaults_str = "\n".join(defaults_str_rows)
# Build contained symbols string
names = [sym.name for sym in sc.get_symbols()]
syms_string = "(empty)" if names == [] else " ".join(names)
return _sep_lines("Choice",
"Name (for named choices): " +
("(no name)" if sc.name is None else sc.name),
"Type : " + typename[sc.type],
"Selected symbol : " + sel_str,
"User value : " + user_val_str,
"Mode : " + s(sc.get_mode()),
"Visibility : " + s(sc.get_visibility()),
"Optional : " + bool_str[sc.optional],
"Prompts:",
prompts_str,
"Defaults:",
defaults_str,
"Choice symbols:",
" " + syms_string,
"Additional dependencies from enclosing menus and if's:",
additional_deps_str,
"Locations: " + locations_str)
def _expr_depends_on(self, expr, sym):
"""Reimplementation of expr_depends_symbol() from mconf.c. Used to
determine if a submenu should be implicitly created, which influences what
items inside choice statements are considered choice items."""
if expr is None:
return False
def rec(expr):
if isinstance(expr, str):
return False
if isinstance(expr, Symbol):
return expr is sym
e0 = expr[0]
if e0 == EQUAL or e0 == UNEQUAL:
return self._eq_to_sym(expr) is sym
if e0 == AND:
for and_expr in expr[1]:
if rec(and_expr):
return True
return False
return rec(expr)
def _eq_to_sym(self, eq):
"""_expr_depends_on() helper. For (in)equalities of the form sym = y/m
or sym != n, returns sym. For other (in)equalities, returns None."""
relation, left, right = eq
left = self._transform_n_m_y(left)
right = self._transform_n_m_y(right)
# Make sure the symbol (if any) appears to the left
if not isinstance(left, Symbol):
left, right = right, left
if not isinstance(left, Symbol):
return None
if (relation == EQUAL and (right == "y" or right == "m")) or \
(relation == UNEQUAL and right == "n"):
return left
return None
def _transform_n_m_y(self, item):
"""_eq_to_sym() helper. Translates the symbols n, m, and y to their
string equivalents."""
if item is self.n:
return "n"
if item is self.m:
return "m"
if item is self.y:
return "y"
return item
def _warn(self, msg, filename = None, linenr = None):
"""For printing warnings to stderr."""
if self.print_warnings:
_stderr_msg("warning: " + msg, filename, linenr)
class Item(object):
"""Base class for symbols and other Kconfig constructs. Subclasses are
Symbol, Choice, Menu, and Comment."""
def is_symbol(self):
"""Returns True if the item is a symbol. Short for
isinstance(item, kconfiglib.Symbol)."""
return isinstance(self, Symbol)
def is_choice(self):
"""Returns True if the item is a choice. Short for
isinstance(item, kconfiglib.Choice)."""
return isinstance(self, Choice)
def is_menu(self):
"""Returns True if the item is a menu. Short for
isinstance(item, kconfiglib.Menu)."""
return isinstance(self, Menu)
def is_comment(self):
"""Returns True if the item is a comment. Short for
isinstance(item, kconfiglib.Comment)."""
return isinstance(self, Comment)
class Symbol(Item):
"""Represents a configuration symbol - e.g. FOO for
config FOO
..."""
#
# Public interface
#
def get_value(self):
"""Calculate and return the value of the symbol. See also
Symbol.set_user_value()."""
if self.cached_val is not None:
return self.cached_val
self.write_to_conf = False
# As a quirk of Kconfig, undefined symbols get their name as their
# value. This is why things like "FOO = bar" work for seeing if FOO has
# the value "bar".
if self.type == UNKNOWN:
self.cached_val = self.name
return self.name
new_val = default_value[self.type]
vis = _get_visibility(self)
if self.type == BOOL or self.type == TRISTATE:
# The visibility and mode (modules-only or single-selection) of
# choice items will be taken into account in _get_visibility()
if self.is_choice_symbol_:
if vis != "n":
choice = self.parent
mode = choice.get_mode()
self.write_to_conf = (mode != "n")
if mode == "y":
new_val = "y" if (choice.get_selection() is self) else "n"
elif mode == "m":
if self.user_val == "m" or self.user_val == "y":
new_val = "m"
else:
# If the symbol is visible and has a user value, use that.
# Otherwise, look at defaults.
use_defaults = True
if vis != "n":
self.write_to_conf = True
if self.user_val is not None:
new_val = self.config._eval_min(self.user_val, vis)
use_defaults = False
if use_defaults:
for val_expr, cond_expr in self.def_exprs:
cond_eval = self.config._eval_expr(cond_expr)
if cond_eval != "n":
self.write_to_conf = True
new_val = self.config._eval_min(val_expr, cond_eval)
break
# Reverse (select-related) dependencies take precedence
rev_dep_val = self.config._eval_expr(self.rev_dep)
if rev_dep_val != "n":
self.write_to_conf = True
new_val = self.config._eval_max(new_val, rev_dep_val)
# Promote "m" to "y" for booleans
if new_val == "m" and self.type == BOOL:
new_val = "y"
elif self.type == STRING:
use_defaults = True
if vis != "n":
self.write_to_conf = True
if self.user_val is not None:
new_val = self.user_val
use_defaults = False
if use_defaults:
for val_expr, cond_expr in self.def_exprs:
if self.config._eval_expr(cond_expr) != "n":
self.write_to_conf = True
new_val = _str_val(val_expr)
break
elif self.type == HEX or self.type == INT:
has_active_range = False
low = None
high = None
use_defaults = True
base = 16 if self.type == HEX else 10
for (l, h, cond_expr) in self.ranges:
if self.config._eval_expr(cond_expr) != "n":
has_active_range = True
low_str = _str_val(l)
high_str = _str_val(h)
low = int(low_str, base) if \
_is_base_n(low_str, base) else 0
high = int(high_str, base) if \
_is_base_n(high_str, base) else 0
break
if vis != "n":
self.write_to_conf = True
if self.user_val is not None and \
_is_base_n(self.user_val, base) and \
(not has_active_range or
low <= int(self.user_val, base) <= high):
# If the user value is OK, it is stored in exactly the same
# form as specified in the assignment (with or without
# "0x", etc).
use_defaults = False
new_val = self.user_val
if use_defaults:
for val_expr, cond_expr in self.def_exprs:
if self.config._eval_expr(cond_expr) != "n":
self.write_to_conf = True
# If the default value is OK, it is stored in exactly
# the same form as specified. Otherwise, it is clamped
# to the range, and the output has "0x" as appropriate
# for the type.
new_val = _str_val(val_expr)
if _is_base_n(new_val, base):
new_val_num = int(new_val, base)
if has_active_range:
clamped_val = None
if new_val_num < low:
clamped_val = low
elif new_val_num > high:
clamped_val = high
if clamped_val is not None:
new_val = (hex(clamped_val) if \
self.type == HEX else str(clamped_val))
break
else: # For the for loop
# If no user value or default kicks in but the hex/int has
# an active range, then the low end of the range is used,
# provided it's > 0, with "0x" prepended as appropriate.
if has_active_range and low > 0:
new_val = (hex(low) if self.type == HEX else str(low))
self.cached_val = new_val
return new_val
def set_user_value(self, v):
"""Sets the user value of the symbol.
Equal in effect to assigning the value to the symbol within a .config
file. Use get_lower/upper_bound() or get_assignable_values() to find
the range of currently assignable values for bool and tristate symbols;
setting values outside this range will cause the user value to differ
from the result of Symbol.get_value() (be truncated). Values that are
invalid for the type (such as a_bool.set_user_value("foo")) are
ignored, and a warning is emitted if an attempt is made to assign such
a value.
For any type of symbol, is_modifiable() can be used to check if a user
value will currently have any effect on the symbol, as determined by
its visibility and range of assignable values. Any value that is valid
for the type (bool, tristate, etc.) will end up being reflected in
get_user_value() though, and might have an effect later if conditions
change. To get rid of the user value, use unset_user_value().
Any symbols dependent on the symbol are (recursively) invalidated, so
things will just work with regards to dependencies.
v -- The user value to give to the symbol."""
self._set_user_value_no_invalidate(v, False)
# There might be something more efficient you could do here, but play
# it safe.
if self.name == "MODULES":
self.config._invalidate_all()
return
self._invalidate()
self._invalidate_dependent()
def unset_user_value(self):
"""Resets the user value of the symbol, as if the symbol had never
gotten a user value via Config.load_config() or
Symbol.set_user_value()."""
self._unset_user_value_no_recursive_invalidate()
self._invalidate_dependent()
def get_user_value(self):
"""Returns the value assigned to the symbol in a .config or via
Symbol.set_user_value() (provided the value was valid for the type of the
symbol). Returns None in case of no user value."""
return self.user_val
def get_name(self):
"""Returns the name of the symbol."""
return self.name
def get_prompts(self):
"""Returns a list of prompts defined for the symbol, in the order they
appear in the configuration files. Returns the empty list for symbols
with no prompt.
This list will have a single entry for the vast majority of symbols
having prompts, but having multiple prompts for a single symbol is
possible through having multiple 'config' entries for it."""
return [prompt for prompt, _ in self.orig_prompts]
def get_upper_bound(self):
"""For string/hex/int symbols and for bool and tristate symbols that
cannot be modified (see is_modifiable()), returns None.
Otherwise, returns the highest value the symbol can be set to with
Symbol.set_user_value() (that will not be truncated): one of "m" or "y",
arranged from lowest to highest. This corresponds to the highest value
the symbol could be given in e.g. the 'make menuconfig' interface.
See also the tri_less*() and tri_greater*() functions, which could come
in handy."""
if self.type != BOOL and self.type != TRISTATE:
return None
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y"
if self.type == BOOL and rev_dep == "m":
rev_dep = "y"
vis = _get_visibility(self)
if (tri_to_int[vis] - tri_to_int[rev_dep]) > 0:
return vis
return None
def get_lower_bound(self):
"""For string/hex/int symbols and for bool and tristate symbols that
cannot be modified (see is_modifiable()), returns None.
Otherwise, returns the lowest value the symbol can be set to with
Symbol.set_user_value() (that will not be truncated): one of "n" or "m",
arranged from lowest to highest. This corresponds to the lowest value
the symbol could be given in e.g. the 'make menuconfig' interface.
See also the tri_less*() and tri_greater*() functions, which could come
in handy."""
if self.type != BOOL and self.type != TRISTATE:
return None
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y"
if self.type == BOOL and rev_dep == "m":
rev_dep = "y"
if (tri_to_int[_get_visibility(self)] - tri_to_int[rev_dep]) > 0:
return rev_dep
return None
def get_assignable_values(self):
"""For string/hex/int symbols and for bool and tristate symbols that
cannot be modified (see is_modifiable()), returns the empty list.
Otherwise, returns a list containing the user values that can be
assigned to the symbol (that won't be truncated). Usage example:
if "m" in sym.get_assignable_values():
sym.set_user_value("m")
This is basically a more convenient interface to
get_lower/upper_bound() when wanting to test if a particular tristate
value can be assigned."""
if self.type != BOOL and self.type != TRISTATE:
return []
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y"
if self.type == BOOL and rev_dep == "m":
rev_dep = "y"
res = ["n", "m", "y"][tri_to_int[rev_dep] :
tri_to_int[_get_visibility(self)] + 1]
return res if len(res) > 1 else []
def get_type(self):
"""Returns the type of the symbol: one of UNKNOWN, BOOL, TRISTATE,
STRING, HEX, or INT. These are defined at the top level of the module,
so you'd do something like
if sym.get_type() == kconfiglib.STRING:
..."""
return self.type
def get_visibility(self):
"""Returns the visibility of the symbol: one of "n", "m" or "y". For
bool and tristate symbols, this is an upper bound on the value users
can set for the symbol. For other types of symbols, a visibility of "n"
means the user value will be ignored. A visibility of "n" corresponds
to not being visible in the 'make *config' interfaces.
Example (assuming we're running with modules enabled -- i.e., MODULES
set to 'y'):
# Assume this has been assigned 'n'
config N_SYM
tristate "N_SYM"
# Assume this has been assigned 'm'
config M_SYM
tristate "M_SYM"
# Has visibility 'n'
config A
tristate "A"
depends on N_SYM
# Has visibility 'm'
config B
tristate "B"
depends on M_SYM
# Has visibility 'y'
config C
tristate "C"
# Has no prompt, and hence visibility 'n'
config D
tristate
Having visibility be tri-valued ensures that e.g. a symbol cannot be
set to "y" by the user if it depends on a symbol with value "m", which
wouldn't be safe.
You should probably look at get_lower/upper_bound(),
get_assignable_values() and is_modifiable() before using this."""
return _get_visibility(self)
def get_parent(self):
"""Returns the menu or choice statement that contains the symbol, or
None if the symbol is at the top level. Note that if statements are
        treated as syntactic sugar and do not have an explicit class
representation."""
return self.parent
def get_referenced_symbols(self, refs_from_enclosing = False):
"""Returns the set() of all symbols referenced by this symbol. For
example, the symbol defined by
config FOO
bool
prompt "foo" if A && B
default C if D
depends on E
select F if G
references the symbols A through G.
refs_from_enclosing (default: False) -- If True, the symbols
referenced by enclosing menus and if's will be
included in the result."""
return self.all_referenced_syms if refs_from_enclosing else self.referenced_syms
def get_selected_symbols(self):
"""Returns the set() of all symbols X for which this symbol has a
'select X' or 'select X if Y' (regardless of whether Y is satisfied or
not). This is a subset of the symbols returned by
get_referenced_symbols()."""
return self.selected_syms
def get_help(self):
"""Returns the help text of the symbol, or None if the symbol has no
help text."""
return self.help
def get_config(self):
"""Returns the Config instance this symbol is from."""
return self.config
def get_def_locations(self):
"""Returns a list of (filename, linenr) tuples, where filename (string)
and linenr (int) represent a location where the symbol is defined. For
the vast majority of symbols this list will only contain one element.
For the following Kconfig, FOO would get two entries: the lines marked
with *.
config FOO *
bool "foo prompt 1"
config FOO *
bool "foo prompt 2"
"""
return self.def_locations
def get_ref_locations(self):
"""Returns a list of (filename, linenr) tuples, where filename (string)
and linenr (int) represent a location where the symbol is referenced in
the configuration. For example, the lines marked by * would be included
for FOO below:
config A
bool
default BAR || FOO *
config B
tristate
depends on FOO *
default m if FOO *
if FOO *
config A
bool "A"
endif
config FOO (definition not included)
bool
"""
return self.ref_locations
def is_modifiable(self):
"""Returns True if the value of the symbol could be modified by calling
Symbol.set_user_value().
For bools and tristates, this corresponds to the symbol being visible
in the 'make menuconfig' interface and not already being pinned to a
specific value (e.g. because it is selected by another symbol).
For strings and numbers, this corresponds to just being visible. (See
Symbol.get_visibility().)"""
if self.is_special_:
return False
if self.type == BOOL or self.type == TRISTATE:
rev_dep = self.config._eval_expr(self.rev_dep)
# A bool selected to "m" gets promoted to "y"
if self.type == BOOL and rev_dep == "m":
rev_dep = "y"
return (tri_to_int[_get_visibility(self)] -
tri_to_int[rev_dep]) > 0
return _get_visibility(self) != "n"
def is_defined(self):
"""Returns False if the symbol is referred to in the Kconfig but never
actually defined."""
return self.is_defined_
def is_special(self):
"""Returns True if the symbol is one of the special symbols n, m, y, or
UNAME_RELEASE, or gets its value from the environment."""
return self.is_special_
def is_from_environment(self):
"""Returns True if the symbol gets its value from the environment."""
return self.is_from_env
def has_ranges(self):
"""Returns True if the symbol is of type INT or HEX and has ranges that
limit what values it can take on."""
return self.ranges != []
def is_choice_symbol(self):
"""Returns True if the symbol is in a choice statement and is an actual
choice symbol (see Choice.get_symbols())."""
return self.is_choice_symbol_
def is_choice_selection(self):
"""Returns True if the symbol is contained in a choice statement and is
the selected item. Equivalent to
'sym.is_choice_symbol() and sym.get_parent().get_selection() is sym'."""
return self.is_choice_symbol_ and self.parent.get_selection() is self
def is_allnoconfig_y(self):
"""Returns True if the symbol has the 'allnoconfig_y' option set."""
return self.allnoconfig_y
def __str__(self):
"""Returns a string containing various information about the symbol."""
return self.config._get_sym_or_choice_str(self)
#
# Private methods
#
def __init__(self):
"""Symbol constructor -- not intended to be called directly by
kconfiglib clients."""
self.prompts = []
self.cached_visibility = None
self.config = None
self.parent = None
self.name = None
self.type = UNKNOWN
self.def_exprs = []
self.ranges = []
self.rev_dep = "n"
# The prompt, default value and select conditions without any
# dependencies from menus or if's propagated to them
self.orig_prompts = []
self.orig_def_exprs = []
self.orig_selects = []
# Dependencies inherited from containing menus and if's
self.deps_from_containing = None
self.help = None
# The set of symbols referenced by this symbol (see
# get_referenced_symbols())
self.referenced_syms = set()
# The set of symbols selected by this symbol (see
# get_selected_symbols())
self.selected_syms = set()
# Like 'referenced_syms', but includes symbols from
# dependencies inherited from enclosing menus and if's
self.all_referenced_syms = set()
# This is set to True for "actual" choice symbols. See
# Choice._determine_actual_symbols(). The trailing underscore avoids a
# collision with is_choice_symbol().
self.is_choice_symbol_ = False
# This records only dependencies specified with 'depends on'. Needed
# when determining actual choice items (hrrrr...). See also
# Choice._determine_actual_symbols().
self.menu_dep = None
# See Symbol.get_ref/def_locations().
self.def_locations = []
self.ref_locations = []
self.user_val = None
# Flags
# Should the symbol get an entry in .config?
self.write_to_conf = False
# Caches the calculated value
self.cached_val = None
# Populated in Config._build_dep() after parsing. Links the symbol to
# the symbols that immediately depend on it (in a caching/invalidation
# sense). The total set of dependent symbols for the symbol (the
# transitive closure) is calculated on an as-needed basis in
# _get_dependent().
self.dep = set()
# Caches the total list of dependent symbols. Calculated in
# _get_dependent().
self.cached_deps = None
# Does the symbol have an entry in the Kconfig file? The trailing
# underscore avoids a collision with is_defined().
self.is_defined_ = False
# Does the symbol get its value in some special way, e.g. from the
# environment or by being one of the special symbols n, m, and y? If
# so, the value is stored in self.cached_val, which is never
# invalidated. The trailing underscore avoids a collision with
# is_special().
self.is_special_ = False
# Does the symbol get its value from the environment?
self.is_from_env = False
# Does the symbol have the 'allnoconfig_y' option set?
self.allnoconfig_y = False
def _invalidate(self):
if self.is_special_:
return
if self.is_choice_symbol_:
self.parent._invalidate()
self.cached_val = None
self.cached_visibility = None
self.write_to_conf = False
def _invalidate_dependent(self):
for sym in self._get_dependent():
sym._invalidate()
def _set_user_value_no_invalidate(self, v, suppress_load_warnings):
"""Like set_user_value(), but does not invalidate any symbols.
suppress_load_warnings --
some warnings are annoying when loading a .config that can be helpful
when manually invoking set_user_value(). This flag is set to True to
suppress such warnings.
Perhaps this could be made optional for load_config() instead."""
if self.is_special_:
if self.is_from_env:
self.config._warn('attempt to assign the value "{0}" to the '
'symbol {1}, which gets its value from the '
'environment. Assignment ignored.'
.format(v, self.name))
else:
self.config._warn('attempt to assign the value "{0}" to the '
'special symbol {1}. Assignment ignored.'
.format(v, self.name))
return
if not self.is_defined_:
filename, linenr = self.ref_locations[0]
if self.config.print_undef_assign:
_stderr_msg('note: attempt to assign the value "{0}" to {1}, '
"which is referenced at {2}:{3} but never "
"defined. Assignment ignored."
.format(v, self.name, filename, linenr))
return
# Check if the value is valid for our type
if not ((self.type == BOOL and (v == "y" or v == "n") ) or
(self.type == TRISTATE and (v == "y" or v == "m" or
v == "n") ) or
(self.type == STRING ) or
(self.type == INT and _is_base_n(v, 10) ) or
(self.type == HEX and _is_base_n(v, 16) )):
self.config._warn('the value "{0}" is invalid for {1}, which has type {2}. '
"Assignment ignored."
.format(v, self.name, typename[self.type]))
return
if self.prompts == [] and not suppress_load_warnings:
self.config._warn('assigning "{0}" to the symbol {1} which '
'lacks prompts and thus has visibility "n". '
'The assignment will have no effect.'
.format(v, self.name))
self.user_val = v
if self.is_choice_symbol_ and (self.type == BOOL or
self.type == TRISTATE):
choice = self.parent
if v == "y":
choice.user_val = self
choice.user_mode = "y"
elif v == "m":
choice.user_val = None
choice.user_mode = "m"
def _unset_user_value_no_recursive_invalidate(self):
self._invalidate()
self.user_val = None
if self.is_choice_symbol_:
self.parent._unset_user_value()
def _make_conf(self):
if self.already_written:
return []
self.already_written = True
# Note: write_to_conf is determined in get_value()
val = self.get_value()
if not self.write_to_conf:
return []
if self.type == BOOL or self.type == TRISTATE:
if val == "m" or val == "y":
return ["CONFIG_{0}={1}".format(self.name, val)]
return ["# CONFIG_{0} is not set".format(self.name)]
elif self.type == STRING:
# Escape \ and "
return ['CONFIG_{0}="{1}"'
.format(self.name,
val.replace("\\", "\\\\").replace('"', '\\"'))]
elif self.type == INT or self.type == HEX:
return ["CONFIG_{0}={1}".format(self.name, val)]
else:
_internal_error('Internal error while creating .config: unknown type "{0}".'
.format(self.type))
def _get_dependent(self):
"""Returns the set of symbols that should be invalidated if the value
of the symbol changes, because they might be affected by the change.
Note that this is an internal API -- it's probably of limited
usefulness to clients."""
if self.cached_deps is not None:
return self.cached_deps
res = set()
self._add_dependent_ignore_siblings(res)
if self.is_choice_symbol_:
for s in self.parent.get_symbols():
if s is not self:
res.add(s)
s._add_dependent_ignore_siblings(res)
self.cached_deps = res
return res
def _add_dependent_ignore_siblings(self, to):
"""Calculating dependencies gets a bit tricky for choice items as they
all depend on each other, potentially leading to infinite recursion.
This helper function calculates dependencies ignoring the other symbols
in the choice. It also works fine for symbols that are not choice
items."""
for s in self.dep:
to.add(s)
to |= s._get_dependent()
def _has_auto_menu_dep_on(self, on):
"""See Choice._determine_actual_symbols()."""
if not isinstance(self.parent, Choice):
_internal_error("Attempt to determine auto menu dependency for symbol ouside of choice.")
if self.prompts == []:
# If we have no prompt, use the menu dependencies instead (what was
# specified with 'depends on')
return self.menu_dep is not None and \
self.config._expr_depends_on(self.menu_dep, on)
for _, cond_expr in self.prompts:
if self.config._expr_depends_on(cond_expr, on):
return True
return False
class Menu(Item):
"""Represents a menu statement."""
#
# Public interface
#
def get_config(self):
"""Return the Config instance this menu is from."""
return self.config
def get_visibility(self):
"""Returns the visibility of the menu. This also affects the visibility
of subitems. See also Symbol.get_visibility()."""
return self.config._eval_expr(self.dep_expr)
def get_visible_if_visibility(self):
"""Returns the visibility the menu gets from its 'visible if'
condition. "y" if the menu has no 'visible if' condition."""
return self.config._eval_expr(self.visible_if_expr)
def get_items(self, recursive = False):
"""Returns a list containing the items (symbols, menus, choice
        statements and comments) in the menu, in the same order that the
items appear within the menu.
recursive (default: False) -- True if items contained in items within
the menu should be included
recursively (preorder)."""
if not recursive:
return self.block
res = []
for item in self.block:
res.append(item)
if isinstance(item, Menu):
res.extend(item.get_items(True))
elif isinstance(item, Choice):
res.extend(item.get_items())
return res
def get_symbols(self, recursive = False):
"""Returns a list containing the symbols in the menu, in the same order
that they appear within the menu.
recursive (default: False) -- True if symbols contained in items within
the menu should be included
recursively."""
return [item for item in self.get_items(recursive) if isinstance(item, Symbol)]
def get_title(self):
"""Returns the title text of the menu."""
return self.title
def get_parent(self):
"""Returns the menu or choice statement that contains the menu, or
None if the menu is at the top level. Note that if statements are
treated as syntactic sugar and do not have an explicit class
representation."""
return self.parent
def get_referenced_symbols(self, refs_from_enclosing = False):
"""See Symbol.get_referenced_symbols()."""
return self.all_referenced_syms if refs_from_enclosing else self.referenced_syms
def get_location(self):
"""Returns the location of the menu as a (filename, linenr) tuple,
where filename is a string and linenr an int."""
return (self.filename, self.linenr)
def __str__(self):
"""Returns a string containing various information about the menu."""
depends_on_str = self.config._expr_val_str(self.orig_deps,
"(no dependencies)")
visible_if_str = self.config._expr_val_str(self.visible_if_expr,
"(no dependencies)")
additional_deps_str = " " + self.config._expr_val_str(self.deps_from_containing,
"(no additional dependencies)")
return _sep_lines("Menu",
"Title : " + self.title,
"'depends on' dependencies : " + depends_on_str,
"'visible if' dependencies : " + visible_if_str,
"Additional dependencies from enclosing menus and if's:",
additional_deps_str,
"Location: {0}:{1}".format(self.filename, self.linenr))
#
# Private methods
#
def __init__(self):
"""Menu constructor -- not intended to be called directly by
kconfiglib clients."""
self.config = None
self.parent = None
self.title = None
self.block = None
self.dep_expr = None
# Dependency expression without dependencies from enclosing menus and
# if's propagated
self.orig_deps = None
# Dependencies inherited from containing menus and if's
self.deps_from_containing = None
# The 'visible if' expression
self.visible_if_expr = None
# The set of symbols referenced by this menu (see
# get_referenced_symbols())
self.referenced_syms = set()
# Like 'referenced_syms', but includes symbols from
# dependencies inherited from enclosing menus and if's
self.all_referenced_syms = None
self.filename = None
self.linenr = None
def _make_conf(self):
item_conf = _make_block_conf(self.block)
if self.config._eval_expr(self.dep_expr) != "n" and \
self.config._eval_expr(self.visible_if_expr) != "n":
return ["\n#\n# {0}\n#".format(self.title)] + item_conf
return item_conf
class Choice(Item):
"""Represents a choice statement. A choice can be in one of three modes:
"n" - The choice is not visible and no symbols can be selected.
"m" - Any number of symbols can be set to "m". The rest will be "n". This
is safe since potentially conflicting options don't actually get
compiled into the kernel simultaneously with "m".
"y" - One symbol will be "y" while the rest are "n".
Only tristate choices can be in "m" mode, and the visibility of the choice
is an upper bound on the mode, so that e.g. a choice that depends on a
symbol with value "m" will be in "m" mode.
The mode changes automatically when a value is assigned to a symbol within
the choice.
See Symbol.get_visibility() too."""
#
# Public interface
#
def get_selection(self):
"""Returns the symbol selected (either by the user or through
defaults), or None if either no symbol is selected or the mode is not
"y"."""
if self.cached_selection is not None:
if self.cached_selection == NO_SELECTION:
return None
return self.cached_selection
if self.get_mode() != "y":
return self._cache_ret(None)
# User choice available?
if self.user_val is not None and _get_visibility(self.user_val) == "y":
return self._cache_ret(self.user_val)
if self.optional:
return self._cache_ret(None)
return self._cache_ret(self.get_selection_from_defaults())
def get_selection_from_defaults(self):
"""Like Choice.get_selection(), but acts as if no symbol has been
selected by the user and no 'optional' flag is in effect."""
if self.actual_symbols == []:
return None
for symbol, cond_expr in self.def_exprs:
if self.config._eval_expr(cond_expr) != "n":
chosen_symbol = symbol
break
else:
chosen_symbol = self.actual_symbols[0]
# Is the chosen symbol visible?
if _get_visibility(chosen_symbol) != "n":
return chosen_symbol
# Otherwise, pick the first visible symbol
for sym in self.actual_symbols:
if _get_visibility(sym) != "n":
return sym
return None
def get_user_selection(self):
"""If the choice is in "y" mode and has a user-selected symbol, returns
that symbol. Otherwise, returns None."""
return self.user_val
def get_config(self):
"""Returns the Config instance this choice is from."""
return self.config
def get_name(self):
"""For named choices, returns the name. Returns None for unnamed
choices. No named choices appear anywhere in the kernel Kconfig files
as of Linux 3.7.0-rc8."""
return self.name
def get_prompts(self):
"""Returns a list of prompts defined for the choice, in the order they
appear in the configuration files. Returns the empty list for choices
with no prompt.
This list will have a single entry for the vast majority of choices
having prompts, but having multiple prompts for a single choice is
possible through having multiple 'choice' entries for it (though I'm
not sure if that ever happens in practice)."""
return [prompt for prompt, _ in self.orig_prompts]
def get_help(self):
"""Returns the help text of the choice, or None if the choice has no
help text."""
return self.help
def get_type(self):
"""Returns the type of the choice. See Symbol.get_type()."""
return self.type
def get_items(self):
"""Gets all items contained in the choice in the same order as within
the configuration ("items" instead of "symbols" since choices and
comments might appear within choices. This only happens in one place as
of Linux 3.7.0-rc8, in drivers/usb/gadget/Kconfig)."""
return self.block
def get_symbols(self):
"""Returns a list containing the choice's symbols.
A quirk (perhaps a bug) of Kconfig is that you can put items within a
choice that will not be considered members of the choice insofar as
selection is concerned. This happens for example if one symbol within a
choice 'depends on' the symbol preceding it, or if you put non-symbol
items within choices.
As of Linux 3.7.0-rc8, this seems to be used intentionally in one
place: drivers/usb/gadget/Kconfig.
This function returns the "proper" symbols of the choice in the order
they appear in the choice, excluding such items. If you want all items
in the choice, use get_items()."""
return self.actual_symbols
def get_parent(self):
"""Returns the menu or choice statement that contains the choice, or
None if the choice is at the top level. Note that if statements are
treated as syntactic sugar and do not have an explicit class
representation."""
return self.parent
def get_referenced_symbols(self, refs_from_enclosing = False):
"""See Symbol.get_referenced_symbols()."""
return self.all_referenced_syms if refs_from_enclosing else self.referenced_syms
def get_def_locations(self):
"""Returns a list of (filename, linenr) tuples, where filename (string)
and linenr (int) represent a location where the choice is defined. For
the vast majority of choices (all of them as of Linux 3.7.0-rc8) this
        list will only contain one element, but it's possible for named choices
to be defined in multiple locations."""
return self.def_locations
def get_visibility(self):
"""Returns the visibility of the choice statement: one of "n", "m" or
"y". This acts as an upper limit on the mode of the choice (though bool
choices can only have the mode "y"). See the class documentation for an
explanation of modes."""
return _get_visibility(self)
def get_mode(self):
"""Returns the mode of the choice. See the class documentation for
an explanation of modes."""
minimum_mode = "n" if self.optional else "m"
mode = self.user_mode if self.user_mode is not None else minimum_mode
mode = self.config._eval_min(mode, _get_visibility(self))
# Promote "m" to "y" for boolean choices
if mode == "m" and self.type == BOOL:
return "y"
return mode
def is_optional(self):
"""Returns True if the choice has the 'optional' flag set (and so will
default to "n" mode)."""
return self.optional
def __str__(self):
"""Returns a string containing various information about the choice
statement."""
return self.config._get_sym_or_choice_str(self)
#
# Private methods
#
def __init__(self):
"""Choice constructor -- not intended to be called directly by
kconfiglib clients."""
self.prompts = []
self.cached_visibility = None
self.config = None
self.parent = None
self.name = None # Yes, choices can be named
self.type = UNKNOWN
self.def_exprs = []
self.help = None
self.optional = False
self.block = None
# The prompts and default values without any dependencies from
# enclosing menus or if's propagated
self.orig_prompts = []
self.orig_def_exprs = []
# Dependencies inherited from containing menus and if's
self.deps_from_containing = None
# We need to filter out symbols that appear within the choice block but
# are not considered choice items (see
        # Choice._determine_actual_symbols()). This list holds the "actual" choice
# items.
self.actual_symbols = []
# The set of symbols referenced by this choice (see
# get_referenced_symbols())
self.referenced_syms = set()
# Like 'referenced_syms', but includes symbols from
# dependencies inherited from enclosing menus and if's
self.all_referenced_syms = set()
# See Choice.get_def_locations()
self.def_locations = []
self.user_val = None
self.user_mode = None
self.cached_selection = None
def _determine_actual_symbols(self):
"""If a symbol's visibility depends on the preceding symbol within a
choice, it is no longer viewed as a choice item. (This is quite
possibly a bug, but some things consciously use it... ugh. It stems
from automatic submenu creation.) In addition, it's possible to have
choices and comments within choices, and those shouldn't be considered
choice items either. Only drivers/usb/gadget/Kconfig seems to depend on
any of this. This method computes the "actual" items in the choice and
sets the is_choice_symbol_ flag on them (retrieved via
is_choice_symbol()).
Don't let this scare you: an earlier version simply checked for a
sequence of symbols where all symbols after the first appeared in the
'depends on' expression of the first, and that worked fine. The added
complexity is to be future-proof in the event that
drivers/usb/gadget/Kconfig turns even more sinister. It might very well
        be overkill (especially if that file is refactored ;)."""
# Items might depend on each other in a tree structure, so we need a
# stack to keep track of the current tentative parent
stack = []
for item in self.block:
if not isinstance(item, Symbol):
stack = []
continue
while stack != []:
if item._has_auto_menu_dep_on(stack[-1]):
# The item should not be viewed as a choice item, so don't
# set item.is_choice_symbol_.
stack.append(item)
break
else:
stack.pop()
else:
item.is_choice_symbol_ = True
self.actual_symbols.append(item)
stack.append(item)
def _cache_ret(self, selection):
# As None is used to indicate the lack of a cached value we can't use
# that to cache the fact that the choice has no selection. Instead, we
# use the symbolic constant NO_SELECTION.
if selection is None:
self.cached_selection = NO_SELECTION
else:
self.cached_selection = selection
return selection
def _invalidate(self):
self.cached_selection = None
self.cached_visibility = None
def _unset_user_value(self):
self._invalidate()
self.user_val = None
self.user_mode = None
def _make_conf(self):
return _make_block_conf(self.block)
class Comment(Item):
"""Represents a comment statement."""
#
# Public interface
#
def get_config(self):
"""Returns the Config instance this comment is from."""
return self.config
def get_visibility(self):
"""Returns the visibility of the comment. See also
Symbol.get_visibility()."""
return self.config._eval_expr(self.dep_expr)
def get_text(self):
"""Returns the text of the comment."""
return self.text
def get_parent(self):
"""Returns the menu or choice statement that contains the comment, or
None if the comment is at the top level. Note that if statements are
treated as syntactic sugar and do not have an explicit class
representation."""
return self.parent
def get_referenced_symbols(self, refs_from_enclosing = False):
"""See Symbol.get_referenced_symbols()."""
return self.all_referenced_syms if refs_from_enclosing else self.referenced_syms
def get_location(self):
"""Returns the location of the comment as a (filename, linenr) tuple,
where filename is a string and linenr an int."""
return (self.filename, self.linenr)
def __str__(self):
"""Returns a string containing various information about the comment."""
dep_str = self.config._expr_val_str(self.orig_deps, "(no dependencies)")
additional_deps_str = " " + self.config._expr_val_str(self.deps_from_containing,
"(no additional dependencies)")
return _sep_lines("Comment",
"Text: " + str(self.text),
"Dependencies: " + dep_str,
"Additional dependencies from enclosing menus and if's:",
additional_deps_str,
"Location: {0}:{1}".format(self.filename, self.linenr))
#
# Private methods
#
def __init__(self):
"""Comment constructor -- not intended to be called directly by
kconfiglib clients."""
self.config = None
self.parent = None
self.text = None
self.dep_expr = None
# Dependency expression without dependencies from enclosing menus and
# if's propagated
self.orig_deps = None
# Dependencies inherited from containing menus and if's
self.deps_from_containing = None
# The set of symbols referenced by this comment (see
# get_referenced_symbols())
self.referenced_syms = set()
# Like 'referenced_syms', but includes symbols from
# dependencies inherited from enclosing menus and if's
self.all_referenced_syms = None
self.filename = None
self.linenr = None
def _make_conf(self):
if self.config._eval_expr(self.dep_expr) != "n":
return ["\n#\n# {0}\n#".format(self.text)]
return []
class Kconfig_Syntax_Error(Exception):
"""Exception raised for syntax errors."""
pass
class Internal_Error(Exception):
"""Exception raised for internal errors."""
pass
#
# Public functions
#
def tri_less(v1, v2):
"""Returns True if the tristate v1 is less than the tristate v2, where "n",
"m" and "y" are ordered from lowest to highest."""
return tri_to_int[v1] < tri_to_int[v2]
def tri_less_eq(v1, v2):
"""Returns True if the tristate v1 is less than or equal to the tristate
v2, where "n", "m" and "y" are ordered from lowest to highest."""
return tri_to_int[v1] <= tri_to_int[v2]
def tri_greater(v1, v2):
"""Returns True if the tristate v1 is greater than the tristate v2, where
"n", "m" and "y" are ordered from lowest to highest."""
return tri_to_int[v1] > tri_to_int[v2]
def tri_greater_eq(v1, v2):
"""Returns True if the tristate v1 is greater than or equal to the tristate
v2, where "n", "m" and "y" are ordered from lowest to highest."""
return tri_to_int[v1] >= tri_to_int[v2]
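# --- Illustrative sketch (not part of the original kconfiglib module) ---
# The four helpers above compare tristates using the "n" < "m" < "y"
# ordering. The function below is hypothetical and never called; it only
# demonstrates the intended usage.
def _tri_comparison_examples():
    assert tri_less("n", "m")            # "n" is below "m"
    assert tri_less_eq("m", "m")         # equal values satisfy <=
    assert tri_greater("y", "m")         # "y" is the highest tristate
    assert not tri_greater_eq("n", "y")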
#
# Internal classes
#
class _Feed(object):
"""Class for working with sequences in a stream-like fashion; handy for tokens."""
def __init__(self, items):
self.items = items
self.length = len(self.items)
self.i = 0
def get_next(self):
if self.i >= self.length:
return None
item = self.items[self.i]
self.i += 1
return item
def peek_next(self):
return None if self.i >= self.length else self.items[self.i]
def check(self, token):
"""Check if the next token is 'token'. If so, remove it from the token
feed and return True. Otherwise, leave it in and return False."""
if self.i < self.length and self.items[self.i] == token:
self.i += 1
return True
return False
def go_back(self):
if self.i <= 0:
_internal_error("Attempt to move back in Feed while already at the beginning.")
self.i -= 1
def go_to_start(self):
self.i = 0
def __len__(self):
return self.length
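# --- Illustrative sketch (not part of the original kconfiglib module) ---
# A minimal, hypothetical walk-through of the _Feed API above; the function
# is defined but never called.
def _feed_example():
    feed = _Feed(["bool", '"Enable feature"', "if", "FOO"])
    assert feed.get_next() == "bool"
    assert feed.peek_next() == '"Enable feature"'  # peek does not consume
    assert feed.check('"Enable feature"')          # consumed on match
    assert not feed.check("default")               # left in place on mismatch
    assert feed.get_next() == "if"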
class _FileFeed(_Feed):
"""_Feed subclass that feeds lines from a file. Joins any line ending in
\\ with the following line. Keeps track of the filename and current line
number."""
def __init__(self, filename):
self.filename = _clean_up_path(filename)
_Feed.__init__(self, _get_lines(filename))
def remove_blank(self):
"""Removes lines until the first non-blank (not all-space) line."""
while self.i < self.length and self.items[self.i].isspace():
self.i += 1
def get_filename(self):
return self.filename
def get_linenr(self):
return self.i
#
# Internal functions
#
def _get_visibility(sc):
"""Symbols and Choices have a "visibility" that acts as an upper bound on
the values a user can set for them, corresponding to the visibility in e.g.
'make menuconfig'. This function calculates the visibility for the Symbol
or Choice 'sc' -- the logic is nearly identical."""
if sc.cached_visibility is None:
vis = "n"
for _, cond_expr in sc.prompts:
vis = sc.config._eval_max(vis, cond_expr)
if isinstance(sc, Symbol) and sc.is_choice_symbol_:
vis = sc.config._eval_min(vis, _get_visibility(sc.parent))
# Promote "m" to "y" if we're dealing with a non-tristate
if vis == "m" and sc.type != TRISTATE:
vis = "y"
sc.cached_visibility = vis
return sc.cached_visibility
def _make_and(e1, e2):
"""Constructs an AND (&&) expression. Performs trivial simplification.
Nones equate to 'y'.
Note: returns None if e1 == e2 == None."""
if e1 == "n" or e2 == "n":
return "n"
if e1 is None or e1 == "y":
return e2
if e2 is None or e2 == "y":
return e1
# Prefer to merge/update argument list if possible instead of creating
# a new AND node
if isinstance(e1, tuple) and e1[0] == AND:
if isinstance(e2, tuple) and e2[0] == AND:
return (AND, e1[1] + e2[1])
return (AND, e1[1] + [e2])
if isinstance(e2, tuple) and e2[0] == AND:
return (AND, e2[1] + [e1])
return (AND, [e1, e2])
def _make_or(e1, e2):
"""Constructs an OR (||) expression. Performs trivial simplification and
avoids Nones. Nones equate to 'y', which is usually what we want, but needs
to be kept in mind."""
# Perform trivial simplification and avoid None's (which
# correspond to y's)
if e1 is None or e2 is None or e1 == "y" or e2 == "y":
return "y"
if e1 == "n":
return e2
if e2 == "n":
return e1
# Prefer to merge/update argument list if possible instead of creating
# a new OR node
if isinstance(e1, tuple) and e1[0] == OR:
if isinstance(e2, tuple) and e2[0] == OR:
return (OR, e1[1] + e2[1])
return (OR, e1[1] + [e2])
if isinstance(e2, tuple) and e2[0] == OR:
return (OR, e2[1] + [e1])
return (OR, [e1, e2])
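# --- Illustrative sketch (not part of the original kconfiglib module) ---
# Demonstrates the trivial simplifications performed by _make_and() and
# _make_or() above. Hypothetical helper, defined but never called.
def _expr_construction_examples():
    assert _make_and("n", "FOO") == "n"         # "n" short-circuits AND
    assert _make_and(None, "FOO") == "FOO"      # None acts as "y"
    assert _make_or("y", "FOO") == "y"          # "y" short-circuits OR
    assert _make_or("n", "FOO") == "FOO"
    assert _make_and("FOO", "BAR") == (AND, ["FOO", "BAR"])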
def _get_expr_syms(expr):
"""Returns the set() of symbols appearing in expr."""
res = set()
if expr is None:
return res
def rec(expr):
if isinstance(expr, Symbol):
res.add(expr)
return
if isinstance(expr, str):
return
e0 = expr[0]
if e0 == AND or e0 == OR:
for term in expr[1]:
rec(term)
elif e0 == NOT:
rec(expr[1])
elif e0 == EQUAL or e0 == UNEQUAL:
_, v1, v2 = expr
if isinstance(v1, Symbol):
res.add(v1)
if isinstance(v2, Symbol):
res.add(v2)
else:
_internal_error("Internal error while fetching symbols from an "
"expression with token stream {0}.".format(expr))
rec(expr)
return res
def _str_val(obj):
"""Returns the value of obj as a string. If obj is not a string (constant
symbol), it must be a Symbol."""
return obj if isinstance(obj, str) else obj.get_value()
def _make_block_conf(block):
"""Returns a list of .config strings for a block (list) of items."""
# Collect the substrings in a list and later use join() instead of += to
# build the final .config contents. With older Python versions, this yields
# linear instead of quadratic complexity.
strings = []
for item in block:
strings.extend(item._make_conf())
return strings
def _sym_str_string(sym_or_str):
if isinstance(sym_or_str, str):
return '"' + sym_or_str + '"'
return sym_or_str.name
def _intersperse(lst, op):
"""_expr_to_str() helper. Gets the string representation of each expression in lst
and produces a list where op has been inserted between the elements."""
if lst == []:
return ""
res = []
def handle_sub_expr(expr):
no_parens = isinstance(expr, (str, Symbol)) or \
expr[0] in (EQUAL, UNEQUAL) or \
precedence[op] <= precedence[expr[0]]
if not no_parens:
res.append("(")
res.extend(_expr_to_str_rec(expr))
if not no_parens:
res.append(")")
op_str = op_to_str[op]
handle_sub_expr(lst[0])
for expr in lst[1:]:
res.append(op_str)
handle_sub_expr(expr)
return res
def _expr_to_str_rec(expr):
if expr is None:
return [""]
if isinstance(expr, (Symbol, str)):
return [_sym_str_string(expr)]
e0 = expr[0]
if e0 == AND or e0 == OR:
return _intersperse(expr[1], expr[0])
if e0 == NOT:
need_parens = not isinstance(expr[1], (str, Symbol))
res = ["!"]
if need_parens:
res.append("(")
res.extend(_expr_to_str_rec(expr[1]))
if need_parens:
res.append(")")
return res
if e0 == EQUAL or e0 == UNEQUAL:
return [_sym_str_string(expr[1]),
op_to_str[expr[0]],
_sym_str_string(expr[2])]
def _expr_to_str(expr):
return "".join(_expr_to_str_rec(expr))
def _indentation(line):
"""Returns the length of the line's leading whitespace, treating tab stops
as being spaced 8 characters apart."""
line = line.expandtabs()
return len(line) - len(line.lstrip())
def _deindent(line, indent):
"""Deindent 'line' by 'indent' spaces."""
line = line.expandtabs()
if len(line) <= indent:
return line
return line[indent:]
def _is_base_n(s, n):
try:
int(s, n)
return True
except ValueError:
return False
def _sep_lines(*args):
"""Returns a string comprised of all arguments, with newlines inserted
between them."""
return "\n".join(args)
def _comment(s):
"""Returns a new string with "#" inserted before each line in 's'."""
if not s:
return "#"
res = "".join(["#" + line for line in s.splitlines(True)])
if s.endswith("\n"):
return res + "#"
return res
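# --- Illustrative sketch (not part of the original kconfiglib module) ---
# _comment() output for a few inputs; hypothetical and never called.
def _comment_example():
    assert _comment("foo\nbar") == "#foo\n#bar"
    assert _comment("foo\n") == "#foo\n#"   # a trailing newline adds a bare "#"
    assert _comment("") == "#"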
def _get_lines(filename):
"""Returns a list of lines from 'filename', joining any line ending in \\
with the following line."""
with open(filename, "r") as f:
lines = []
accum = ""
for line in f:
if line.endswith("\\\n"):
accum += line[:-2]
else:
lines.append(accum + line)
accum = ""
return lines
def _clean_up_path(path):
"""Strips an initial "./" and any trailing slashes from 'path'."""
if path.startswith("./"):
path = path[2:]
return path.rstrip("/")
def _stderr_msg(msg, filename, linenr):
if filename is not None:
sys.stderr.write("{0}:{1}: ".format(_clean_up_path(filename), linenr))
sys.stderr.write(msg + "\n")
def _tokenization_error(s, filename, linenr):
loc = "" if filename is None else "{0}:{1}: ".format(filename, linenr)
raise Kconfig_Syntax_Error("{0}Couldn't tokenize '{1}'"
.format(loc, s.strip()))
def _parse_error(s, msg, filename, linenr):
loc = "" if filename is None else "{0}:{1}: ".format(filename, linenr)
raise Kconfig_Syntax_Error("{0}Couldn't parse '{1}'{2}"
.format(loc, s.strip(),
"." if msg is None else ": " + msg))
def _internal_error(msg):
raise Internal_Error(msg +
"\nSorry! You may want to send an email to ulfalizer a.t Google's " \
"email service to tell me about this. Include the message above " \
"and the stack trace and describe what you were doing.")
#
# Internal global constants
#
# Tokens
(T_AND, T_OR, T_NOT,
T_OPEN_PAREN, T_CLOSE_PAREN,
T_EQUAL, T_UNEQUAL,
T_MAINMENU, T_MENU, T_ENDMENU,
T_SOURCE, T_CHOICE, T_ENDCHOICE,
T_COMMENT, T_CONFIG, T_MENUCONFIG,
T_HELP, T_IF, T_ENDIF, T_DEPENDS, T_ON,
T_OPTIONAL, T_PROMPT, T_DEFAULT,
T_BOOL, T_TRISTATE, T_HEX, T_INT, T_STRING,
T_DEF_BOOL, T_DEF_TRISTATE,
T_SELECT, T_RANGE, T_OPTION, T_ALLNOCONFIG_Y, T_ENV,
T_DEFCONFIG_LIST, T_MODULES, T_VISIBLE) = range(39)
# The leading underscore before the function assignments below prevents pydoc
# from listing them. The constants could be hidden too, but they're fairly
# obviously internal anyway, so don't bother spamming the code.
# Keyword to token map. Note that the get() method is assigned directly as a
# small optimization.
_get_keyword = { "mainmenu" : T_MAINMENU,
"menu" : T_MENU,
"endmenu" : T_ENDMENU,
"endif" : T_ENDIF,
"endchoice" : T_ENDCHOICE,
"source" : T_SOURCE,
"choice" : T_CHOICE,
"config" : T_CONFIG,
"comment" : T_COMMENT,
"menuconfig" : T_MENUCONFIG,
"help" : T_HELP,
"if" : T_IF,
"depends" : T_DEPENDS,
"on" : T_ON,
"optional" : T_OPTIONAL,
"prompt" : T_PROMPT,
"default" : T_DEFAULT,
"bool" : T_BOOL,
"boolean" : T_BOOL,
"tristate" : T_TRISTATE,
"int" : T_INT,
"hex" : T_HEX,
"def_bool" : T_DEF_BOOL,
"def_tristate" : T_DEF_TRISTATE,
"string" : T_STRING,
"select" : T_SELECT,
"range" : T_RANGE,
"option" : T_OPTION,
"allnoconfig_y" : T_ALLNOCONFIG_Y,
"env" : T_ENV,
"defconfig_list" : T_DEFCONFIG_LIST,
"modules" : T_MODULES,
"visible" : T_VISIBLE }.get
# Strings to use for True and False
bool_str = { False : "false", True : "true" }
# Tokens after which identifier-like lexemes are treated as strings. T_CHOICE
# is included to avoid symbols being registered for named choices.
string_lex = frozenset((T_BOOL, T_TRISTATE, T_INT, T_HEX, T_STRING, T_CHOICE,
T_PROMPT, T_MENU, T_COMMENT, T_SOURCE, T_MAINMENU))
# Matches the initial token on a line; see _tokenize().
_initial_token_re_match = re.compile(r"[^\w]*(\w+)").match
# Matches an identifier/keyword optionally preceded by whitespace
_id_keyword_re_match = re.compile(r"\s*([\w./-]+)").match
# Regular expressions for parsing .config files
_set_re_match = re.compile(r"CONFIG_(\w+)=(.*)").match
_unset_re_match = re.compile(r"# CONFIG_(\w+) is not set").match
# Regular expression for finding $-references to symbols in strings
_sym_ref_re_search = re.compile(r"\$[A-Za-z0-9_]+").search
# Integers representing symbol types
UNKNOWN, BOOL, TRISTATE, STRING, HEX, INT = range(6)
# Strings to use for types
typename = { UNKNOWN : "unknown", BOOL : "bool", TRISTATE : "tristate",
STRING : "string", HEX : "hex", INT : "int" }
# Token to type mapping
token_to_type = { T_BOOL : BOOL, T_TRISTATE : TRISTATE, T_STRING : STRING,
T_INT : INT, T_HEX : HEX }
# Default values for symbols of different types (the value the symbol gets if
# it is not assigned a user value and none of its 'default' clauses kick in)
default_value = { BOOL : "n", TRISTATE : "n", STRING : "", INT : "", HEX : "" }
# Indicates that no item is selected in a choice statement
NO_SELECTION = 0
# Integers representing expression types
AND, OR, NOT, EQUAL, UNEQUAL = range(5)
# Map from tristate values to integers
tri_to_int = { "n" : 0, "m" : 1, "y" : 2 }
# Printing-related stuff
op_to_str = { AND : " && ", OR : " || ", EQUAL : " = ", UNEQUAL : " != " }
precedence = { OR : 0, AND : 1, NOT : 2 }
| gpl-2.0 |
jeffwen/self_driving_car_nd | term1/finding_lane_lines/color_region.py | 1 | 2321 | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# Read in the image and print out some stats
# Note: we read a .jpg here, so the pixel values are already on a 0-255
# byte scale (matplotlib reads .png images as floats in the 0-1 range)
image = mpimg.imread('test.jpg')
# Grab the x and y size and make a copy of the image
ysize = image.shape[0]
xsize = image.shape[1]
color_select = np.copy(image)
line_image = np.copy(image)
# Define color selection criteria
# MODIFY THESE VARIABLES TO MAKE YOUR COLOR SELECTION
red_threshold = 200
green_threshold = 200
blue_threshold = 200
rgb_threshold = [red_threshold, green_threshold, blue_threshold]
# Define the vertices of a triangular mask.
# Keep in mind the origin (x=0, y=0) is in the upper left
# MODIFY THESE VALUES TO ISOLATE THE REGION
# WHERE THE LANE LINES ARE IN THE IMAGE
left_bottom = [0, ysize]
right_bottom = [xsize, ysize]
apex = [xsize/2, ysize/2]
# Perform a linear fit (y=Ax+B) to each of the three sides of the triangle
# np.polyfit returns the coefficients [A, B] of the fit
fit_left = np.polyfit((left_bottom[0], apex[0]), (left_bottom[1], apex[1]), 1)
fit_right = np.polyfit((right_bottom[0], apex[0]), (right_bottom[1], apex[1]), 1)
fit_bottom = np.polyfit((left_bottom[0], right_bottom[0]), (left_bottom[1], right_bottom[1]), 1)
# Mask pixels below the threshold
color_thresholds = (image[:,:,0] < rgb_threshold[0]) | \
(image[:,:,1] < rgb_threshold[1]) | \
(image[:,:,2] < rgb_threshold[2])
# Find the region inside the lines
XX, YY = np.meshgrid(np.arange(0, xsize), np.arange(0, ysize))
region_thresholds = (YY > (XX*fit_left[0] + fit_left[1])) & \
(YY > (XX*fit_right[0] + fit_right[1])) & \
(YY < (XX*fit_bottom[0] + fit_bottom[1]))
# Mask color and region selection
color_select[color_thresholds | ~region_thresholds] = [0, 0, 0]
# Color pixels red where both color and region selections met
line_image[~color_thresholds & region_thresholds] = [255, 0, 0]
# Display the image and show region and color selections
plt.imshow(image)
x = [left_bottom[0], right_bottom[0], apex[0], left_bottom[0]]
y = [left_bottom[1], right_bottom[1], apex[1], left_bottom[1]]
plt.plot(x, y, 'b--', lw=4)
plt.imshow(color_select)
plt.imshow(line_image)
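# --- Illustrative sketch (assumption: not part of the original script) ---
# With the image origin at the top-left, a pixel lies inside the triangular
# region when it is below both slanted lines (larger y) and above the bottom
# edge (smaller y), which is exactly what region_thresholds computes above.
# The helper below restates that test for a single pixel; it is hypothetical
# and never called.
def point_in_region(x, y):
    return (y > (x * fit_left[0] + fit_left[1]) and
            y > (x * fit_right[0] + fit_right[1]) and
            y < (x * fit_bottom[0] + fit_bottom[1]))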
| mit |
hainm/mdtraj | mdtraj/geometry/tests/test_drid.py | 6 | 2295 | from __future__ import print_function
import numpy as np
import mdtraj as md
from mdtraj.testing import get_fn, eq
from mdtraj.geometry import compute_drid
import scipy.special
from scipy.stats import nanmean
from scipy.spatial.distance import euclidean, pdist, squareform
def test_drid_1():
n_frames = 1
n_atoms = 20
top = md.Topology()
chain = top.add_chain()
residue = top.add_residue('X', chain)
for i in range(n_atoms):
top.add_atom('X', None, residue)
t = md.Trajectory(xyz=np.random.RandomState(0).randn(n_frames, n_atoms, 3),
topology=top)
# t contains no bonds
got = compute_drid(t).reshape(n_frames, n_atoms, 3)
for i in range(n_atoms):
others = set(range(n_atoms)) - set([i])
rd = 1 / np.array([euclidean(t.xyz[0, i], t.xyz[0, e]) for e in others])
mean = np.mean(rd)
second = np.mean((rd - mean)**2)**(0.5)
third = scipy.special.cbrt(np.mean((rd - mean)**3))
ref = np.array([mean, second, third])
np.testing.assert_array_almost_equal(got[0, i], ref, decimal=5)
def test_drid_2():
n_frames = 3
n_atoms = 11
n_bonds = 5
top = md.Topology()
chain = top.add_chain()
residue = top.add_residue('X', chain)
for i in range(n_atoms):
top.add_atom('X', None, residue)
random = np.random.RandomState(0)
bonds = random.randint(n_atoms, size=(n_bonds, 2))
for a, b in bonds:
top.add_bond(top.atom(a), top.atom(b))
t = md.Trajectory(xyz=random.randn(n_frames, n_atoms, 3), topology=top)
got = compute_drid(t).reshape(n_frames, n_atoms, 3)
for i in range(n_frames):
recip = 1 / squareform(pdist(t.xyz[i]))
recip[np.diag_indices(n=recip.shape[0])] = np.nan
recip[bonds[:, 0], bonds[:, 1]] = np.nan
recip[bonds[:, 1], bonds[:, 0]] = np.nan
mean = nanmean(recip, axis=0)
second = nanmean((recip - mean)**2, axis=0)**(0.5)
third = scipy.special.cbrt(nanmean((recip - mean)**3, axis=0))
np.testing.assert_array_almost_equal(got[i, :, 0], mean, decimal=5)
np.testing.assert_array_almost_equal(got[i, :, 1], second, decimal=5)
np.testing.assert_array_almost_equal(got[i, :, 2], third, decimal=5)
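# --- Illustrative sketch (not part of the original test module) ---
# The reference values above are the first three moments of the reciprocal
# distances from one atom to the others (mean, square root of the second
# central moment, cube root of the third). The hypothetical helper below
# restates that computation for a 1-D array of reciprocal distances; the
# tests do not use it.
def _drid_moments(recip_distances):
    mean = np.mean(recip_distances)
    second = np.mean((recip_distances - mean) ** 2) ** 0.5
    third = scipy.special.cbrt(np.mean((recip_distances - mean) ** 3))
    return np.array([mean, second, third])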
| lgpl-2.1 |
android-ia/platform_external_chromium_org | ppapi/cpp/documentation/doxy_cleanup.py | 173 | 4455 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''This utility cleans up the html files as emitted by doxygen so
that they are suitable for publication on a Google documentation site.
'''
import optparse
import os
import re
import shutil
import string
import sys
try:
from BeautifulSoup import BeautifulSoup, Tag
except (ImportError, NotImplementedError):
print ("This tool requires the BeautifulSoup package "
"(see http://www.crummy.com/software/BeautifulSoup/).\n"
"Make sure that the file BeautifulSoup.py is either in this directory "
"or is available in your PYTHON_PATH")
raise
class HTMLFixer(object):
'''This class cleans up the html strings as produced by Doxygen
'''
def __init__(self, html):
self.soup = BeautifulSoup(html)
def FixTableHeadings(self):
'''Fixes the doxygen table headings.
This includes:
- Using bare <h2> title row instead of row embedded in <tr><td> in table
- Putting the "name" attribute into the "id" attribute of the <tr> tag.
- Splitting up tables into multiple separate tables if a table
heading appears in the middle of a table.
For example, this html:
<table>
<tr><td colspan="2"><h2><a name="pub-attribs"></a>
Data Fields List</h2></td></tr>
...
</table>
would be converted to this:
<h2>Data Fields List</h2>
<table>
...
</table>
'''
table_headers = []
for tag in self.soup.findAll('tr'):
if tag.td and tag.td.h2 and tag.td.h2.a and tag.td.h2.a['name']:
#tag['id'] = tag.td.h2.a['name']
tag.string = tag.td.h2.a.next
tag.name = 'h2'
table_headers.append(tag)
# reverse the list so that earlier tags don't delete later tags
table_headers.reverse()
# Split up tables that have multiple table header (th) rows
for tag in table_headers:
print "Header tag: %s is %s" % (tag.name, tag.string.strip())
# Is this a heading in the middle of a table?
if tag.findPreviousSibling('tr') and tag.parent.name == 'table':
print "Splitting Table named %s" % tag.string.strip()
table = tag.parent
table_parent = table.parent
table_index = table_parent.contents.index(table)
new_table = Tag(self.soup, name='table', attrs=table.attrs)
table_parent.insert(table_index + 1, new_table)
tag_index = table.contents.index(tag)
for index, row in enumerate(table.contents[tag_index:]):
new_table.insert(index, row)
# Now move the <h2> tag to be in front of the <table> tag
assert tag.parent.name == 'table'
table = tag.parent
table_parent = table.parent
table_index = table_parent.contents.index(table)
table_parent.insert(table_index, tag)
def RemoveTopHeadings(self):
'''Removes <div> sections with a header, tabs, or navpath class attribute'''
header_tags = self.soup.findAll(
name='div',
attrs={'class' : re.compile('^(header|tabs[0-9]*|navpath)$')})
[tag.extract() for tag in header_tags]
def FixAll(self):
self.FixTableHeadings()
self.RemoveTopHeadings()
def __str__(self):
return str(self.soup)
def main():
'''Main entry for the doxy_cleanup utility
doxy_cleanup takes a list of html files and modifies them in place.'''
parser = optparse.OptionParser(usage='Usage: %prog [options] files...')
parser.add_option('-m', '--move', dest='move', action='store_true',
default=False, help='move html files to "original_html"')
options, files = parser.parse_args()
if not files:
parser.print_usage()
return 1
for filename in files:
try:
with open(filename, 'r') as file:
html = file.read()
print "Processing %s" % filename
fixer = HTMLFixer(html)
fixer.FixAll()
with open(filename, 'w') as file:
file.write(str(fixer))
if options.move:
new_directory = os.path.join(
os.path.dirname(os.path.dirname(filename)), 'original_html')
if not os.path.exists(new_directory):
os.mkdir(new_directory)
shutil.move(filename, new_directory)
except:
print "Error while processing %s" % filename
raise
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
channing/gyp | test/mac/gyptest-postbuild-fail.py | 84 | 1594 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a failing postbuild step lets the build fail.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
# set |match| to ignore build stderr output.
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'],
match = lambda a, b: True)
test.run_gyp('test.gyp', chdir='postbuild-fail')
build_error_code = {
'xcode': 1,
'make': 2,
'ninja': 1,
}[test.format]
# If a postbuild fails, all postbuilds should be re-run on the next build.
# However, even if the first postbuild fails the other postbuilds are still
# executed.
# Non-bundles
test.build('test.gyp', 'nonbundle', chdir='postbuild-fail',
status=build_error_code)
test.built_file_must_exist('static_touch',
chdir='postbuild-fail')
# Check for non-up-to-date-ness by checking if building again produces an
# error.
test.build('test.gyp', 'nonbundle', chdir='postbuild-fail',
status=build_error_code)
# Bundles
test.build('test.gyp', 'bundle', chdir='postbuild-fail',
status=build_error_code)
test.built_file_must_exist('dynamic_touch',
chdir='postbuild-fail')
# Check for non-up-to-date-ness by checking if building again produces an
# error.
test.build('test.gyp', 'bundle', chdir='postbuild-fail',
status=build_error_code)
test.pass_test()
| bsd-3-clause |
nonZero/demos-python | src/examples/short/ftp/ftp_rmdir.py | 1 | 1098 | #!/usr/bin/python3
#
# imports #
#
import ftplib # for FTP
import sys # for argv
import os.path # for join
#
# parameters #
#
# want debugging?
p_debug = False
#
# functions #
#
def ftp_rmdir(ftp, folder, remove_toplevel, dontremove):
for filename, attr in ftp.mlsd(folder):
if attr['type'] == 'file' and filename not in dontremove:
if p_debug:
print(
'removing file [{0}] from folder [{1}]'.format(filename, folder))
ftp.delete(os.path.join(folder, filename))
if attr['type'] == 'dir':
ftp_rmdir(ftp, filename, True, dontremove)
if remove_toplevel:
if p_debug:
print('removing folder [{0}]'.format(folder))
ftp.rmd(folder)
#
# code #
#
p_host = sys.argv[1]
p_user = sys.argv[2]
p_pass = sys.argv[3]
p_dir = sys.argv[4]
if p_debug:
print(p_host)
print(p_user)
print(p_pass)
print(p_dir)
ftp = ftplib.FTP(p_host)
ftp.login(user=p_user, passwd=p_pass)
# ftp_rmdir(ftp, p_dir, False, set(['.ftpquota']))
ftp_rmdir(ftp, p_dir, False, set())
ftp.quit()
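# --- Illustrative usage (hypothetical host and credentials) ---
# The script takes host, user, password and remote folder as positional
# arguments, e.g.:
#
#   ./ftp_rmdir.py ftp.example.com alice secret public_html/old
#
# With remove_toplevel=False (as called above), the contents of the folder
# are removed recursively while the folder itself is kept.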
| gpl-3.0 |
Jimmy-Morzaria/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
cpennington/edx-platform | common/djangoapps/third_party_auth/migrations/0023_auto_20190418_2033.py | 5 | 1506 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-18 20:33
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organizations', '0006_auto_20171207_0259'),
('third_party_auth', '0022_auto_20181012_0307'),
]
operations = [
migrations.AddField(
model_name='ltiproviderconfig',
name='organization',
            field=models.OneToOneField(blank=True, help_text="optional. If this provider is an Organization, this attribute can be used to reference users in that Organization", null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Organization'),
),
migrations.AddField(
model_name='oauth2providerconfig',
name='organization',
            field=models.OneToOneField(blank=True, help_text="optional. If this provider is an Organization, this attribute can be used to reference users in that Organization", null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Organization'),
),
migrations.AddField(
model_name='samlproviderconfig',
name='organization',
            field=models.OneToOneField(blank=True, help_text="optional. If this provider is an Organization, this attribute can be used to reference users in that Organization", null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Organization'),
),
]
| agpl-3.0 |
ygormutti/VSSP | vssp.py | 1 | 8524 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import struct
import socket
import os
import threading
import time
DEBUG = False
T_REQ = 0
T_END = 1 << 14
T_ETC = 2 << 14
T_DAT = 3 << 14
T_REQ_OPEN_A = T_REQ
T_REQ_OPEN_B = 1 + T_REQ
T_REQ_OK = 2 + T_REQ
T_REQ_DENIED = 3 + T_REQ
T_REQ_STREAM = 4 + T_REQ
T_END_CONNECTION = T_END
T_END_STREAM = 1 + T_END
T_ETC_PMTU_PROBE = T_ETC
T_ETC_PMTU_ACK = 1 + T_ETC
TYPE_BITMASK = 49152
SEQNO_BITMASK = 16383
MAX_PAYLOAD = 16386
MAX_SEQNO = 16384
_formatter = struct.Struct('!H')
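# A note on the header layout implied by the constants above: each packet
# starts with a single big-endian 16-bit word ('!H'). The two most significant
# bits carry the packet type (TYPE_BITMASK == 0xC000); for T_DAT packets the
# remaining 14 bits carry the sequence number (SEQNO_BITMASK == 0x3FFF), so
# sequence numbers wrap at MAX_SEQNO == 2 ** 14.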
def debug(*args, **kwargs):
if DEBUG:
print(*args, **kwargs)
def _format_header(type, seqno=None):
if (type == T_DAT):
return _formatter.pack(type | seqno)
return _formatter.pack(type)
def _parse_header(raw):
bytes = _formatter.unpack(raw)[0]
type_ = bytes & TYPE_BITMASK
if type_ == T_DAT:
return (type_, bytes & SEQNO_BITMASK)
return (bytes, None)
def _increment_seqno(seqno):
return (seqno + 1) % MAX_SEQNO
def _decrement_seqno(seqno):
return (seqno - 1) % MAX_SEQNO
class VSSPError(Exception):
pass
class MissingSegment(Exception):
pass
class VSSPStream(threading.Thread):
def __init__(self, socket, addr, mss):
super(VSSPStream, self).__init__()
self.socket = socket
self.addr = addr
self.mss = mss
self.seqno = 0
self.timestamp = 0
self.buffer = ''
self.window = {}
self.buffer_lock = threading.Lock()
self.receiving = True
def read(self):
        debug('Application requested data from the stream. Acquiring lock...')
        with self.buffer_lock:
            debug('Lock acquired! Reading data from the buffer.')
data = self.buffer[:self.mss]
if len(data) == 0 and self.receiving and \
len(self.window) > 0 and \
not self.window[self.seqno][0] and \
self.window[self.seqno][2] < time.time():
prev_seqno = self.seqno
self.seqno = _increment_seqno(self.seqno)
raise MissingSegment(prev_seqno)
self.buffer = self.buffer[self.mss:]
return data
def _append_buffer(self, data):
        debug('Moving contiguous segments to the buffer. Acquiring lock...')
        with self.buffer_lock:
            debug('Lock acquired! Writing data to the buffer.')
self.buffer += data
def _store_packet(self, seqno, data):
        debug('Packet', seqno, 'received and added to the receive window.')
self.window[seqno] = (True, data, time.time())
def _insert_placeholder(self, seqno):
        debug('Packet', seqno, 'is missing. Starting timer.')
self.window[seqno] = (False, None, time.time() + self._timeout)
def _receive(self):
addr = None
while addr != self.addr:
data, addr = self.socket.recvfrom(self.mss)
return (_parse_header(data[:2]) + (data[2:],))
@property
def _next_seqno(self):
seqno = self.seqno
while True:
seqno = _increment_seqno(seqno)
if seqno not in self.window or self.window[seqno][2] < self.timestamp:
return _decrement_seqno(seqno)
if self.window[seqno][0]:
return seqno
@property
def _timeout(self):
return 1
def run(self):
while True:
type_, seqno, data = self._receive()
if type_ == T_END_STREAM:
                debug('T_END_STREAM received')
self.receiving = False
break
elif type_ == T_DAT:
self._store_packet(seqno, data)
i = self.seqno
while i != seqno:
if i not in self.window or self.window[i][2] < self.timestamp:
self._insert_placeholder(i)
i = _increment_seqno(i)
while seqno == self.seqno:
self._append_buffer(self.window[seqno][1])
(received, data, timestamp) = self.window[seqno]
self.window[seqno] = (received, None, timestamp)
self.timestamp = timestamp
seqno = self._next_seqno
self.seqno = _increment_seqno(self.seqno)
class VSSPReceiver(object):
def __init__(self, host, port):
(family, socktype, proto, canonname, sockaddr) = \
socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_DGRAM)[0]
self.addr = sockaddr
self.socket = socket.socket(family, socktype, proto)
self.socket.connect(self.addr)
self.mss = MAX_PAYLOAD
def _send(self, type_, data=''):
self.socket.send(_format_header(type_) + data)
def _receive(self):
addr = None
while addr != self.addr:
data, addr = self.socket.recvfrom(self.mss)
return (data[:2], data[2:])
def connect(self):
        debug('Sending T_REQ_OPEN_A')
self._send(T_REQ_OPEN_A)
hdr, data = self._receive()
if _parse_header(hdr)[0] != T_REQ_OK:
            raise VSSPError('The transmitter refused the connection request.')
        debug('T_REQ_OK received, connection open')
while True:
rawhdr, data = self._receive()
type_ = _parse_header(rawhdr)[0]
if type_ == T_ETC_PMTU_PROBE:
                debug('T_ETC_PMTU_PROBE received, sending T_ETC_PMTU_ACK')
                self._send(T_ETC_PMTU_ACK, _formatter.pack(len(data)))
elif type_ == T_ETC_PMTU_ACK:
self.mss = _formatter.unpack(data)[0]
                debug('T_ETC_PMTU_ACK received, the MSS is', self.mss)
                self._send(T_ETC_PMTU_ACK, data)
                debug('Sending T_ETC_PMTU_ACK')
break
def request(self, url):
if not isinstance(url, unicode):
            raise ValueError('URLs must be of type unicode')
        debug('Sending T_REQ_STREAM')
self._send(T_REQ_STREAM, url.encode('utf-8'))
stream = VSSPStream(self.socket, self.addr, self.mss)
stream.start()
return stream
class VSSPTransmitter(object):
def __init__(self, port, mss=1450):
(family, socktype, proto, canonname, sockaddr) = \
socket.getaddrinfo(None, port, socket.AF_UNSPEC, socket.SOCK_DGRAM,
socket.IPPROTO_UDP, socket.AI_PASSIVE)[0]
self.addr = sockaddr
self.socket = socket.socket(family, socktype, proto)
self.socket.bind(self.addr)
self.mss = mss
def _send(self, header, data=''):
self.socket.sendto(_format_header(*header) + data, self.addr)
def _receive(self):
addr = None
while addr != self.addr:
data, addr = self.socket.recvfrom(self.mss)
return (data[:2], data[2:])
def listen(self):
        debug('Waiting for T_REQ_OPEN_A')
while True:
data, addr = self.socket.recvfrom(self.mss)
type_ = _parse_header(data[:2])[0]
if type_ == T_REQ_OPEN_A:
                debug('T_REQ_OPEN_A received from', addr)
                return addr
            debug('Sending T_REQ_DENIED')
self.socket.sendto(_format_header(T_REQ_DENIED), addr)
def accept(self, addr):
        debug('Sending T_REQ_OK')
        self.addr = addr
        self._send((T_REQ_OK,))
        debug('Sending T_ETC_PMTU_ACK')
self._send((T_ETC_PMTU_ACK,), _formatter.pack(1450))
rawhdr, data = self._receive()
if _parse_header(rawhdr)[0] == T_ETC_PMTU_ACK:
self.mss = _formatter.unpack(data)[0]
            debug('T_ETC_PMTU_ACK received, confirmed MSS of', self.mss)
def handle_request(self, interval=0):
url = None
while True:
            debug('Waiting for T_REQ_STREAM')
rawhdr, data = self._receive()
header = _parse_header(rawhdr)
if header[0] == T_REQ_STREAM:
                debug('T_REQ_STREAM received for url', data)
url = data.decode('utf-8')
break
stream = open(url, 'r')
seqno = 0
segment = stream.read(self.mss - 2)
while segment:
self._send((T_DAT, seqno), segment)
time.sleep(interval)
            seqno = _increment_seqno(seqno)  # keep the sequence number inside the 14-bit field
segment = stream.read(self.mss - 2)
self._send((T_END_STREAM,))
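# A minimal usage sketch (the host, port and file name below are made-up
# examples, and error handling is omitted):
#
#   Transmitter side:
#       tx = VSSPTransmitter(5000)
#       addr = tx.listen()
#       tx.accept(addr)
#       tx.handle_request()             # streams the file named in the request
#
#   Receiver side:
#       rx = VSSPReceiver('localhost', 5000)
#       rx.connect()
#       stream = rx.request(u'example.dat')
#       chunk = stream.read()           # may raise MissingSegment on gaps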
| gpl-2.0 |
shawnadelic/shuup | shuup/admin/modules/service_providers/__init__.py | 2 | 2054 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from shuup.admin.base import AdminModule, MenuEntry
from shuup.admin.menu import SETTINGS_MENU_CATEGORY
from shuup.admin.utils.permissions import get_default_model_permissions
from shuup.admin.utils.urls import (
admin_url, derive_model_url, get_edit_and_list_urls
)
from shuup.core.models import ServiceProvider
class ServiceProviderModule(AdminModule):
name = _("Service Providers")
category = _("Payment and Shipping")
def get_urls(self):
return [
admin_url(
"^service_provider/(?P<pk>\d+)/delete/$",
"shuup.admin.modules.service_providers.views.ServiceProviderDeleteView",
name="service_provider.delete",
permissions=["shuup.delete_serviceprovider"]
)
] + get_edit_and_list_urls(
url_prefix="^service_provider",
view_template="shuup.admin.modules.service_providers.views.ServiceProvider%sView",
name_template="service_provider.%s",
permissions=get_default_model_permissions(ServiceProvider)
)
def get_menu_category_icons(self):
return {self.category: "fa fa-cubes"}
def get_menu_entries(self, request):
return [
MenuEntry(
text=self.name,
icon="fa fa-truck",
url="shuup_admin:service_provider.list",
category=SETTINGS_MENU_CATEGORY,
ordering=2
)
]
def get_required_permissions(self):
return get_default_model_permissions(ServiceProvider)
def get_model_url(self, object, kind):
return derive_model_url(ServiceProvider, "shuup_admin:service_provider", object, kind)
| agpl-3.0 |
stamhe/bitcoin | test/functional/interface_bitcoin_cli.py | 10 | 4065 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test bitcoin-cli"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie
class TestBitcoinCli(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
"""Main test logic"""
self.log.info("Compare responses from gewalletinfo RPC and `bitcoin-cli getwalletinfo`")
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info("Compare responses from getblockchaininfo RPC and `bitcoin-cli getblockchaininfo`")
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
user, password = get_auth_cookie(self.nodes[0].datadir)
self.log.info("Test -stdinrpcpass option")
assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo)
self.log.info("Test -stdin and -stdinrpcpass")
assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo)
self.log.info("Test connecting to a non-existing server")
assert_raises_process_error(1, "Could not connect to the server", self.nodes[0].cli('-rpcport=1').echo)
self.log.info("Test connecting with non-existing RPC cookie file")
assert_raises_process_error(1, "Could not locate RPC credentials", self.nodes[0].cli('-rpccookiefile=does-not-exist', '-rpcpassword=').echo)
self.log.info("Make sure that -getinfo with arguments fails")
assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help)
self.log.info("Compare responses from `bitcoin-cli -getinfo` and the RPCs data is retrieved from.")
cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test")
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
# unlocked_until is not tested because the wallet is not encrypted
if __name__ == '__main__':
TestBitcoinCli().main()
| mit |
Jgarcia-IAS/SITE | addons/gamification/tests/__init__.py | 124 | 1043 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_challenge
checks = [
test_challenge,
]
| agpl-3.0 |
BenKeyFSI/poedit | deps/boost/libs/python/test/slice.py | 40 | 1798 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""
>>> from slice_ext import *
>>> accept_slice(slice(1, None, (1,2)))
1
>>> try:
... accept_slice(list((1,2)))
... print "test failed"
... except:
... print "test passed"
...
test passed
>>> try:
... from Numeric import array
... except:
... print 1
... else:
... check_numeric_array_rich_slice('Numeric', 'ArrayType', lambda x:x)
...
1
>>> try:
... from numarray import array, all
... except:
... print 1
... else:
... check_numeric_array_rich_slice('numarray', 'NDArray', all)
...
1
>>> import sys
>>> if sys.version_info[0] == 2 and sys.version_info[1] >= 3:
... check_string_rich_slice()
... elif sys.version_info[0] > 2:
... check_string_rich_slice()
... else:
... print 1
...
1
>>> check_slice_get_indices( slice(None))
0
>>> check_slice_get_indices( slice(2,-2))
0
>>> check_slice_get_indices( slice(2, None, 2))
5
>>> check_slice_get_indices( slice(2, None, -1))
-12
>>> check_slice_get_indices( slice( 20, None))
0
>>> check_slice_get_indices( slice( -2, -5, -2))
6
"""
# Performs an affirmative and negative argument resolution check,
# checks the operation of extended slicing in Numeric arrays
# (only performed if Numeric.array or numarray.array can be found).
# checks the operation of extended slicing in new strings (Python 2.3 only).
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
| mit |
vmendez/DIRAC | StorageManagementSystem/DB/StorageManagementDB.py | 3 | 56401 | """ StorageManagementDB is a front end to the Stager Database.
There are five tables in the StorageManagementDB: Tasks, CacheReplicas, TaskReplicas, StageRequests.
The Tasks table is the place holder for the tasks that have requested files to be staged.
These can be from different systems and have different associated call back methods.
The CacheReplicas table keeps the information on all the CacheReplicas in the system.
It maps all the file information LFN, PFN, SE to an assigned ReplicaID.
The TaskReplicas table maps the TaskIDs from the Tasks table to the ReplicaID from the CacheReplicas table.
The StageRequests table contains each of the prestage request IDs for each of the replicas.
"""
__RCSID__ = "$Id$"
import inspect
import types
import threading
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities.List import intListToString, stringListToString
# Stage Requests are issued with a length of "PinLength"
# However, once Staged, the entry in StageRequests will set a PinExpiryTime of only "PinLength" / THROTTLING_STEPS
# When the PinExpiryTime arrives, StageRequests and their corresponding CacheReplicas entries are cleaned
# This allows throttling the submission of Stage Requests up to a maximum of "DiskCacheTB" per "PinLength"
# After "PinLength" / THROTTLING_STEPS seconds, entries are removed, so new requests for the same replica will trigger
# a new Stage Request to the SE, and thus an update of the Pinning on the SE.
#
# - "PinLength" is an Option of the StageRequest Agent that defaults to THROTTLING_TIME
# - "DiskCacheTB" is an Option of the StorageElement that defaults to 1 (TB)
#
THROTTLING_TIME = 86400
THROTTLING_STEPS = 12
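# Worked example with the defaults above (an illustration of the comment block,
# not an extra configuration): with PinLength = THROTTLING_TIME = 86400 s and
# THROTTLING_STEPS = 12, a replica that becomes Staged gets
# PinExpiryTime = now + 86400 / 12 = 7200 s. Once that expires, the
# StageRequests / CacheReplicas entries are cleaned, so a later request for the
# same replica triggers a fresh stage request and refreshes the pin on the SE.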
class StorageManagementDB( DB ):
def __init__( self, systemInstance = 'Default' ):
DB.__init__( self, 'StorageManagementDB', 'StorageManagement/StorageManagementDB' )
self.lock = threading.Lock()
self.TASKPARAMS = ['TaskID', 'Status', 'Source', 'SubmitTime', 'LastUpdate', 'CompleteTime', 'CallBackMethod', 'SourceTaskID']
self.REPLICAPARAMS = ['ReplicaID', 'Type', 'Status', 'SE', 'LFN', 'PFN', 'Size', 'FileChecksum', 'GUID', 'SubmitTime', 'LastUpdate', 'Reason', 'Links']
self.STAGEPARAMS = ['ReplicaID', 'StageStatus', 'RequestID', 'StageRequestSubmitTime', 'StageRequestCompletedTime', 'PinLength', 'PinExpiryTime']
self.STATES = ['Failed', 'New', 'Waiting', 'Offline', 'StageSubmitted', 'Staged']
def __getConnection( self, connection ):
if connection:
return connection
res = self._getConnection()
if res['OK']:
return res['Value']
gLogger.warn( "Failed to get MySQL connection", res['Message'] )
return connection
def _caller( self ):
return inspect.stack()[2][3]
################################################################
#
# State machine management
#
def updateTaskStatus( self, taskIDs, newTaskStatus, connection = False ):
return self.__updateTaskStatus( taskIDs, newTaskStatus, connection = connection )
def __updateTaskStatus( self, taskIDs, newTaskStatus, force = False, connection = False ):
connection = self.__getConnection( connection )
if not taskIDs:
return S_OK( taskIDs )
if force:
toUpdate = taskIDs
else:
res = self._checkTaskUpdate( taskIDs, newTaskStatus, connection = connection )
if not res['OK']:
return res
toUpdate = res['Value']
if not toUpdate:
return S_OK( toUpdate )
# reqSelect = "SELECT * FROM Tasks WHERE TaskID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newTaskStatus )
reqSelect = "SELECT TaskID FROM Tasks WHERE TaskID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newTaskStatus )
resSelect = self._query( reqSelect, connection )
if not resSelect['OK']:
gLogger.error( "%s.%s_DB: problem retrieving record:" % ( self._caller(), '__updateTaskStatus' ),
"%s. %s" % ( reqSelect, resSelect['Message'] ) )
req = "UPDATE Tasks SET Status='%s',LastUpdate=UTC_TIMESTAMP() WHERE TaskID IN (%s) AND Status != '%s';" % ( newTaskStatus, intListToString( toUpdate ), newTaskStatus )
res = self._update( req, connection )
if not res['OK']:
return res
taskIDs = []
for record in resSelect['Value']:
taskIDs.append( record[0] )
gLogger.verbose( "%s.%s_DB: to_update Tasks = %s" % ( self._caller(), '__updateTaskStatus', record ) )
if len( taskIDs ) > 0:
reqSelect1 = "SELECT * FROM Tasks WHERE TaskID IN (%s);" % intListToString( taskIDs )
resSelect1 = self._query( reqSelect1, connection )
if not resSelect1["OK"]:
gLogger.warn( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), '__updateTaskStatus', reqSelect1, resSelect1['Message'] ) )
else:
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated Tasks = %s" % ( self._caller(), '__updateTaskStatus', record ) )
return S_OK( toUpdate )
def _checkTaskUpdate( self, taskIDs, newTaskState, connection = False ):
connection = self.__getConnection( connection )
if not taskIDs:
return S_OK( taskIDs )
# * -> Failed
if newTaskState == 'Failed':
oldTaskState = []
# StageCompleting -> Done
elif newTaskState == 'Done':
oldTaskState = ['StageCompleting']
# StageSubmitted -> StageCompleting
elif newTaskState == 'StageCompleting':
oldTaskState = ['StageSubmitted']
# Waiting -> StageSubmitted
elif newTaskState == 'StageSubmitted':
oldTaskState = ['Waiting', 'Offline']
# New -> Waiting
elif newTaskState == 'Waiting':
oldTaskState = ['New']
elif newTaskState == 'Offline':
oldTaskState = ['Waiting']
else:
return S_ERROR( "Task status not recognized" )
if not oldTaskState:
toUpdate = taskIDs
else:
req = "SELECT TaskID FROM Tasks WHERE Status in (%s) AND TaskID IN (%s)" % ( stringListToString( oldTaskState ), intListToString( taskIDs ) )
res = self._query( req, connection )
if not res['OK']:
return res
toUpdate = [row[0] for row in res['Value']]
return S_OK( toUpdate )
def updateReplicaStatus( self, replicaIDs, newReplicaStatus, connection = False ):
connection = self.__getConnection( connection )
if not replicaIDs:
return S_OK( replicaIDs )
res = self._checkReplicaUpdate( replicaIDs, newReplicaStatus )
if not res['OK']:
return res
toUpdate = res['Value']
if not toUpdate:
return S_OK( toUpdate )
# reqSelect = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newReplicaStatus )
reqSelect = "SELECT ReplicaID FROM CacheReplicas WHERE ReplicaID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newReplicaStatus )
resSelect = self._query( reqSelect, connection )
if not resSelect['OK']:
gLogger.error( "%s.%s_DB: problem retrieving record:" % ( self._caller(), 'updateReplicaStatus' ),
"%s. %s" % ( reqSelect, resSelect['Message'] ) )
req = "UPDATE CacheReplicas SET Status='%s',LastUpdate=UTC_TIMESTAMP() WHERE ReplicaID IN (%s) AND Status != '%s';" % ( newReplicaStatus, intListToString( toUpdate ), newReplicaStatus )
res = self._update( req, connection )
if not res['OK']:
return res
replicaIDs = []
for record in resSelect['Value']:
replicaIDs.append( record[0] )
gLogger.verbose( "%s.%s_DB: to_update CacheReplicas = %s" % ( self._caller(), 'updateReplicaStatus', record ) )
if len( replicaIDs ) > 0:
reqSelect1 = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect1 = self._query( reqSelect1, connection )
if not resSelect1['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'updateReplicaStatus', reqSelect1, resSelect1['Message'] ) )
else:
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated CacheReplicas = %s" % ( self._caller(), 'updateReplicaStatus', record ) )
res = self._updateTasksForReplica( replicaIDs, connection = connection )
if not res['OK']:
return res
return S_OK( toUpdate )
def _updateTasksForReplica( self, replicaIDs, connection = False ):
tasksInStatus = {}
for state in self.STATES:
tasksInStatus[state] = []
req = "SELECT T.TaskID,T.Status FROM Tasks AS T, TaskReplicas AS R WHERE R.ReplicaID IN ( %s ) AND R.TaskID = T.TaskID GROUP BY T.TaskID;" % intListToString( replicaIDs )
res = self._query( req, connection )
if not res['OK']:
return res
for taskId, status in res['Value']:
subreq = "SELECT DISTINCT(C.Status) FROM TaskReplicas AS R, CacheReplicas AS C WHERE R.TaskID=%s AND R.ReplicaID = C.ReplicaID;" % taskId
subres = self._query( subreq, connection )
if not subres['OK']:
return subres
cacheStatesForTask = [row[0] for row in subres['Value']]
if not cacheStatesForTask:
tasksInStatus['Failed'].append( taskId )
continue
wrongState = False
for state in cacheStatesForTask:
if state not in self.STATES:
wrongState = True
break
if wrongState:
tasksInStatus['Failed'].append( taskId )
continue
for state in self.STATES:
if state in cacheStatesForTask:
if status != state:
tasksInStatus[state].append( taskId )
break
for newStatus in tasksInStatus.keys():
if tasksInStatus[newStatus]:
res = self.__updateTaskStatus( tasksInStatus[newStatus], newStatus, True, connection = connection )
if not res['OK']:
gLogger.warn( "Failed to update task associated to replicas", res['Message'] )
# return res
return S_OK( tasksInStatus )
def getAssociatedReplicas( self, replicaIDs ):
""" Retrieve the list of Replicas that belong to the same Tasks as the provided list
"""
res = self._getReplicaIDTasks( replicaIDs )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getAssociatedReplicas: Failed to get Tasks.', res['Message'] )
return res
taskIDs = res['Value']
return self.getCacheReplicas( {'TaskID':taskIDs} )
def _checkReplicaUpdate( self, replicaIDs, newReplicaState, connection = False ):
connection = self.__getConnection( connection )
if not replicaIDs:
return S_OK( replicaIDs )
# * -> Failed
if newReplicaState == 'Failed':
oldReplicaState = []
# New -> Waiting
elif newReplicaState == 'Waiting':
oldReplicaState = ['New']
# Waiting -> StageSubmitted
elif newReplicaState == 'StageSubmitted':
oldReplicaState = ['Waiting', 'Offline']
# StageSubmitted -> Staged
elif newReplicaState == 'Staged':
oldReplicaState = ['StageSubmitted']
elif newReplicaState == 'Offline':
oldReplicaState = ['Waiting']
else:
return S_ERROR( "Replica status not recognized" )
if not oldReplicaState:
toUpdate = replicaIDs
else:
req = "SELECT ReplicaID FROM CacheReplicas WHERE Status IN (%s) AND ReplicaID IN (%s)" % ( stringListToString( oldReplicaState ), intListToString( replicaIDs ) )
res = self._query( req, connection )
if not res['OK']:
return res
toUpdate = [row[0] for row in res['Value']]
return S_OK( toUpdate )
def __getTaskStateFromReplicaState( self, replicaState ):
# For the moment the task state just references to the replicaState
return replicaState
def updateStageRequestStatus( self, replicaIDs, newStageStatus, connection = False ):
connection = self.__getConnection( connection )
if not replicaIDs:
return S_OK( replicaIDs )
res = self._checkStageUpdate( replicaIDs, newStageStatus, connection = connection )
if not res['OK']:
return res
toUpdate = res['Value']
if not toUpdate:
return S_OK( toUpdate )
# reqSelect = "Select * FROM CacheReplicas WHERE ReplicaID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newStageStatus )
reqSelect = "Select ReplicaID FROM CacheReplicas WHERE ReplicaID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newStageStatus )
resSelect = self._query( reqSelect, connection )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'updateStageRequestStatus', reqSelect, resSelect['Message'] ) )
req = "UPDATE CacheReplicas SET Status='%s',LastUpdate=UTC_TIMESTAMP() WHERE ReplicaID IN (%s) AND Status != '%s';" % ( newStageStatus, intListToString( toUpdate ), newStageStatus )
res = self._update( req, connection )
if not res['OK']:
return res
replicaIDs = []
for record in resSelect['Value']:
replicaIDs.append( record[0] )
gLogger.verbose( "%s.%s_DB: to_update CacheReplicas = %s" % ( self._caller(), 'updateStageRequestStatus', record ) )
reqSelect1 = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect1 = self._query( reqSelect1, connection )
if not resSelect1['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'updateStageRequestStatus', reqSelect1, resSelect1['Message'] ) )
else:
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated CacheReplicas = %s" % ( self._caller(), 'updateStageRequestStatus', record ) )
# Now update the replicas associated to the replicaIDs
newReplicaStatus = self.__getReplicaStateFromStageState( newStageStatus )
res = self.updateReplicaStatus( toUpdate, newReplicaStatus, connection = connection )
if not res['OK']:
gLogger.warn( "Failed to update cache replicas associated to stage requests", res['Message'] )
return S_OK( toUpdate )
def _checkStageUpdate( self, replicaIDs, newStageState, connection = False ):
connection = self.__getConnection( connection )
if not replicaIDs:
return S_OK( replicaIDs )
# * -> Failed
if newStageState == 'Failed':
oldStageState = []
elif newStageState == 'Staged':
oldStageState = ['StageSubmitted']
else:
return S_ERROR( "StageRequest status not recognized" )
if not oldStageState:
toUpdate = replicaIDs
else:
req = "SELECT ReplicaID FROM StageRequests WHERE StageStatus = '%s' AND ReplicaID IN (%s)" % ( oldStageState, intListToString( replicaIDs ) )
res = self._query( req, connection )
if not res['OK']:
return res
toUpdate = [row[0] for row in res['Value']]
return S_OK( toUpdate )
def __getReplicaStateFromStageState( self, stageState ):
# For the moment the replica state just references to the stage state
return stageState
#
# End of state machine management
#
################################################################
################################################################
#
# Monitoring of stage tasks
#
def getTaskStatus( self, taskID, connection = False ):
""" Obtain the task status from the Tasks table. """
connection = self.__getConnection( connection )
res = self.getTaskInfo( taskID, connection = connection )
if not res['OK']:
return res
taskInfo = res['Value'][taskID]
return S_OK( taskInfo['Status'] )
def getTaskInfo( self, taskID, connection = False ):
""" Obtain all the information from the Tasks table for a supplied task. """
connection = self.__getConnection( connection )
req = "SELECT TaskID,Status,Source,SubmitTime,CompleteTime,CallBackMethod,SourceTaskID from Tasks WHERE TaskID IN (%s);" % intListToString( taskID )
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getTaskInfo: Failed to get task information.', res['Message'] )
return res
resDict = {}
for taskID, status, source, submitTime, completeTime, callBackMethod, sourceTaskID in res['Value']:
resDict[sourceTaskID] = {'Status':status, 'Source':source, 'SubmitTime':submitTime, 'CompleteTime':completeTime, 'CallBackMethod':callBackMethod, 'SourceTaskID':sourceTaskID}
if not resDict:
gLogger.error( 'StorageManagementDB.getTaskInfo: The supplied task did not exist', taskID )
return S_ERROR( 'The supplied task %s did not exist' % taskID )
return S_OK( resDict )
def _getTaskIDForJob ( self, jobID, connection = False ):
# Stager taskID is retrieved from the source DIRAC jobID
connection = self.__getConnection( connection )
req = "SELECT TaskID from Tasks WHERE SourceTaskID=%s;" % int( jobID )
res = self._query( req )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem retrieving record:" % ( self._caller(), '_getTaskIDForJob' ),
"%s. %s" % ( req, res['Message'] ) )
return S_ERROR( 'The supplied JobID does not exist!' )
taskID = [ row[0] for row in res['Value'] ]
return S_OK( taskID )
def getTaskSummary( self, jobID, connection = False ):
""" Obtain the task summary from the database. """
connection = self.__getConnection( connection )
res = self._getTaskIDForJob( jobID, connection = connection )
if not res['OK']:
return res
if res['Value']:
taskID = res['Value']
else:
return S_OK()
res = self.getTaskInfo( taskID, connection = connection )
if not res['OK']:
return res
taskInfo = res['Value']
req = "SELECT R.LFN,R.SE,R.PFN,R.Size,R.Status,R.LastUpdate,R.Reason FROM CacheReplicas AS R, TaskReplicas AS TR WHERE TR.TaskID in (%s) AND TR.ReplicaID=R.ReplicaID;" % intListToString( taskID )
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getTaskSummary: Failed to get Replica summary for task.', res['Message'] )
return res
replicaInfo = {}
for lfn, storageElement, pfn, fileSize, status, lastupdate, reason in res['Value']:
replicaInfo[lfn] = {'StorageElement':storageElement, 'PFN':pfn, 'FileSize':fileSize,
'Status':status, 'LastUpdate':lastupdate, 'Reason':reason}
resDict = {'TaskInfo':taskInfo, 'ReplicaInfo':replicaInfo}
return S_OK( resDict )
def getTasks( self, condDict = {}, older = None, newer = None, timeStamp = 'SubmitTime', orderAttribute = None,
limit = None, connection = False ):
""" Get stage requests for the supplied selection with support for web standard structure """
connection = self.__getConnection( connection )
req = "SELECT %s FROM Tasks" % ( intListToString( self.TASKPARAMS ) )
if condDict or older or newer:
if condDict.has_key( 'ReplicaID' ):
replicaIDs = condDict.pop( 'ReplicaID' )
if type( replicaIDs ) not in ( types.ListType, types.TupleType ):
replicaIDs = [replicaIDs]
res = self._getReplicaIDTasks( replicaIDs, connection = connection )
if not res['OK']:
return res
condDict['TaskID'] = res['Value']
req = "%s %s" % ( req, self.buildCondition( condDict, older, newer, timeStamp, orderAttribute, limit ) )
res = self._query( req, connection )
if not res['OK']:
return res
tasks = res['Value']
resultDict = {}
for row in tasks:
resultDict[row[0]] = dict( zip( self.TASKPARAMS[1:], row[1:] ) )
result = S_OK( resultDict )
result['Records'] = tasks
result['ParameterNames'] = self.TASKPARAMS
return result
def getCacheReplicas( self, condDict = {}, older = None, newer = None, timeStamp = 'LastUpdate', orderAttribute = None, limit = None, connection = False ):
""" Get cache replicas for the supplied selection with support for the web standard structure """
connection = self.__getConnection( connection )
req = "SELECT %s FROM CacheReplicas" % ( intListToString( self.REPLICAPARAMS ) )
if condDict or older or newer:
if condDict.has_key( 'TaskID' ):
taskIDs = condDict.pop( 'TaskID' )
if type( taskIDs ) not in ( types.ListType, types.TupleType ):
taskIDs = [taskIDs]
res = self._getTaskReplicaIDs( taskIDs, connection = connection )
if not res['OK']:
return res
if res['Value']:
condDict['ReplicaID'] = res['Value']
else:
condDict['ReplicaID'] = [-1]
    # Build the condition outside the check above so that orderAttribute and limit are honoured even when no selection conditions are supplied
    req = "%s %s" % ( req, self.buildCondition( condDict, older, newer, timeStamp, orderAttribute, limit ) )
res = self._query( req, connection )
if not res['OK']:
return res
cacheReplicas = res['Value']
resultDict = {}
for row in cacheReplicas:
resultDict[row[0]] = dict( zip( self.REPLICAPARAMS[1:], row[1:] ) )
result = S_OK( resultDict )
result['Records'] = cacheReplicas
result['ParameterNames'] = self.REPLICAPARAMS
return result
def getStageRequests( self, condDict = {}, older = None, newer = None, timeStamp = 'StageRequestSubmitTime', orderAttribute = None, limit = None, connection = False ):
""" Get stage requests for the supplied selection with support for web standard structure """
connection = self.__getConnection( connection )
req = "SELECT %s FROM StageRequests" % ( intListToString( self.STAGEPARAMS ) )
if condDict or older or newer:
if condDict.has_key( 'TaskID' ):
taskIDs = condDict.pop( 'TaskID' )
if type( taskIDs ) not in ( types.ListType, types.TupleType ):
taskIDs = [taskIDs]
res = self._getTaskReplicaIDs( taskIDs, connection = connection )
if not res['OK']:
return res
if res['Value']:
condDict['ReplicaID'] = res['Value']
else:
condDict['ReplicaID'] = [-1]
req = "%s %s" % ( req, self.buildCondition( condDict, older, newer, timeStamp, orderAttribute, limit ) )
res = self._query( req, connection )
if not res['OK']:
return res
stageRequests = res['Value']
resultDict = {}
for row in stageRequests:
resultDict[row[0]] = dict( zip( self.STAGEPARAMS[1:], row[1:] ) )
result = S_OK( resultDict )
result['Records'] = stageRequests
result['ParameterNames'] = self.STAGEPARAMS
return result
def _getTaskReplicaIDs( self, taskIDs, connection = False ):
if not taskIDs:
return S_OK( [] )
req = "SELECT DISTINCT(ReplicaID) FROM TaskReplicas WHERE TaskID IN (%s);" % intListToString( taskIDs )
res = self._query( req, connection )
if not res['OK']:
return res
replicaIDs = [row[0] for row in res['Value']]
return S_OK( replicaIDs )
def _getReplicaIDTasks( self, replicaIDs, connection = False ):
if not replicaIDs:
return S_OK( [] )
req = "SELECT DISTINCT(TaskID) FROM TaskReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
res = self._query( req, connection )
if not res['OK']:
return res
taskIDs = [row[0] for row in res['Value']]
return S_OK( taskIDs )
#
# End of monitoring of stage tasks
#
################################################################
####################################################################
#
# Submission of stage requests
#
def setRequest( self, lfnDict, source, callbackMethod, sourceTaskID, connection = False ):
""" This method populates the StorageManagementDB Tasks table with the requested files. """
connection = self.__getConnection( connection )
if not lfnDict:
return S_ERROR( "No files supplied in request" )
# The first step is to create the task in the Tasks table
res = self._createTask( source, callbackMethod, sourceTaskID, connection = connection )
if not res['OK']:
return res
taskID = res['Value']
# Get the Replicas which already exist in the CacheReplicas table
allReplicaIDs = []
taskStates = []
for se, lfns in lfnDict.items():
if type( lfns ) in types.StringTypes:
lfns = [lfns]
res = self._getExistingReplicas( se, lfns, connection = connection )
if not res['OK']:
return res
existingReplicas = res['Value']
# Insert the CacheReplicas that do not already exist
for lfn in lfns:
if lfn in existingReplicas.keys():
gLogger.verbose( 'StorageManagementDB.setRequest: Replica already exists in CacheReplicas table %s @ %s' % ( lfn, se ) )
existingFileState = existingReplicas[lfn][1]
taskState = self.__getTaskStateFromReplicaState( existingFileState )
else:
res = self._insertReplicaInformation( lfn, se, 'Stage', connection = connection )
if not res['OK']:
self._cleanTask( taskID, connection = connection )
return res
existingReplicas[lfn] = ( res['Value'], 'New' )
newFileState = existingReplicas[lfn][1]
taskState = self.__getTaskStateFromReplicaState( newFileState )
if not taskState in taskStates:
taskStates.append( taskState )
allReplicaIDs.extend( existingReplicas.values() )
# Insert all the replicas into the TaskReplicas table
res = self._insertTaskReplicaInformation( taskID, allReplicaIDs, connection = connection )
if not res['OK']:
self._cleanTask( taskID, connection = connection )
return res
    # Check whether the task status is Done based on the existing file states
# If all the files for a particular Task are 'Staged', update the Task
if taskStates == ['Staged']:
# so if the tasks are for LFNs from the lfns dictionary, which are already staged,
# they immediately change state New->Done. Fixed it to translate such tasks to 'Staged' state
self.__updateTaskStatus( [taskID], 'Staged', True, connection = connection )
if 'Failed' in taskStates:
self.__updateTaskStatus( [taskID], 'Failed', True, connection = connection )
return S_OK( taskID )
def _cleanTask( self, taskID, connection = False ):
""" Remove a task and any related information """
connection = self.__getConnection( connection )
self.removeTasks( [taskID], connection = connection )
self.removeUnlinkedReplicas( connection = connection )
def _createTask( self, source, callbackMethod, sourceTaskID, connection = False ):
""" Enter the task details into the Tasks table """
connection = self.__getConnection( connection )
req = "INSERT INTO Tasks (Source,SubmitTime,CallBackMethod,SourceTaskID) VALUES ('%s',UTC_TIMESTAMP(),'%s','%s');" % ( source, callbackMethod, sourceTaskID )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB._createTask: Failed to create task.", res['Message'] )
return res
# gLogger.info( "%s_DB:%s" % ('_createTask',req))
taskID = res['lastRowId']
reqSelect = "SELECT * FROM Tasks WHERE TaskID = %s;" % ( taskID )
resSelect = self._query( reqSelect, connection )
if not resSelect['OK']:
gLogger.info( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), '_createTask', reqSelect, resSelect['Message'] ) )
else:
gLogger.verbose( "%s.%s_DB: inserted Tasks = %s" % ( self._caller(), '_createTask', resSelect['Value'][0] ) )
# gLogger.info("StorageManagementDB._createTask: Created task with ('%s','%s','%s') and obtained TaskID %s" % (source,callbackMethod,sourceTaskID,taskID))
return S_OK( taskID )
def _getExistingReplicas( self, storageElement, lfns, connection = False ):
""" Obtains the ReplicasIDs for the replicas already entered in the CacheReplicas table """
connection = self.__getConnection( connection )
req = "SELECT ReplicaID,LFN,Status FROM CacheReplicas WHERE SE = '%s' AND LFN IN (%s);" % ( storageElement, stringListToString( lfns ) )
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB._getExistingReplicas: Failed to get existing replicas.', res['Message'] )
return res
existingReplicas = {}
for replicaID, lfn, status in res['Value']:
existingReplicas[lfn] = ( replicaID, status )
return S_OK( existingReplicas )
def _insertReplicaInformation( self, lfn, storageElement, rType, connection = False ):
""" Enter the replica into the CacheReplicas table """
connection = self.__getConnection( connection )
req = "INSERT INTO CacheReplicas (Type,SE,LFN,PFN,Size,FileChecksum,GUID,SubmitTime,LastUpdate) VALUES ('%s','%s','%s','',0,'','',UTC_TIMESTAMP(),UTC_TIMESTAMP());" % ( rType, storageElement, lfn )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "_insertReplicaInformation: Failed to insert to CacheReplicas table.", res['Message'] )
return res
# gLogger.info( "%s_DB:%s" % ('_insertReplicaInformation',req))
replicaID = res['lastRowId']
reqSelect = "SELECT * FROM CacheReplicas WHERE ReplicaID = %s;" % ( replicaID )
resSelect = self._query( reqSelect, connection )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), '_insertReplicaInformation', reqSelect, resSelect['Message'] ) )
else:
gLogger.verbose( "%s.%s_DB: inserted CacheReplicas = %s" % ( self._caller(), '_insertReplicaInformation', resSelect['Value'][0] ) )
# gLogger.verbose("_insertReplicaInformation: Inserted Replica ('%s','%s') and obtained ReplicaID %s" % (lfn,storageElement,replicaID))
return S_OK( replicaID )
def _insertTaskReplicaInformation( self, taskID, replicaIDs, connection = False ):
""" Enter the replicas into TaskReplicas table """
connection = self.__getConnection( connection )
req = "INSERT INTO TaskReplicas (TaskID,ReplicaID) VALUES "
for replicaID, _status in replicaIDs:
replicaString = "(%s,%s)," % ( taskID, replicaID )
req = "%s %s" % ( req, replicaString )
req = req.rstrip( ',' )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB._insertTaskReplicaInformation: Failed to insert to TaskReplicas table.', res['Message'] )
return res
# gLogger.info( "%s_DB:%s" % ('_insertTaskReplicaInformation',req))
gLogger.verbose( "StorageManagementDB._insertTaskReplicaInformation: Successfully added %s CacheReplicas to Task %s." % ( res['Value'], taskID ) )
return S_OK()
#
# End of insertion methods
#
################################################################
####################################################################
def getStagedReplicas( self, connection = False ):
connection = self.__getConnection( connection )
req = "SELECT TR.TaskID, R.Status, COUNT(*) from TaskReplicas as TR, CacheReplicas as R where TR.ReplicaID=R.ReplicaID GROUP BY TR.TaskID,R.Status;"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getStagedReplicas: Failed to get eligible TaskReplicas', res['Message'] )
return res
goodTasks = []
for taskID, status, _count in res['Value']:
if taskID in goodTasks:
continue
elif status in ( 'Staged', 'StageSubmitted' ):
goodTasks.append( taskID )
return self.getCacheReplicas( {'Status':'Staged', 'TaskID':goodTasks}, connection = connection )
def getWaitingReplicas( self, connection = False ):
connection = self.__getConnection( connection )
req = "SELECT TR.TaskID, R.Status, COUNT(*) from TaskReplicas as TR, CacheReplicas as R where TR.ReplicaID=R.ReplicaID GROUP BY TR.TaskID,R.Status;"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getWaitingReplicas: Failed to get eligible TaskReplicas', res['Message'] )
return res
badTasks = []
goodTasks = []
for taskID, status, _count in res['Value']:
if taskID in badTasks:
continue
elif status in ( 'New', 'Failed' ):
badTasks.append( taskID )
elif status == 'Waiting':
goodTasks.append( taskID )
return self.getCacheReplicas( {'Status':'Waiting', 'TaskID':goodTasks}, connection = connection )
####################################################################
def getOfflineReplicas( self, connection = False ):
connection = self.__getConnection( connection )
req = "SELECT TR.TaskID, R.Status, COUNT(*) from TaskReplicas as TR, CacheReplicas as R where TR.ReplicaID=R.ReplicaID GROUP BY TR.TaskID,R.Status;"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getOfflineReplicas: Failed to get eligible TaskReplicas', res['Message'] )
return res
badTasks = []
goodTasks = []
for taskID, status, _count in res['Value']:
if taskID in badTasks:
continue
elif status in ( 'New', 'Failed' ):
badTasks.append( taskID )
elif status == 'Offline':
goodTasks.append( taskID )
return self.getCacheReplicas( {'Status':'Offline', 'TaskID':goodTasks}, connection = connection )
####################################################################
def getTasksWithStatus( self, status ):
""" This method retrieves the TaskID from the Tasks table with the supplied Status. """
req = "SELECT TaskID,Source,CallBackMethod,SourceTaskID from Tasks WHERE Status = '%s';" % status
res = self._query( req )
if not res['OK']:
return res
taskIDs = {}
for taskID, source, callback, sourceTask in res['Value']:
taskIDs[taskID] = ( source, callback, sourceTask )
return S_OK( taskIDs )
####################################################################
#
# The state transition of the CacheReplicas from *->Failed
#
def updateReplicaFailure( self, terminalReplicaIDs ):
""" This method sets the status to Failure with the failure reason for the supplied Replicas. """
res = self.updateReplicaStatus( terminalReplicaIDs.keys(), 'Failed' )
if not res['OK']:
return res
updated = res['Value']
if not updated:
return S_OK( updated )
for replicaID in updated:
reqSelect = "Select * FROM CacheReplicas WHERE ReplicaID = %d" % ( replicaID )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'updateReplicaFailure', reqSelect, resSelect['Message'] ) )
req = "UPDATE CacheReplicas SET Reason = '%s' WHERE ReplicaID = %d" % ( terminalReplicaIDs[replicaID], replicaID )
res = self._update( req )
if not res['OK']:
gLogger.error( 'StorageManagementDB.updateReplicaFailure: Failed to update replica fail reason.', res['Message'] )
return res
replicaIDs = []
for record in resSelect['Value']:
replicaIDs.append( record[0] )
gLogger.verbose( "%s.%s_DB: to_update CacheReplicas = %s" % ( self._caller(), 'updateReplicaFailure', record ) )
reqSelect1 = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect1 = self._query( reqSelect1 )
if not resSelect1['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'updateReplicaFailure', reqSelect1, resSelect1['Message'] ) )
else:
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated CacheReplicas = %s" % ( self._caller(), 'updateReplicaFailure', record ) )
return S_OK( updated )
####################################################################
#
# The state transition of the CacheReplicas from New->Waiting
#
def updateReplicaInformation( self, replicaTuples ):
""" This method set the replica size information and pfn for the requested storage element. """
for replicaID, pfn, size in replicaTuples:
# reqSelect = "SELECT * FROM CacheReplicas WHERE ReplicaID = %s and Status != 'Cancelled';" % ( replicaID )
reqSelect = "SELECT ReplicaID FROM CacheReplicas WHERE ReplicaID = %s and Status != 'Cancelled';" % ( replicaID )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'updateReplicaInformation', reqSelect, resSelect['Message'] ) )
req = "UPDATE CacheReplicas SET PFN = '%s', Size = %s, Status = 'Waiting' WHERE ReplicaID = %s and Status != 'Cancelled';" % ( pfn, size, replicaID )
res = self._update( req )
if not res['OK']:
gLogger.error( 'StagerDB.updateReplicaInformation: Failed to insert replica information.', res['Message'] )
replicaIDs = []
for record in resSelect['Value']:
replicaIDs.append( record[0] )
gLogger.verbose( "%s.%s_DB: to_update CacheReplicas = %s" % ( self._caller(), 'updateReplicaInformation', record ) )
reqSelect1 = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect1 = self._query( reqSelect1 )
if not resSelect1['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'updateReplicaInformation', reqSelect1, resSelect1['Message'] ) )
else:
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated CacheReplicas = %s" % ( self._caller(), 'updateReplicaInformation', record ) )
gLogger.debug( 'StagerDB.updateReplicaInformation: Successfully updated CacheReplicas record With Status=Waiting, for ReplicaID= %s' % ( replicaID ) )
return S_OK()
####################################################################
#
# The state transition of the CacheReplicas from Waiting->StageSubmitted
#
def getSubmittedStagePins( self ):
# change the query to take into account pin expiry time
req = "SELECT SE,COUNT(*),SUM(Size) from CacheReplicas WHERE Status NOT IN ('New','Waiting','Offline','Failed') GROUP BY SE;"
# req = "SELECT SE,Count(*),SUM(Size) from CacheReplicas,StageRequests WHERE Status NOT IN ('New','Waiting','Failed') and CacheReplicas.ReplicaID=StageRequests.ReplicaID and PinExpiryTime>UTC_TIMESTAMP() GROUP BY SE;"
res = self._query( req )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getSubmittedStagePins: Failed to obtain submitted requests.', res['Message'] )
return res
storageRequests = {}
for storageElement, replicas, totalSize in res['Value']:
storageRequests[storageElement] = {'Replicas':int( replicas ), 'TotalSize':int( totalSize )}
return S_OK( storageRequests )
def insertStageRequest( self, requestDict, pinLifeTime ):
req = "INSERT INTO StageRequests (ReplicaID,RequestID,StageRequestSubmitTime,PinLength) VALUES "
for requestID, replicaIDs in requestDict.items():
for replicaID in replicaIDs:
replicaString = "(%s,'%s',UTC_TIMESTAMP(),%d)," % ( replicaID, requestID, pinLifeTime )
req = "%s %s" % ( req, replicaString )
req = req.rstrip( ',' )
res = self._update( req )
if not res['OK']:
gLogger.error( 'StorageManagementDB.insertStageRequest: Failed to insert to StageRequests table.', res['Message'] )
return res
for requestID, replicaIDs in requestDict.items():
for replicaID in replicaIDs:
# fix, no individual queries
reqSelect = "SELECT * FROM StageRequests WHERE ReplicaID = %s AND RequestID = '%s';" % ( replicaID, requestID )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'insertStageRequest', reqSelect, resSelect['Message'] ) )
else:
gLogger.verbose( "%s.%s_DB: inserted StageRequests = %s" % ( self._caller(), 'insertStageRequest', resSelect['Value'][0] ) )
# gLogger.info( "%s_DB: howmany = %s" % ('insertStageRequest',res))
# gLogger.info( "%s_DB:%s" % ('insertStageRequest',req))
gLogger.debug( "StorageManagementDB.insertStageRequest: Successfully added %s StageRequests with RequestID %s." % ( res['Value'], requestID ) )
return S_OK()
####################################################################
#
# The state transition of the CacheReplicas from StageSubmitted->Staged
#
def setStageComplete( self, replicaIDs ):
# Daniela: FIX wrong PinExpiryTime (84000->86400 seconds = 1 day)
reqSelect = "SELECT * FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'setStageComplete', reqSelect, resSelect['Message'] ) )
return resSelect
req = "UPDATE StageRequests SET StageStatus='Staged',StageRequestCompletedTime = UTC_TIMESTAMP(),PinExpiryTime = DATE_ADD(UTC_TIMESTAMP(),INTERVAL ( PinLength / %s ) SECOND) WHERE ReplicaID IN (%s);" % ( THROTTLING_STEPS, intListToString( replicaIDs ) )
res = self._update( req )
if not res['OK']:
gLogger.error( "StorageManagementDB.setStageComplete: Failed to set StageRequest completed.", res['Message'] )
return res
for record in resSelect['Value']:
gLogger.verbose( "%s.%s_DB: to_update StageRequests = %s" % ( self._caller(), 'setStageComplete', record ) )
reqSelect1 = "SELECT * FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect1 = self._query( reqSelect1 )
if not resSelect1['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'setStageComplete', reqSelect1, resSelect1['Message'] ) )
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated StageRequests = %s" % ( self._caller(), 'setStageComplete', record ) )
gLogger.debug( "StorageManagementDB.setStageComplete: Successfully updated %s StageRequests table with StageStatus=Staged for ReplicaIDs: %s." % ( res['Value'], replicaIDs ) )
return res
def wakeupOldRequests( self, replicaIDs , retryInterval, connection = False ):
"""
get only StageRequests with StageRequestSubmitTime older than 1 day AND are still not staged
delete these requests
reset Replicas with corresponding ReplicaIDs to Status='New'
"""
try:
retryInterval = max( retryInterval, 2 )
retryInterval = min( retryInterval, 24 )
retryInterval = int( retryInterval )
except Exception:
errorString = 'Wrong argument type'
gLogger.exception( errorString )
return S_ERROR( errorString )
    if len( replicaIDs ) > 0:
req = "SELECT ReplicaID FROM StageRequests WHERE ReplicaID IN (%s) AND StageStatus='StageSubmitted' AND DATE_ADD( StageRequestSubmitTime, INTERVAL %s HOUR ) < UTC_TIMESTAMP();" % ( intListToString( replicaIDs ), retryInterval )
res = self._query( req )
if not res['OK']:
gLogger.error( "StorageManagementDB.wakeupOldRequests: Failed to select old StageRequests.", res['Message'] )
return res
old_replicaIDs = [ row[0] for row in res['Value'] ]
if len( old_replicaIDs ) > 0:
req = "UPDATE CacheReplicas SET Status='New',LastUpdate = UTC_TIMESTAMP(), Reason = 'wakeupOldRequests' WHERE ReplicaID in (%s);" % intListToString( old_replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.wakeupOldRequests: Failed to roll CacheReplicas back to Status=New.", res['Message'] )
return res
req = "DELETE FROM StageRequests WHERE ReplicaID in (%s);" % intListToString( old_replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.wakeupOldRequests. Problem removing entries from StageRequests." )
return res
return S_OK()
####################################################################
#
# This code handles the finalization of stage tasks
#
# Daniela: useless method
'''
def updateStageCompletingTasks(self):
""" This will select all the Tasks in StageCompleting status and check whether all the associated files are Staged. """
req = "SELECT TR.TaskID,COUNT(if(R.Status NOT IN ('Staged'),1,NULL)) FROM Tasks AS T, TaskReplicas AS TR, CacheReplicas AS R WHERE T.Status='StageCompleting' AND T.TaskID=TR.TaskID AND TR.ReplicaID=R.ReplicaID GROUP BY TR.TaskID;"
res = self._query(req)
if not res['OK']:
return res
taskIDs = []
for taskID,count in res['Value']:
if int(count) == 0:
taskIDs.append(taskID)
if not taskIDs:
return S_OK(taskIDs)
req = "UPDATE Tasks SET Status = 'Staged' WHERE TaskID IN (%s);" % intListToString(taskIDs)
res = self._update(req)
if not res['OK']:
return res
return S_OK(taskIDs)
'''
def setTasksDone( self, taskIDs ):
""" This will update the status for a list of taskIDs to Done. """
reqSelect = "SELECT * FROM Tasks WHERE TaskID IN (%s);" % intListToString( taskIDs )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.error( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'setTasksDone', reqSelect, resSelect['Message'] ) )
req = "UPDATE Tasks SET Status = 'Done', CompleteTime = UTC_TIMESTAMP() WHERE TaskID IN (%s);" % intListToString( taskIDs )
res = self._update( req )
if not res['OK']:
gLogger.error( "StorageManagementDB.setTasksDone: Failed to set Tasks status to Done.", res['Message'] )
return res
for record in resSelect['Value']:
gLogger.verbose( "%s.%s_DB: to_update Tasks = %s" % ( self._caller(), 'setTasksDone', record ) )
# fix, no individual queries
reqSelect1 = "SELECT * FROM Tasks WHERE TaskID IN (%s);" % intListToString( taskIDs )
resSelect1 = self._query( reqSelect1 )
if not resSelect1['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'setTasksDone', reqSelect1, resSelect1['Message'] ) )
else:
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated Tasks = %s" % ( self._caller(), 'setTasksDone', record ) )
gLogger.debug( "StorageManagementDB.setTasksDone: Successfully updated %s Tasks with StageStatus=Done for taskIDs: %s." % ( res['Value'], taskIDs ) )
return res
def killTasksBySourceTaskID( self, sourceTaskIDs, connection = False ):
""" Given SourceTaskIDs (jobs), this will cancel further staging of files for the corresponding tasks.
The "cancel" is actually removing all stager DB records for these jobs.
Care must be taken to NOT cancel staging of files that are requested also by other tasks. """
connection = self.__getConnection( connection )
# get the TaskIDs
req = "SELECT TaskID from Tasks WHERE SourceTaskID IN (%s);" % intListToString( sourceTaskIDs )
res = self._query( req )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
taskIDs = [ row[0] for row in res['Value'] ]
    # ! Make sure to only cancel file staging for files that are not also requested by tasks (jobs) other than the killed ones
if taskIDs:
req = "SELECT DISTINCT(CR.ReplicaID) FROM TaskReplicas AS TR, CacheReplicas AS CR WHERE TR.TaskID IN (%s) AND CR.Links=1 and TR.ReplicaID=CR.ReplicaID;" % intListToString( taskIDs )
res = self._query( req )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
replicaIDs = [ row[0] for row in res['Value'] ]
if replicaIDs:
req = "DELETE FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString ( replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem removing records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
req = "DELETE FROM CacheReplicas WHERE ReplicaID in (%s) AND Links=1;" % intListToString ( replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem removing records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
# Finally, remove the Task and TaskReplicas entries.
res = self.removeTasks( taskIDs, connection )
return res
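  # Illustrative note (added commentary, based on the Links=1 filters above): if
  # two jobs share a replica, its CacheReplicas row has Links=2, so killing only
  # one of the jobs leaves the replica and its StageRequest untouched; only
  # replicas referenced exclusively by the killed tasks are removed.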
def removeStageRequests( self, replicaIDs, connection = False ):
connection = self.__getConnection( connection )
req = "DELETE FROM StageRequests WHERE ReplicaID in (%s);" % intListToString( replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeStageRequests. Problem removing entries from StageRequests." )
return res
return res
def removeTasks( self, taskIDs, connection = False ):
""" This will delete the entries from the TaskReplicas for the provided taskIDs. """
connection = self.__getConnection( connection )
req = "DELETE FROM TaskReplicas WHERE TaskID IN (%s);" % intListToString( taskIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeTasks. Problem removing entries from TaskReplicas." )
return res
# gLogger.info( "%s_DB:%s" % ('removeTasks',req))
reqSelect = "SELECT * FROM Tasks WHERE TaskID IN (%s);" % intListToString( taskIDs )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.error( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'removeTasks', reqSelect, resSelect['Message'] ) )
else:
for record in resSelect['Value']:
gLogger.verbose( "%s.%s_DB: to_delete Tasks = %s" % ( self._caller(), 'removeTasks', record ) )
req = "DELETE FROM Tasks WHERE TaskID in (%s);" % intListToString( taskIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeTasks. Problem removing entries from Tasks." )
gLogger.verbose( "%s.%s_DB: deleted Tasks" % ( self._caller(), 'removeTasks' ) )
# gLogger.info( "%s_DB:%s" % ('removeTasks',req))
return res
def setOldTasksAsFailed( self, daysOld, connection = False ):
"""
Set Tasks older than "daysOld" number of days to Failed
These tasks have already been retried every day for staging
"""
req = "UPDATE Tasks SET Status='Failed' WHERE DATE_ADD(SubmitTime, INTERVAL %s DAY ) < UTC_TIMESTAMP();" % ( daysOld )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.setOldTasksAsFailed. Problem setting old Tasks to Failed." )
return res
return res
def getCacheReplicasSummary( self, connection = False ):
"""
Reports breakdown of file number/size in different staging states across storage elements
"""
connection = self.__getConnection( connection )
req = "SELECT DISTINCT(Status),SE,COUNT(*),sum(size)/(1024*1024*1024) FROM CacheReplicas GROUP BY Status,SE;"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.getCacheReplicasSummary failed." )
return res
resSummary = {}
i = 1
for status, se, numFiles, sumFiles in res['Value']:
resSummary[i] = {'Status':status, 'SE':se, 'NumFiles':long( numFiles ), 'SumFiles':float( sumFiles )}
i += 1
return S_OK( resSummary )
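  # Illustrative shape of the returned summary (SE names and numbers invented):
  #   { 1: {'Status': 'Staged', 'SE': 'CERN-RAW', 'NumFiles': 120L, 'SumFiles': 3.5},
  #     2: {'Status': 'New', 'SE': 'RAL-DST', 'NumFiles': 10L, 'SumFiles': 0.2} }
  # SumFiles is in GiB because of the sum(size)/(1024*1024*1024) term above.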
def removeUnlinkedReplicas( self, connection = False ):
""" This will remove Replicas from the CacheReplicas that are not associated to any Task.
If the Replica has been Staged,
wait until StageRequest.PinExpiryTime and remove the StageRequest and CacheReplicas entries
"""
connection = self.__getConnection( connection )
# First, check if there is a StageRequest and PinExpiryTime has arrived
req = "select SR.ReplicaID from CacheReplicas CR,StageRequests SR WHERE CR.Links = 0 and CR.ReplicaID=SR.ReplicaID group by SR.ReplicaID HAVING max(SR.PinExpiryTime) < UTC_TIMESTAMP();"
# req = "SELECT ReplicaID from CacheReplicas WHERE Links = 0;"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem selecting entries from CacheReplicas where Links = 0." )
return res
replicaIDs = [ row[0] for row in res['Value'] ]
# Look for Failed CacheReplicas which are not associated to any Task. These have no PinExpiryTime in StageRequests
# as they were not staged successfully (for various reasons), even though a staging request had been submitted
req = "SELECT ReplicaID FROM CacheReplicas WHERE Links = 0 AND Status = 'Failed';"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem selecting entries from CacheReplicas where Links = 0 AND Status=Failed." )
else:
replicaIDs.extend( [ row[0] for row in res['Value'] ] )
if replicaIDs:
      # Remove the entries from the StageRequests table that have expired
reqSelect = "SELECT * FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'removeUnlinkedReplicas',
reqSelect, resSelect['Message'] ) )
else:
for record in resSelect['Value']:
gLogger.verbose( "%s.%s_DB: to_delete StageRequests = %s" % ( self._caller(), 'removeUnlinkedReplicas',
record ) )
req = "DELETE FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem deleting from StageRequests." )
return res
gLogger.verbose( "%s.%s_DB: deleted StageRequests" % ( self._caller(), 'removeUnlinkedReplicas' ) )
gLogger.debug( "StorageManagementDB.removeUnlinkedReplicas: Successfully removed %s StageRequests entries for ReplicaIDs: %s." % ( res['Value'], replicaIDs ) )
    # Second, look for CacheReplicas for which there is no entry in StageRequests
req = 'SELECT ReplicaID FROM CacheReplicas WHERE Links = 0 AND ReplicaID NOT IN ( SELECT DISTINCT( ReplicaID ) FROM StageRequests )'
res = self._query( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem selecting entries from CacheReplicas where Links = 0." )
else:
replicaIDs.extend( [ row[0] for row in res['Value'] ] )
if not replicaIDs:
return S_OK()
# Now delete all CacheReplicas
reqSelect = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'removeUnlinkedReplicas', reqSelect, resSelect['Message'] ) )
else:
for record in resSelect['Value']:
gLogger.verbose( "%s.%s_DB: to_delete CacheReplicas = %s" % ( self._caller(), 'removeUnlinkedReplicas', record ) )
req = "DELETE FROM CacheReplicas WHERE ReplicaID IN (%s) AND Links= 0;" % intListToString( replicaIDs )
res = self._update( req, connection )
if res['OK']:
gLogger.verbose( "%s.%s_DB: deleted CacheReplicas" % ( self._caller(), 'removeUnlinkedReplicas' ) )
gLogger.debug( "StorageManagementDB.removeUnlinkedReplicas: Successfully removed %s CacheReplicas entries for ReplicaIDs: %s." % ( res['Value'], replicaIDs ) )
else:
gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem removing entries from CacheReplicas." )
return res
| gpl-3.0 |
splice/splice-server | dev_setup.py | 1 | 7847 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
# This script is intended to be run from a git checkout of RCS
# it will create symlinks and update paths as needed so RCS can be
# run without needing to install the RPM.
# Script is based on 'pulp-dev.py' from pulpproject.org
import optparse
import os
import shlex
import shutil
import sys
import subprocess
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DJANGO_APP_DIR = os.path.join(BASE_DIR, "src", "splice")
WARNING_COLOR = '\033[31m'
WARNING_RESET = '\033[0m'
#
# Below are directories we will ensure exist
#
DIRS = ("/etc/splice",
"/etc/splice/celery",
"/etc/pki/splice",
"/var/lib/splice",
"/var/log/splice",
"/var/log/splice/celery",
"/srv/splice",
)
#
# Below are files we will symlink from the git checkout to local system settings
#
LINKS = (
('etc/httpd/conf.d/splice.conf', '/etc/httpd/conf.d/splice.conf'),
('etc/pki/splice/Splice_testing_root_CA.crt', '/etc/pki/splice/Splice_testing_root_CA.crt'),
('etc/pki/splice/Splice_testing_root_CA.key', '/etc/pki/splice/Splice_testing_root_CA.key'),
('etc/rc.d/init.d/splice_celerybeat', '/etc/rc.d/init.d/splice_celerybeat'),
('etc/rc.d/init.d/splice_celeryd', '/etc/rc.d/init.d/splice_celeryd'),
('etc/rc.d/init.d/splice_all', '/etc/rc.d/init.d/splice_all'),
('etc/splice/celery/celerybeat', '/etc/splice/celery/celerybeat'),
('etc/splice/logging', '/etc/splice/logging'),
('etc/splice/splice.conf', '/etc/splice/splice.conf'),
('etc/splice/conf.d', '/etc/splice/conf.d'),
('srv/splice/webservices.wsgi', '/srv/splice/webservices.wsgi'),
)
#
# Below are files we copy from the git checkout to a local location and then modify
#
LOCAL_DEV_COPIES = (
('etc/splice/celery/celeryd', '/etc/splice/celery/celeryd'),
)
def parse_cmdline():
"""
Parse and validate the command line options.
"""
parser = optparse.OptionParser()
parser.add_option('-I', '--install', action='store_true', help='install splice development files')
parser.add_option('-U', '--uninstall', action='store_true', help='uninstall splice development files')
parser.set_defaults(install=False, uninstall=False)
opts, args = parser.parse_args()
if opts.install and opts.uninstall:
parser.error('both install and uninstall specified')
if not (opts.install or opts.uninstall):
parser.error('neither install or uninstall specified')
return (opts, args)
def warning(msg):
print "%s%s%s" % (WARNING_COLOR, msg, WARNING_RESET)
def debug(msg):
sys.stderr.write('%s\n' % msg)
def create_dirs(opts):
for d in DIRS:
if os.path.exists(d) and os.path.isdir(d):
debug('skipping %s exists' % d)
continue
debug('creating directory: %s' % d)
os.makedirs(d, 0777)
def getlinks():
links = []
for l in LINKS:
if isinstance(l, (list, tuple)):
src = l[0]
dst = l[1]
else:
src = l
dst = os.path.join('/', l)
links.append((src, dst))
return links
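# Illustrative note (added commentary): getlinks() returns (source, destination)
# pairs derived from the LINKS tuple above, e.g.
#   [('etc/httpd/conf.d/splice.conf', '/etc/httpd/conf.d/splice.conf'), ...]
# Relative sources are later joined with the git checkout directory in install().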
def install(opts):
warnings = []
create_splice_user()
create_dirs(opts)
currdir = os.path.abspath(os.path.dirname(__file__))
for src, dst in getlinks():
warning_msg = create_link(opts, os.path.join(currdir,src), dst)
if warning_msg:
warnings.append(warning_msg)
warnings.extend(create_local_copies())
if warnings:
print "\n***\nPossible problems: Please read below\n***"
for w in warnings:
warning(w)
update_celeryd_config()
update_permissions()
return os.EX_OK
def uninstall(opts):
for src, dst in getlinks():
debug('removing link: %s' % dst)
if not os.path.islink(dst):
debug('%s does not exist, skipping' % dst)
continue
os.unlink(dst)
return os.EX_OK
def create_link(opts, src, dst):
if not os.path.lexists(dst):
return _create_link(opts, src, dst)
if not os.path.islink(dst):
return "[%s] is not a symbolic link as we expected, please adjust if this is not what you intended." % (dst)
if not os.path.exists(os.readlink(dst)):
warning('BROKEN LINK: [%s] attempting to delete and fix it to point to %s.' % (dst, src))
try:
os.unlink(dst)
return _create_link(opts, src, dst)
except:
msg = "[%s] was a broken symlink, failed to delete and relink to [%s], please fix this manually" % (dst, src)
return msg
debug('verifying link: %s points to %s' % (dst, src))
dst_stat = os.stat(dst)
src_stat = os.stat(src)
if dst_stat.st_ino != src_stat.st_ino:
msg = "[%s] is pointing to [%s] which is different than the intended target [%s]" % (dst, os.readlink(dst), src)
return msg
def _create_link(opts, src, dst):
debug('creating link: %s pointing to %s' % (dst, src))
try:
os.symlink(src, dst)
except OSError, e:
msg = "Unable to create symlink for [%s] pointing to [%s], received error: <%s>" % (dst, src, e)
return msg
def create_local_copies():
warnings = []
currdir = os.path.abspath(os.path.dirname(__file__))
for src, dst in LOCAL_DEV_COPIES:
warning_msg = copy_file(src, dst)
if warning_msg:
warnings.append(warning_msg)
return warnings
def copy_file(src, dst):
if os.path.exists(dst):
debug("Skipping copy of [%s] to [%s] since [%s] already exists." % (src, dst, dst))
return
try:
debug("Copying [%s] to [%s]" % (src, dst))
shutil.copyfile(src, dst)
except OSError, e:
msg = "Unable to copy [%s] to [%s], received error: <%s>" % (src, dst, e)
return msg
return
def update_celeryd_config():
# Update celeryd configuration
django_dir = DJANGO_APP_DIR.replace("/", "\/")
cmd = "sed -i 's/^CELERYD_CHDIR=.*/CELERYD_CHDIR=%s/' %s" % (django_dir, '/etc/splice/celery/celeryd')
run_command(cmd)
def create_splice_user():
os.system("getent group splice >/dev/null || groupadd -r splice")
os.system("getent passwd splice >/dev/null || useradd -r -g splice -G apache -d /var/lib/splice -s /sbin/nologin -c 'splice user' splice")
def update_permissions():
cmd = "chown -R apache:splice /var/log/splice"
run_command(cmd)
cmd = "chmod -R g+rwX /var/log/splice"
run_command(cmd)
def run_command(cmd, verbose=True):
if verbose:
print "Running: %s" % (cmd)
if isinstance(cmd, str):
cmd = shlex.split(cmd.encode('ascii', 'ignore'))
handle = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out_msg, err_msg = handle.communicate(None)
if handle.returncode != 0:
print "Error running: %s" % (cmd)
print "stdout:\n%s" % (out_msg)
print "stderr:\n%s" % (err_msg)
        return False, out_msg, err_msg  # keep the return shape consistent with the success path
return True, out_msg, err_msg
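# Illustrative usage (hypothetical command string): callers in this script simply
# invoke run_command("chmod -R g+rwX /var/log/splice") and ignore the result;
# stdout/stderr are only printed when the command exits non-zero.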
if __name__ == '__main__':
opts, args = parse_cmdline()
if opts.install:
sys.exit(install(opts))
if opts.uninstall:
sys.exit(uninstall(opts))
| gpl-2.0 |
NikNitro/Python-iBeacon-Scan | sympy/polys/tests/test_heuristicgcd.py | 126 | 3554 | from sympy.polys.rings import ring
from sympy.polys.domains import ZZ
from sympy.polys.heuristicgcd import heugcd
def test_heugcd_univariate_integers():
R, x = ring("x", ZZ)
f = x**4 + 8*x**3 + 21*x**2 + 22*x + 8
g = x**3 + 6*x**2 + 11*x + 6
h = x**2 + 3*x + 2
cff = x**2 + 5*x + 4
cfg = x + 3
assert heugcd(f, g) == (h, cff, cfg)
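    # heugcd(f, g) returns (h, cff, cfg) where h is the GCD and cff, cfg are the
    # cofactors, so h*cff == f and h*cfg == g hold for every case below.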
f = x**4 - 4
g = x**4 + 4*x**2 + 4
h = x**2 + 2
cff = x**2 - 2
cfg = x**2 + 2
assert heugcd(f, g) == (h, cff, cfg)
f = x**8 + x**6 - 3*x**4 - 3*x**3 + 8*x**2 + 2*x - 5
g = 3*x**6 + 5*x**4 - 4*x**2 - 9*x + 21
h = 1
cff = f
cfg = g
assert heugcd(f, g) == (h, cff, cfg)
f = - 352518131239247345597970242177235495263669787845475025293906825864749649589178600387510272*x**49 \
+ 46818041807522713962450042363465092040687472354933295397472942006618953623327997952*x**42 \
+ 378182690892293941192071663536490788434899030680411695933646320291525827756032*x**35 \
+ 112806468807371824947796775491032386836656074179286744191026149539708928*x**28 \
- 12278371209708240950316872681744825481125965781519138077173235712*x**21 \
+ 289127344604779611146960547954288113529690984687482920704*x**14 \
+ 19007977035740498977629742919480623972236450681*x**7 \
+ 311973482284542371301330321821976049
g = 365431878023781158602430064717380211405897160759702125019136*x**21 \
+ 197599133478719444145775798221171663643171734081650688*x**14 \
- 9504116979659010018253915765478924103928886144*x**7 \
- 311973482284542371301330321821976049
# TODO: assert heugcd(f, f.diff(x))[0] == g
f = 1317378933230047068160*x + 2945748836994210856960
g = 120352542776360960*x + 269116466014453760
h = 120352542776360960*x + 269116466014453760
cff = 10946
cfg = 1
assert heugcd(f, g) == (h, cff, cfg)
def test_heugcd_multivariate_integers():
R, x, y = ring("x,y", ZZ)
f, g = 2*x**2 + 4*x + 2, x + 1
assert heugcd(f, g) == (x + 1, 2*x + 2, 1)
f, g = x + 1, 2*x**2 + 4*x + 2
assert heugcd(f, g) == (x + 1, 1, 2*x + 2)
R, x, y, z, u = ring("x,y,z,u", ZZ)
f, g = u**2 + 2*u + 1, 2*u + 2
assert heugcd(f, g) == (u + 1, u + 1, 2)
f, g = z**2*u**2 + 2*z**2*u + z**2 + z*u + z, u**2 + 2*u + 1
h, cff, cfg = u + 1, z**2*u + z**2 + z, u + 1
assert heugcd(f, g) == (h, cff, cfg)
assert heugcd(g, f) == (h, cfg, cff)
R, x, y, z = ring("x,y,z", ZZ)
f, g, h = R.fateman_poly_F_1()
H, cff, cfg = heugcd(f, g)
assert H == h and H*cff == f and H*cfg == g
R, x, y, z, u, v = ring("x,y,z,u,v", ZZ)
f, g, h = R.fateman_poly_F_1()
H, cff, cfg = heugcd(f, g)
assert H == h and H*cff == f and H*cfg == g
R, x, y, z, u, v, a, b = ring("x,y,z,u,v,a,b", ZZ)
f, g, h = R.fateman_poly_F_1()
H, cff, cfg = heugcd(f, g)
assert H == h and H*cff == f and H*cfg == g
R, x, y, z, u, v, a, b, c, d = ring("x,y,z,u,v,a,b,c,d", ZZ)
f, g, h = R.fateman_poly_F_1()
H, cff, cfg = heugcd(f, g)
assert H == h and H*cff == f and H*cfg == g
R, x, y, z = ring("x,y,z", ZZ)
f, g, h = R.fateman_poly_F_2()
H, cff, cfg = heugcd(f, g)
assert H == h and H*cff == f and H*cfg == g
f, g, h = R.fateman_poly_F_3()
H, cff, cfg = heugcd(f, g)
assert H == h and H*cff == f and H*cfg == g
R, x, y, z, t = ring("x,y,z,t", ZZ)
f, g, h = R.fateman_poly_F_3()
H, cff, cfg = heugcd(f, g)
assert H == h and H*cff == f and H*cfg == g
| gpl-3.0 |
spasovski/zamboni | apps/devhub/tests/test_views_perf.py | 7 | 2325 | # -*- coding: utf8 -*-
import json
from mock import patch
from nose.tools import eq_
from addons.models import Addon
from amo.urlresolvers import reverse
import amo.tests
from files.models import Platform
class TestPerfViews(amo.tests.TestCase):
fixtures = ['base/apps', 'base/users', 'base/platforms',
'base/addon_3615']
def setUp(self):
super(TestPerfViews, self).setUp()
assert self.client.login(username='[email protected]', password='password')
addon = Addon.objects.get(pk=3615)
self.file = addon.latest_version.files.get()
self.patches = [patch('waffle.flag_is_active'),
patch('waffle.helpers.flag_is_active')]
for p in self.patches:
p.start().return_value = True
p = patch('devhub.perf.start_perf_test')
self.perf_test = p.start()
self.patches.append(p)
self.perf_calls = None
def tearDown(self):
super(TestPerfViews, self).tearDown()
for p in self.patches:
p.stop()
def assert_call(self, expected_call):
if not self.perf_calls:
self.perf_calls = [tuple(c) for c in
self.perf_test.call_args_list]
assert expected_call in self.perf_calls, (
'Call was not made: %s' % str(expected_call))
def start(self):
re = self.client.get(reverse('devhub.file_perf_tests_start',
args=[self.file.version.addon.id, self.file.id]),
follow=True)
eq_(re.status_code, 200)
return json.loads(re.content)
def set_platform(self, platform):
self.file.update(platform=Platform.objects.get(pk=platform.id))
def test_start_linux(self):
self.set_platform(amo.PLATFORM_LINUX)
re = self.start()
eq_(re, {'success': True})
self.assert_call(((self.file, 'linux', 'firefox3.6'), {}))
self.assert_call(((self.file, 'linux', 'firefox6.0'), {}))
def test_start_all(self):
self.set_platform(amo.PLATFORM_ALL)
self.start()
self.assert_call(((self.file, 'linux', 'firefox6.0'), {}))
def test_unsupported_plat(self):
self.set_platform(amo.PLATFORM_ANDROID)
eq_(self.start(), {'success': False})
| bsd-3-clause |
bastik/youtube-dl | youtube_dl/extractor/nova.py | 114 | 7044 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
unified_strdate,
)
class NovaIE(InfoExtractor):
IE_DESC = 'TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz'
_VALID_URL = 'http://(?:[^.]+\.)?(?P<site>tv(?:noviny)?|tn|novaplus|vymena|fanda|krasna|doma|prask)\.nova\.cz/(?:[^/]+/)+(?P<id>[^/]+?)(?:\.html|/|$)'
_TESTS = [{
'url': 'http://tvnoviny.nova.cz/clanek/novinky/co-na-sebe-sportaci-praskli-vime-jestli-pujde-hrdlicka-na-materskou.html?utm_source=tvnoviny&utm_medium=cpfooter&utm_campaign=novaplus',
'info_dict': {
'id': '1608920',
'display_id': 'co-na-sebe-sportaci-praskli-vime-jestli-pujde-hrdlicka-na-materskou',
'ext': 'flv',
'title': 'Duel: Michal Hrdlička a Petr Suchoň',
'description': 'md5:d0cc509858eee1b1374111c588c6f5d5',
'thumbnail': 're:^https?://.*\.(?:jpg)',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://tn.nova.cz/clanek/tajemstvi-ukryte-v-podzemi-specialni-nemocnice-v-prazske-krci.html#player_13260',
'md5': '1dd7b9d5ea27bc361f110cd855a19bd3',
'info_dict': {
'id': '1757139',
'display_id': 'tajemstvi-ukryte-v-podzemi-specialni-nemocnice-v-prazske-krci',
'ext': 'mp4',
'title': 'Podzemní nemocnice v pražské Krči',
'description': 'md5:f0a42dd239c26f61c28f19e62d20ef53',
'thumbnail': 're:^https?://.*\.(?:jpg)',
}
}, {
'url': 'http://novaplus.nova.cz/porad/policie-modrava/video/5591-policie-modrava-15-dil-blondynka-na-hrbitove',
'info_dict': {
'id': '1756825',
'display_id': '5591-policie-modrava-15-dil-blondynka-na-hrbitove',
'ext': 'flv',
'title': 'Policie Modrava - 15. díl - Blondýnka na hřbitově',
'description': 'md5:dc24e50be5908df83348e50d1431295e', # Make sure this description is clean of html tags
'thumbnail': 're:^https?://.*\.(?:jpg)',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://novaplus.nova.cz/porad/televizni-noviny/video/5585-televizni-noviny-30-5-2015/',
'info_dict': {
'id': '1756858',
'ext': 'flv',
'title': 'Televizní noviny - 30. 5. 2015',
'thumbnail': 're:^https?://.*\.(?:jpg)',
'upload_date': '20150530',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://fanda.nova.cz/clanek/fun-and-games/krvavy-epos-zaklinac-3-divoky-hon-vychazi-vyhrajte-ho-pro-sebe.html',
'info_dict': {
'id': '1753621',
'ext': 'mp4',
'title': 'Zaklínač 3: Divoký hon',
'description': 're:.*Pokud se stejně jako my nemůžete.*',
'thumbnail': 're:https?://.*\.jpg(\?.*)?',
'upload_date': '20150521',
},
'params': {
# rtmp download
'skip_download': True,
}
}, {
'url': 'http://sport.tn.nova.cz/clanek/sport/hokej/nhl/zivot-jde-dal-hodnotil-po-vyrazeni-z-playoff-jiri-sekac.html',
'only_matching': True,
}, {
'url': 'http://fanda.nova.cz/clanek/fun-and-games/krvavy-epos-zaklinac-3-divoky-hon-vychazi-vyhrajte-ho-pro-sebe.html',
'only_matching': True,
}, {
'url': 'http://doma.nova.cz/clanek/zdravi/prijdte-se-zapsat-do-registru-kostni-drene-jiz-ve-stredu-3-cervna.html',
'only_matching': True,
}, {
'url': 'http://prask.nova.cz/clanek/novinky/co-si-na-sobe-nase-hvezdy-nechaly-pojistit.html',
'only_matching': True,
}, {
'url': 'http://tv.nova.cz/clanek/novinky/zivot-je-zivot-bondovsky-trailer.html',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('id')
site = mobj.group('site')
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
[r"(?:media|video_id)\s*:\s*'(\d+)'",
r'media=(\d+)',
r'id="article_video_(\d+)"',
r'id="player_(\d+)"'],
webpage, 'video id')
config_url = self._search_regex(
r'src="(http://tn\.nova\.cz/bin/player/videojs/config\.php\?[^"]+)"',
webpage, 'config url', default=None)
if not config_url:
DEFAULT_SITE_ID = '23000'
SITES = {
'tvnoviny': DEFAULT_SITE_ID,
'novaplus': DEFAULT_SITE_ID,
'vymena': DEFAULT_SITE_ID,
'krasna': DEFAULT_SITE_ID,
'fanda': '30',
'tn': '30',
'doma': '30',
}
site_id = self._search_regex(
r'site=(\d+)', webpage, 'site id', default=None) or SITES.get(site, DEFAULT_SITE_ID)
config_url = ('http://tn.nova.cz/bin/player/videojs/config.php?site=%s&media=%s&jsVar=vjsconfig'
% (site_id, video_id))
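            # For example (illustrative values only), site_id '30' and video_id
            # '1757139' would produce:
            #   http://tn.nova.cz/bin/player/videojs/config.php?site=30&media=1757139&jsVar=vjsconfig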
config = self._download_json(
config_url, display_id,
'Downloading config JSON',
transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1])
mediafile = config['mediafile']
video_url = mediafile['src']
m = re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>[^/]+?))/&*(?P<playpath>.+)$', video_url)
if m:
formats = [{
'url': m.group('url'),
'app': m.group('app'),
'play_path': m.group('playpath'),
'player_path': 'http://tvnoviny.nova.cz/static/shared/app/videojs/video-js.swf',
'ext': 'flv',
}]
else:
formats = [{
'url': video_url,
}]
self._sort_formats(formats)
title = mediafile.get('meta', {}).get('title') or self._og_search_title(webpage)
description = clean_html(self._og_search_description(webpage, default=None))
thumbnail = config.get('poster')
if site == 'novaplus':
upload_date = unified_strdate(self._search_regex(
r'(\d{1,2}-\d{1,2}-\d{4})$', display_id, 'upload date', default=None))
elif site == 'fanda':
upload_date = unified_strdate(self._search_regex(
r'<span class="date_time">(\d{1,2}\.\d{1,2}\.\d{4})', webpage, 'upload date', default=None))
else:
upload_date = None
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'upload_date': upload_date,
'thumbnail': thumbnail,
'formats': formats,
}
| unlicense |
mpetyx/palmdrop | venv/lib/python2.7/site-packages/PIL/ImageCms.py | 40 | 31153 | #
# The Python Imaging Library.
# $Id$
#
# optional color management support, based on Kevin Cazabon's PyCMS
# library.
#
# History:
# 2009-03-08 fl Added to PIL.
#
# Copyright (C) 2002-2003 Kevin Cazabon
# Copyright (c) 2009 by Fredrik Lundh
#
# See the README file for information on usage and redistribution. See
# below for the original description.
#
DESCRIPTION = """
pyCMS
a Python / PIL interface to the littleCMS ICC Color Management System
Copyright (C) 2002-2003 Kevin Cazabon
[email protected]
http://www.cazabon.com
pyCMS home page: http://www.cazabon.com/pyCMS
littleCMS home page: http://www.littlecms.com
(littleCMS is Copyright (C) 1998-2001 Marti Maria)
Originally released under LGPL. Graciously donated to PIL in
March 2009, for distribution under the standard PIL license
The pyCMS.py module provides a "clean" interface between Python/PIL and
pyCMSdll, taking care of some of the more complex handling of the direct
pyCMSdll functions, as well as error-checking and making sure that all
relevant data is kept together.
While it is possible to call pyCMSdll functions directly, it's not highly
recommended.
Version History:
0.1.0 pil mod March 10, 2009
Renamed display profile to proof profile. The proof
profile is the profile of the device that is being
simulated, not the profile of the device which is
actually used to display/print the final simulation
(that'd be the output profile) - also see LCMSAPI.txt
input colorspace -> using 'renderingIntent' -> proof
colorspace -> using 'proofRenderingIntent' -> output
colorspace
Added LCMS FLAGS support.
Added FLAGS["SOFTPROOFING"] as default flag for
buildProofTransform (otherwise the proof profile/intent
would be ignored).
0.1.0 pil March 2009 - added to PIL, as PIL.ImageCms
0.0.2 alpha Jan 6, 2002
                    Added try/except statements around type() checks of
potential CObjects... Python won't let you use type()
on them, and raises a TypeError (stupid, if you ask me!)
Added buildProofTransformFromOpenProfiles() function.
Additional fixes in DLL, see DLL code for details.
0.0.1 alpha first public release, Dec. 26, 2002
Known to-do list with current version (of Python interface, not pyCMSdll):
none
"""
VERSION = "0.1.0 pil"
# --------------------------------------------------------------------.
import Image
import _imagingcms
core = _imagingcms
#
# intent/direction values
INTENT_PERCEPTUAL = 0
INTENT_RELATIVE_COLORIMETRIC = 1
INTENT_SATURATION = 2
INTENT_ABSOLUTE_COLORIMETRIC = 3
DIRECTION_INPUT = 0
DIRECTION_OUTPUT = 1
DIRECTION_PROOF = 2
#
# flags
FLAGS = {
    "MATRIXINPUT": 1,
    "MATRIXOUTPUT": 2,
    "MATRIXONLY": (1|2),
    "NOWHITEONWHITEFIXUP": 4, # Don't hot fix scum dot
    "NOPRELINEARIZATION": 16, # Don't create prelinearization tables on precalculated transforms (internal use)
    "GUESSDEVICECLASS": 32, # Guess device class (for transform2devicelink)
    "NOTCACHE": 64, # Inhibit 1-pixel cache
    "NOTPRECALC": 256,
    "NULLTRANSFORM": 512, # Don't transform anyway
    "HIGHRESPRECALC": 1024, # Use more memory to give better accuracy
    "LOWRESPRECALC": 2048, # Use less memory to minimize resources
"WHITEBLACKCOMPENSATION": 8192,
"BLACKPOINTCOMPENSATION": 8192,
"GAMUTCHECK": 4096, # Out of Gamut alarm
"SOFTPROOFING": 16384, # Do softproofing
"PRESERVEBLACK": 32768, # Black preservation
"NODEFAULTRESOURCEDEF": 16777216, # CRD special
"GRIDPOINTS": lambda n: ((n) & 0xFF) << 16 # Gridpoints
}
_MAX_FLAG = 0
for flag in FLAGS.values():
if isinstance(flag, type(0)):
_MAX_FLAG = _MAX_FLAG | flag
# --------------------------------------------------------------------.
# Experimental PIL-level API
# --------------------------------------------------------------------.
##
# Profile.
class ImageCmsProfile:
def __init__(self, profile):
# accepts a string (filename), a file-like object, or a low-level
# profile object
if Image.isStringType(profile):
self._set(core.profile_open(profile), profile)
elif hasattr(profile, "read"):
self._set(core.profile_fromstring(profile.read()))
else:
self._set(profile) # assume it's already a profile
def _set(self, profile, filename=None):
self.profile = profile
self.filename = filename
if profile:
self.product_name = profile.product_name
self.product_info = profile.product_info
else:
self.product_name = None
self.product_info = None
##
# Transform. This can be used with the procedural API, or with the
# standard {@link Image.point} method.
class ImageCmsTransform(Image.ImagePointHandler):
def __init__(self, input, output, input_mode, output_mode,
intent=INTENT_PERCEPTUAL,
proof=None, proof_intent=INTENT_ABSOLUTE_COLORIMETRIC, flags=0):
if proof is None:
self.transform = core.buildTransform(
input.profile, output.profile,
input_mode, output_mode,
intent,
flags
)
else:
self.transform = core.buildProofTransform(
input.profile, output.profile, proof.profile,
input_mode, output_mode,
intent, proof_intent,
flags
)
# Note: inputMode and outputMode are for pyCMS compatibility only
self.input_mode = self.inputMode = input_mode
self.output_mode = self.outputMode = output_mode
def point(self, im):
return self.apply(im)
def apply(self, im, imOut=None):
im.load()
if imOut is None:
imOut = Image.new(self.output_mode, im.size, None)
result = self.transform.apply(im.im.id, imOut.im.id)
return imOut
def apply_in_place(self, im):
im.load()
if im.mode != self.output_mode:
raise ValueError("mode mismatch") # wrong output mode
result = self.transform.apply(im.im.id, im.im.id)
return im
##
# (experimental) Fetches the profile for the current display device.
# Returns None if the profile is not known.
def get_display_profile(handle=None):
import sys
if sys.platform == "win32":
import ImageWin
if isinstance(handle, ImageWin.HDC):
profile = core.get_display_profile_win32(handle, 1)
else:
profile = core.get_display_profile_win32(handle or 0)
else:
try:
get = _imagingcms.get_display_profile
except AttributeError:
return None
else:
profile = get()
return ImageCmsProfile(profile)
# --------------------------------------------------------------------.
# pyCMS compatible layer
# --------------------------------------------------------------------.
##
# (pyCMS) Exception class. This is used for all errors in the pyCMS API.
class PyCMSError(Exception):
pass
##
# (pyCMS) Applies an ICC transformation to a given image, mapping from
# inputProfile to outputProfile.
def profileToProfile(im, inputProfile, outputProfile, renderingIntent=INTENT_PERCEPTUAL, outputMode=None, inPlace=0, flags=0):
"""
ImageCms.profileToProfile(im, inputProfile, outputProfile,
[renderingIntent], [outputMode], [inPlace])
Returns either None or a new PIL image object, depending on value of
inPlace (see below).
im = an open PIL image object (i.e. Image.new(...) or
Image.open(...), etc.)
inputProfile = string, as a valid filename path to the ICC input
profile you wish to use for this image, or a profile object
outputProfile = string, as a valid filename path to the ICC output
profile you wish to use for this image, or a profile object
renderingIntent = integer (0-3) specifying the rendering intent you
wish to use for the transform
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC =1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC =3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and
what they do.
outputMode = a valid PIL mode for the output image (i.e. "RGB", "CMYK",
etc.). Note: if rendering the image "inPlace", outputMode MUST be
the same mode as the input, or omitted completely. If omitted, the
outputMode will be the same as the mode of the input image (im.mode)
inPlace = BOOL (1 = TRUE, None or 0 = FALSE). If TRUE, the original
image is modified in-place, and None is returned. If FALSE
(default), a new Image object is returned with the transform
applied.
flags = integer (0-...) specifying additional flags
If the input or output profiles specified are not valid filenames, a
PyCMSError will be raised. If inPlace == TRUE and outputMode != im.mode,
a PyCMSError will be raised. If an error occurs during application of
the profiles, a PyCMSError will be raised. If outputMode is not a mode
supported by the outputProfile (or by pyCMS), a PyCMSError will be
raised.
This function applies an ICC transformation to im from inputProfile's
color space to outputProfile's color space using the specified rendering
intent to decide how to handle out-of-gamut colors.
OutputMode can be used to specify that a color mode conversion is to
be done using these profiles, but the specified profiles must be able
to handle that mode. I.e., if converting im from RGB to CMYK using
profiles, the input profile must handle RGB data, and the output
profile must handle CMYK data.
"""
if outputMode is None:
outputMode = im.mode
if type(renderingIntent) != type(1) or not (0 <= renderingIntent <=3):
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
if type(flags) != type(1) or not (0 <= flags <= _MAX_FLAG):
        raise PyCMSError("flags must be an integer between 0 and %s" % _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
transform = ImageCmsTransform(
inputProfile, outputProfile, im.mode, outputMode, renderingIntent, flags=flags
)
if inPlace:
transform.apply_in_place(im)
imOut = None
else:
imOut = transform.apply(im)
except (IOError, TypeError, ValueError), v:
raise PyCMSError(v)
return imOut
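# Illustrative usage sketch (the profile file names below are assumptions, not
# files shipped with PIL):
#   im = Image.open("photo.jpg")
#   cmyk = profileToProfile(im, "sRGB.icc", "USWebCoatedSWOP.icc",
#                           renderingIntent=INTENT_PERCEPTUAL, outputMode="CMYK")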
##
# (pyCMS) Opens an ICC profile file.
def getOpenProfile(profileFilename):
"""
ImageCms.getOpenProfile(profileFilename)
Returns a CmsProfile class object.
profileFilename = string, as a valid filename path to the ICC profile
you wish to open, or a file-like object.
The PyCMSProfile object can be passed back into pyCMS for use in creating
transforms and such (as in ImageCms.buildTransformFromOpenProfiles()).
    If profileFilename is not a valid filename for an ICC profile, a
PyCMSError will be raised.
"""
try:
return ImageCmsProfile(profileFilename)
except (IOError, TypeError, ValueError), v:
raise PyCMSError(v)
##
# (pyCMS) Builds an ICC transform mapping from the inputProfile to the
# outputProfile. Use applyTransform to apply the transform to a given
# image.
def buildTransform(inputProfile, outputProfile, inMode, outMode, renderingIntent=INTENT_PERCEPTUAL, flags=0):
"""
ImageCms.buildTransform(inputProfile, outputProfile, inMode, outMode,
[renderingIntent])
Returns a CmsTransform class object.
inputProfile = string, as a valid filename path to the ICC input
profile you wish to use for this transform, or a profile object
outputProfile = string, as a valid filename path to the ICC output
profile you wish to use for this transform, or a profile object
inMode = string, as a valid PIL mode that the appropriate profile also
supports (i.e. "RGB", "RGBA", "CMYK", etc.)
outMode = string, as a valid PIL mode that the appropriate profile also
supports (i.e. "RGB", "RGBA", "CMYK", etc.)
renderingIntent = integer (0-3) specifying the rendering intent you
wish to use for the transform
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC =1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC =3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and
what they do.
flags = integer (0-...) specifying additional flags
If the input or output profiles specified are not valid filenames, a
PyCMSError will be raised. If an error occurs during creation of the
transform, a PyCMSError will be raised.
If inMode or outMode are not a mode supported by the outputProfile (or
by pyCMS), a PyCMSError will be raised.
This function builds and returns an ICC transform from the inputProfile
to the outputProfile using the renderingIntent to determine what to do
with out-of-gamut colors. It will ONLY work for converting images that
are in inMode to images that are in outMode color format (PIL mode,
i.e. "RGB", "RGBA", "CMYK", etc.).
Building the transform is a fair part of the overhead in
ImageCms.profileToProfile(), so if you're planning on converting multiple
images using the same input/output settings, this can save you time.
Once you have a transform object, it can be used with
ImageCms.applyProfile() to convert images without the need to re-compute
the lookup table for the transform.
The reason pyCMS returns a class object rather than a handle directly
to the transform is that it needs to keep track of the PIL input/output
modes that the transform is meant for. These attributes are stored in
the "inMode" and "outMode" attributes of the object (which can be
manually overridden if you really want to, but I don't know of any
time that would be of use, or would even work).
"""
if type(renderingIntent) != type(1) or not (0 <= renderingIntent <=3):
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
if type(flags) != type(1) or not (0 <= flags <= _MAX_FLAG):
        raise PyCMSError("flags must be an integer between 0 and %s" % _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
return ImageCmsTransform(inputProfile, outputProfile, inMode, outMode, renderingIntent, flags=flags)
except (IOError, TypeError, ValueError), v:
raise PyCMSError(v)
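# Illustrative usage sketch (profile paths are assumptions):
#   rgb2cmyk = buildTransform("sRGB.icc", "USWebCoatedSWOP.icc", "RGB", "CMYK",
#                             renderingIntent=INTENT_PERCEPTUAL)
# The returned object is reusable; see applyTransform() below.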
##
# (pyCMS) Builds an ICC transform mapping from the inputProfile to the
# outputProfile, but tries to simulate the result that would be
# obtained on the proofProfile device.
def buildProofTransform(inputProfile, outputProfile, proofProfile, inMode, outMode, renderingIntent=INTENT_PERCEPTUAL, proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC, flags=FLAGS["SOFTPROOFING"]):
"""
ImageCms.buildProofTransform(inputProfile, outputProfile, proofProfile,
inMode, outMode, [renderingIntent], [proofRenderingIntent])
Returns a CmsTransform class object.
inputProfile = string, as a valid filename path to the ICC input
profile you wish to use for this transform, or a profile object
outputProfile = string, as a valid filename path to the ICC output
(monitor, usually) profile you wish to use for this transform,
or a profile object
proofProfile = string, as a valid filename path to the ICC proof
profile you wish to use for this transform, or a profile object
inMode = string, as a valid PIL mode that the appropriate profile also
supports (i.e. "RGB", "RGBA", "CMYK", etc.)
outMode = string, as a valid PIL mode that the appropriate profile also
supports (i.e. "RGB", "RGBA", "CMYK", etc.)
renderingIntent = integer (0-3) specifying the rendering intent you
wish to use for the input->proof (simulated) transform
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC =1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC =3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and
what they do.
proofRenderingIntent = integer (0-3) specifying the rendering intent
you wish to use for proof->output transform
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC =1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC =3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and
what they do.
flags = integer (0-...) specifying additional flags
If the input, output, or proof profiles specified are not valid
filenames, a PyCMSError will be raised.
If an error occurs during creation of the transform, a PyCMSError will
be raised.
If inMode or outMode are not a mode supported by the outputProfile
(or by pyCMS), a PyCMSError will be raised.
This function builds and returns an ICC transform from the inputProfile
to the outputProfile, but tries to simulate the result that would be
obtained on the proofProfile device using renderingIntent and
proofRenderingIntent to determine what to do with out-of-gamut
colors. This is known as "soft-proofing". It will ONLY work for
converting images that are in inMode to images that are in outMode
color format (PIL mode, i.e. "RGB", "RGBA", "CMYK", etc.).
Usage of the resulting transform object is exactly the same as with
ImageCms.buildTransform().
Proof profiling is generally used when using an output device to get a
good idea of what the final printed/displayed image would look like on
the proofProfile device when it's quicker and easier to use the
output device for judging color. Generally, this means that the
output device is a monitor, or a dye-sub printer (etc.), and the simulated
device is something more expensive, complicated, or time consuming
(making it difficult to make a real print for color judgement purposes).
Soft-proofing basically functions by adjusting the colors on the
output device to match the colors of the device being simulated. However,
when the simulated device has a much wider gamut than the output
device, you may obtain marginal results.
"""
if type(renderingIntent) != type(1) or not (0 <= renderingIntent <=3):
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
if type(flags) != type(1) or not (0 <= flags <= _MAX_FLAG):
        raise PyCMSError("flags must be an integer between 0 and %s" % _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
if not isinstance(proofProfile, ImageCmsProfile):
proofProfile = ImageCmsProfile(proofProfile)
return ImageCmsTransform(inputProfile, outputProfile, inMode, outMode, renderingIntent, proofProfile, proofRenderingIntent, flags)
except (IOError, TypeError, ValueError), v:
raise PyCMSError(v)
buildTransformFromOpenProfiles = buildTransform
buildProofTransformFromOpenProfiles = buildProofTransform
##
# (pyCMS) Applies a transform to a given image.
def applyTransform(im, transform, inPlace=0):
"""
ImageCms.applyTransform(im, transform, [inPlace])
Returns either None, or a new PIL Image object, depending on the value
of inPlace (see below)
im = a PIL Image object, and im.mode must be the same as the inMode
supported by the transform.
transform = a valid CmsTransform class object
inPlace = BOOL (1 == TRUE, 0 or None == FALSE). If TRUE, im is
modified in place and None is returned, if FALSE, a new Image
object with the transform applied is returned (and im is not
changed). The default is FALSE.
If im.mode != transform.inMode, a PyCMSError is raised.
If inPlace == TRUE and transform.inMode != transform.outMode, a
PyCMSError is raised.
If im.mode, transfer.inMode, or transfer.outMode is not supported by
pyCMSdll or the profiles you used for the transform, a PyCMSError is
raised.
If an error occurs while the transform is being applied, a PyCMSError
is raised.
This function applies a pre-calculated transform (from
ImageCms.buildTransform() or ImageCms.buildTransformFromOpenProfiles()) to an
image. The transform can be used for multiple images, saving
    considerable calculation time if doing the same conversion multiple times.
If you want to modify im in-place instead of receiving a new image as
the return value, set inPlace to TRUE. This can only be done if
transform.inMode and transform.outMode are the same, because we can't
change the mode in-place (the buffer sizes for some modes are
different). The default behavior is to return a new Image object of
the same dimensions in mode transform.outMode.
"""
try:
if inPlace:
transform.apply_in_place(im)
imOut = None
else:
imOut = transform.apply(im)
except (TypeError, ValueError), v:
raise PyCMSError(v)
return imOut
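# Illustrative usage sketch (file names and the rgb2cmyk transform are
# assumptions): one pre-built transform can be reused for many images:
#   for name in ("a.tif", "b.tif", "c.tif"):
#       out = applyTransform(Image.open(name), rgb2cmyk)
#       out.save(name + ".cmyk.tif")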
##
# (pyCMS) Creates a profile.
def createProfile(colorSpace, colorTemp=-1):
"""
ImageCms.createProfile(colorSpace, [colorTemp])
Returns a CmsProfile class object
colorSpace = string, the color space of the profile you wish to create.
Currently only "LAB", "XYZ", and "sRGB" are supported.
colorTemp = positive integer for the white point for the profile, in
degrees Kelvin (i.e. 5000, 6500, 9600, etc.). The default is for
D50 illuminant if omitted (5000k). colorTemp is ONLY applied to
LAB profiles, and is ignored for XYZ and sRGB.
If colorSpace not in ["LAB", "XYZ", "sRGB"], a PyCMSError is raised
If using LAB and colorTemp != a positive integer, a PyCMSError is raised.
If an error occurs while creating the profile, a PyCMSError is raised.
Use this function to create common profiles on-the-fly instead of
having to supply a profile on disk and knowing the path to it. It
returns a normal CmsProfile object that can be passed to
ImageCms.buildTransformFromOpenProfiles() to create a transform to apply
to images.
"""
if colorSpace not in ["LAB", "XYZ", "sRGB"]:
raise PyCMSError("Color space not supported for on-the-fly profile creation (%s)" % colorSpace)
if colorSpace == "LAB":
if type(colorTemp) == type(5000.0):
colorTemp = int(colorTemp + 0.5)
if type (colorTemp) != type (5000):
raise PyCMSError("Color temperature must be a positive integer, \"%s\" not valid" % colorTemp)
try:
return core.createProfile(colorSpace, colorTemp)
except (TypeError, ValueError), v:
raise PyCMSError(v)
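# Illustrative usage sketch: building an on-the-fly LAB profile with a 6500
# Kelvin white point and pairing it with a generated sRGB profile (the "LAB"
# image mode is assumed to be supported by the installed littleCMS binding):
#   lab = createProfile("LAB", 6500)
#   srgb = createProfile("sRGB")
#   t = buildTransformFromOpenProfiles(srgb, lab, "RGB", "LAB")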
##
# (pyCMS) Gets the internal product name for the given profile.
def getProfileName(profile):
"""
ImageCms.getProfileName(profile)
Returns a string containing the internal name of the profile as stored
in an ICC tag.
profile = EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
If profile isn't a valid CmsProfile object or filename to a profile,
a PyCMSError is raised If an error occurs while trying to obtain the
name tag, a PyCMSError is raised.
Use this function to obtain the INTERNAL name of the profile (stored
in an ICC tag in the profile itself), usually the one used when the
profile was originally created. Sometimes this tag also contains
additional information supplied by the creator.
"""
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.product_name + "\n"
except (AttributeError, IOError, TypeError, ValueError), v:
raise PyCMSError(v)
##
# (pyCMS) Gets the internal product information for the given profile.
def getProfileInfo(profile):
"""
ImageCms.getProfileInfo(profile)
Returns a string containing the internal profile information stored in
an ICC tag.
profile = EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
If profile isn't a valid CmsProfile object or filename to a profile,
a PyCMSError is raised.
If an error occurs while trying to obtain the info tag, a PyCMSError
is raised
Use this function to obtain the information stored in the profile's
info tag. This often contains details about the profile, and how it
was created, as supplied by the creator.
"""
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# add an extra newline to preserve pyCMS compatibility
return profile.product_info + "\n"
except (AttributeError, IOError, TypeError, ValueError), v:
raise PyCMSError(v)
##
# (pyCMS) Gets the default intent name for the given profile.
def getDefaultIntent(profile):
"""
ImageCms.getDefaultIntent(profile)
Returns integer 0-3 specifying the default rendering intent for this
profile.
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC =1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC =3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and
what they do.
profile = EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
If profile isn't a valid CmsProfile object or filename to a profile,
a PyCMSError is raised.
If an error occurs while trying to obtain the default intent, a
PyCMSError is raised.
    Use this function to determine the default (and usually best optimized)
rendering intent for this profile. Most profiles support multiple
rendering intents, but are intended mostly for one type of conversion.
If you wish to use a different intent than returned, use
ImageCms.isIntentSupported() to verify it will work first.
"""
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.rendering_intent
except (AttributeError, IOError, TypeError, ValueError), v:
raise PyCMSError(v)
##
# (pyCMS) Checks if a given intent is supported.
def isIntentSupported(profile, intent, direction):
"""
ImageCms.isIntentSupported(profile, intent, direction)
Returns 1 if the intent/direction are supported, -1 if they are not.
profile = EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
intent = integer (0-3) specifying the rendering intent you wish to use
with this profile
INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
INTENT_RELATIVE_COLORIMETRIC =1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
INTENT_ABSOLUTE_COLORIMETRIC =3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
see the pyCMS documentation for details on rendering intents and
what they do.
    direction = integer specifying if the profile is to be used for input,
output, or proof
INPUT = 0 (or use ImageCms.DIRECTION_INPUT)
OUTPUT = 1 (or use ImageCms.DIRECTION_OUTPUT)
PROOF = 2 (or use ImageCms.DIRECTION_PROOF)
Use this function to verify that you can use your desired
renderingIntent with profile, and that profile can be used for the
input/output/proof profile as you desire.
    Some profiles are created specifically for one "direction", and cannot
be used for others. Some profiles can only be used for certain
rendering intents... so it's best to either verify this before trying
to create a transform with them (using this function), or catch the
potential PyCMSError that will occur if they don't support the modes
you select.
"""
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# FIXME: I get different results for the same data w. different
# compilers. Bug in LittleCMS or in the binding?
if profile.profile.is_intent_supported(intent, direction):
return 1
else:
return -1
except (AttributeError, IOError, TypeError, ValueError), v:
raise PyCMSError(v)
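# Illustrative usage sketch (the profile path is an assumption):
#   if isIntentSupported("USWebCoatedSWOP.icc", INTENT_SATURATION,
#                        DIRECTION_OUTPUT) == 1:
#       t = buildTransform("sRGB.icc", "USWebCoatedSWOP.icc", "RGB", "CMYK",
#                          renderingIntent=INTENT_SATURATION)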
##
# (pyCMS) Fetches versions.
def versions():
import sys
return (
VERSION, core.littlecms_version, sys.version.split()[0], Image.VERSION
)
# --------------------------------------------------------------------
if __name__ == "__main__":
# create a cheap manual from the __doc__ strings for the functions above
import ImageCms
import string
print __doc__
    for f in dir(ImageCms):  # the module is ImageCms; the old pyCMS name is not defined here
print "="*80
print "%s" %f
try:
exec ("doc = ImageCms.%s.__doc__" %(f))
if string.find(doc, "pyCMS") >= 0:
# so we don't get the __doc__ string for imported modules
print doc
except AttributeError:
pass
| apache-2.0 |
JioCloud/neutron | neutron/tests/unit/test_ipam.py | 4 | 11422 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import types
import mock
import netaddr
from oslo_config import cfg
from neutron.common import constants
from neutron.common import ipv6_utils
from neutron import context
from neutron import ipam
from neutron.ipam import driver
from neutron.ipam import exceptions as ipam_exc
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.tests import base
from neutron.tests.unit.ipam import fake_driver
FAKE_IPAM_CLASS = 'neutron.tests.unit.ipam.fake_driver.FakeDriver'
class IpamSubnetRequestTestCase(base.BaseTestCase):
def setUp(self):
super(IpamSubnetRequestTestCase, self).setUp()
self.tenant_id = uuidutils.generate_uuid()
self.subnet_id = uuidutils.generate_uuid()
class TestIpamSubnetRequests(IpamSubnetRequestTestCase):
def test_subnet_request(self):
pool = ipam.SubnetRequest(self.tenant_id,
self.subnet_id)
self.assertEqual(self.tenant_id, pool.tenant_id)
self.assertEqual(self.subnet_id, pool.subnet_id)
self.assertEqual(None, pool.gateway_ip)
self.assertEqual(None, pool.allocation_pools)
def test_subnet_request_gateway(self):
request = ipam.SubnetRequest(self.tenant_id,
self.subnet_id,
gateway_ip='1.2.3.1')
self.assertEqual('1.2.3.1', str(request.gateway_ip))
def test_subnet_request_bad_gateway(self):
self.assertRaises(netaddr.core.AddrFormatError,
ipam.SubnetRequest,
self.tenant_id,
self.subnet_id,
gateway_ip='1.2.3.')
def test_subnet_request_with_range(self):
allocation_pools = [netaddr.IPRange('1.2.3.4', '1.2.3.5'),
netaddr.IPRange('1.2.3.7', '1.2.3.9')]
request = ipam.SubnetRequest(self.tenant_id,
self.subnet_id,
allocation_pools=allocation_pools)
self.assertEqual(allocation_pools, request.allocation_pools)
def test_subnet_request_range_not_list(self):
self.assertRaises(TypeError,
ipam.SubnetRequest,
self.tenant_id,
self.subnet_id,
allocation_pools=1)
def test_subnet_request_bad_range(self):
self.assertRaises(TypeError,
ipam.SubnetRequest,
self.tenant_id,
self.subnet_id,
allocation_pools=['1.2.3.4'])
def test_subnet_request_different_versions(self):
pools = [netaddr.IPRange('0.0.0.1', '0.0.0.2'),
netaddr.IPRange('::1', '::2')]
self.assertRaises(ValueError,
ipam.SubnetRequest,
self.tenant_id,
self.subnet_id,
allocation_pools=pools)
def test_subnet_request_overlap(self):
pools = [netaddr.IPRange('0.0.0.10', '0.0.0.20'),
netaddr.IPRange('0.0.0.8', '0.0.0.10')]
self.assertRaises(ValueError,
ipam.SubnetRequest,
self.tenant_id,
self.subnet_id,
allocation_pools=pools)
class TestIpamAnySubnetRequest(IpamSubnetRequestTestCase):
def test_subnet_request(self):
request = ipam.AnySubnetRequest(self.tenant_id,
self.subnet_id,
constants.IPv4,
24,
gateway_ip='0.0.0.1')
self.assertEqual(24, request.prefixlen)
def test_subnet_request_bad_prefix_type(self):
self.assertRaises(netaddr.core.AddrFormatError,
ipam.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv4,
'A')
def test_subnet_request_bad_prefix(self):
self.assertRaises(netaddr.core.AddrFormatError,
ipam.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv4,
33)
self.assertRaises(netaddr.core.AddrFormatError,
ipam.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv6,
129)
def test_subnet_request_bad_gateway(self):
self.assertRaises(ValueError,
ipam.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv6,
64,
gateway_ip='2000::1')
def test_subnet_request_allocation_pool_wrong_version(self):
pools = [netaddr.IPRange('0.0.0.4', '0.0.0.5')]
self.assertRaises(ValueError,
ipam.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv6,
64,
allocation_pools=pools)
def test_subnet_request_allocation_pool_not_in_net(self):
pools = [netaddr.IPRange('0.0.0.64', '0.0.0.128')]
self.assertRaises(ValueError,
ipam.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv4,
25,
allocation_pools=pools)
class TestIpamSpecificSubnetRequest(IpamSubnetRequestTestCase):
def test_subnet_request(self):
request = ipam.SpecificSubnetRequest(self.tenant_id,
self.subnet_id,
'1.2.3.0/24',
gateway_ip='1.2.3.1')
self.assertEqual(24, request.prefixlen)
self.assertEqual(netaddr.IPAddress('1.2.3.1'), request.gateway_ip)
self.assertEqual(netaddr.IPNetwork('1.2.3.0/24'), request.subnet_cidr)
def test_subnet_request_bad_gateway(self):
self.assertRaises(ValueError,
ipam.SpecificSubnetRequest,
self.tenant_id,
self.subnet_id,
'2001::1',
gateway_ip='2000::1')
class TestAddressRequest(base.BaseTestCase):
# This class doesn't test much. At least running through all of the
# constructors may shake out some trivial bugs.
EUI64 = ipam.AutomaticAddressRequest.EUI64
def setUp(self):
super(TestAddressRequest, self).setUp()
def test_specific_address_ipv6(self):
request = ipam.SpecificAddressRequest('2000::45')
self.assertEqual(netaddr.IPAddress('2000::45'), request.address)
def test_specific_address_ipv4(self):
request = ipam.SpecificAddressRequest('1.2.3.32')
self.assertEqual(netaddr.IPAddress('1.2.3.32'), request.address)
def test_any_address(self):
ipam.AnyAddressRequest()
def test_automatic_address_request_eui64(self):
subnet_cidr = '2607:f0d0:1002:51::/64'
port_mac = 'aa:bb:cc:dd:ee:ff'
eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr,
port_mac))
request = ipam.AutomaticAddressRequest(
address_type=self.EUI64,
prefix=subnet_cidr,
mac=port_mac)
self.assertEqual(request.address, netaddr.IPAddress(eui_addr))
def test_automatic_address_request_invalid_address_type_raises(self):
self.assertRaises(ipam_exc.InvalidAddressType,
ipam.AutomaticAddressRequest,
address_type='kaboom')
def test_automatic_address_request_eui64_no_mac_raises(self):
self.assertRaises(ipam_exc.AddressCalculationFailure,
ipam.AutomaticAddressRequest,
address_type=self.EUI64,
prefix='meh')
def test_automatic_address_request_eui64_alien_param_raises(self):
self.assertRaises(ipam_exc.AddressCalculationFailure,
ipam.AutomaticAddressRequest,
address_type=self.EUI64,
mac='meh',
alien='et',
prefix='meh')
class TestIpamDriverLoader(base.BaseTestCase):
def setUp(self):
super(TestIpamDriverLoader, self).setUp()
self.ctx = context.get_admin_context()
def _verify_fake_ipam_driver_is_loaded(self, driver_name):
mgr = manager.NeutronManager
ipam_driver = mgr.load_class_for_provider('neutron.ipam_drivers',
driver_name)
self.assertEqual(
fake_driver.FakeDriver, ipam_driver,
"loaded ipam driver should be FakeDriver")
def _verify_import_error_is_generated(self, driver_name):
mgr = manager.NeutronManager
self.assertRaises(ImportError, mgr.load_class_for_provider,
'neutron.ipam_drivers',
driver_name)
def test_ipam_driver_is_loaded_by_class(self):
self._verify_fake_ipam_driver_is_loaded(FAKE_IPAM_CLASS)
def test_ipam_driver_is_loaded_by_name(self):
self._verify_fake_ipam_driver_is_loaded('fake')
def test_ipam_driver_raises_import_error(self):
self._verify_import_error_is_generated(
'neutron.tests.unit.ipam.SomeNonExistentClass')
def test_ipam_driver_raises_import_error_for_none(self):
self._verify_import_error_is_generated(None)
def _load_ipam_driver(self, driver_name, subnet_pool_id):
cfg.CONF.set_override("ipam_driver", driver_name)
return driver.Pool.get_instance(subnet_pool_id, self.ctx)
def test_ipam_driver_is_loaded_from_ipam_driver_config_value(self):
ipam_driver = self._load_ipam_driver('fake', None)
self.assertIsInstance(
ipam_driver, (fake_driver.FakeDriver, types.ClassType),
"loaded ipam driver should be of type FakeDriver")
@mock.patch(FAKE_IPAM_CLASS)
def test_ipam_driver_is_loaded_with_subnet_pool_id(self, ipam_mock):
subnet_pool_id = 'SomePoolID'
self._load_ipam_driver('fake', subnet_pool_id)
ipam_mock.assert_called_once_with(subnet_pool_id, self.ctx)
| apache-2.0 |
DrOctogon/unwash_ecom | oscar/apps/catalogue/managers.py | 1 | 1491 | from django.db import models
class ProductQuerySet(models.query.QuerySet):
def base_queryset(self):
"""
Applies select_related and prefetch_related for commonly related
models to save on queries
"""
return self.select_related('product_class')\
.prefetch_related('variants',
'product_options',
'product_class__options',
'stockrecords',
'images',
)
def browsable(self):
"""
Excludes non-canonical products.
"""
return self.filter(parent=None)
class ProductManager(models.Manager):
"""
Uses ProductQuerySet and proxies its methods to allow chaining
Once Django 1.7 lands, this class can probably be removed:
https://docs.djangoproject.com/en/dev/releases/1.7/#calling-custom-queryset-methods-from-the-manager # noqa
"""
def get_queryset(self):
return ProductQuerySet(self.model, using=self._db)
def browsable(self):
return self.get_queryset().browsable()
def base_queryset(self):
return self.get_queryset().base_queryset()
class BrowsableProductManager(ProductManager):
"""
Excludes non-canonical products
Could be deprecated after Oscar 0.7 is released
"""
def get_queryset(self):
return super(BrowsableProductManager, self).get_queryset().browsable()
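# Illustrative usage sketch (assumption: the Product model exposes these
# managers as ``objects`` and ``browsable``, which is not shown in this file):
#
#     canonical = Product.browsable.base_queryset()
#     everything = Product.objects.base_queryset()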
| bsd-3-clause |
JoeyCodinja/INFO3180PROJECT3 | lib/werkzeug/security.py | 302 | 8407 | # -*- coding: utf-8 -*-
"""
werkzeug.security
~~~~~~~~~~~~~~~~~
Security related helpers such as secure password hashing tools.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import hmac
import hashlib
import posixpath
import codecs
from struct import Struct
from random import SystemRandom
from operator import xor
from itertools import starmap
from werkzeug._compat import range_type, PY2, text_type, izip, to_bytes, \
string_types, to_native
SALT_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
DEFAULT_PBKDF2_ITERATIONS = 1000
_pack_int = Struct('>I').pack
_builtin_safe_str_cmp = getattr(hmac, 'compare_digest', None)
_sys_rng = SystemRandom()
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
if sep not in (None, '/'))
def _find_hashlib_algorithms():
algos = getattr(hashlib, 'algorithms', None)
if algos is None:
algos = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
rv = {}
for algo in algos:
func = getattr(hashlib, algo, None)
if func is not None:
rv[algo] = func
return rv
_hash_funcs = _find_hashlib_algorithms()
def pbkdf2_hex(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
keylen=None, hashfunc=None):
"""Like :func:`pbkdf2_bin` but returns a hex encoded string.
.. versionadded:: 0.9
:param data: the data to derive.
:param salt: the salt for the derivation.
:param iterations: the number of iterations.
:param keylen: the length of the resulting key. If not provided
the digest size will be used.
:param hashfunc: the hash function to use. This can either be the
string name of a known hash function or a function
from the hashlib module. Defaults to sha1.
"""
rv = pbkdf2_bin(data, salt, iterations, keylen, hashfunc)
return to_native(codecs.encode(rv, 'hex_codec'))
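# Illustrative sketch (not part of the module): deriving a 32-byte key as a
# hex string with 50000 iterations of PBKDF2-SHA1; password and salt values
# are placeholders.
#
#     key = pbkdf2_hex("secret-password", "random-salt",
#                      iterations=50000, keylen=32)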
def pbkdf2_bin(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
keylen=None, hashfunc=None):
"""Returns a binary digest for the PBKDF2 hash algorithm of `data`
with the given `salt`. It iterates `iterations` time and produces a
key of `keylen` bytes. By default SHA-1 is used as hash function,
a different hashlib `hashfunc` can be provided.
.. versionadded:: 0.9
:param data: the data to derive.
:param salt: the salt for the derivation.
:param iterations: the number of iterations.
:param keylen: the length of the resulting key. If not provided
the digest size will be used.
:param hashfunc: the hash function to use. This can either be the
string name of a known hash function or a function
from the hashlib module. Defaults to sha1.
"""
if isinstance(hashfunc, string_types):
hashfunc = _hash_funcs[hashfunc]
elif not hashfunc:
hashfunc = hashlib.sha1
salt = to_bytes(salt)
mac = hmac.HMAC(to_bytes(data), None, hashfunc)
if not keylen:
keylen = mac.digest_size
def _pseudorandom(x, mac=mac):
h = mac.copy()
h.update(x)
return bytearray(h.digest())
buf = bytearray()
for block in range_type(1, -(-keylen // mac.digest_size) + 1):
rv = u = _pseudorandom(salt + _pack_int(block))
for i in range_type(iterations - 1):
u = _pseudorandom(bytes(u))
rv = bytearray(starmap(xor, izip(rv, u)))
buf.extend(rv)
return bytes(buf[:keylen])
def safe_str_cmp(a, b):
"""This function compares strings in somewhat constant time. This
requires that the length of at least one string is known in advance.
Returns `True` if the two strings are equal or `False` if they are not.
.. versionadded:: 0.7
"""
if _builtin_safe_str_cmp is not None:
return _builtin_safe_str_cmp(a, b)
if len(a) != len(b):
return False
rv = 0
if isinstance(a, bytes) and isinstance(b, bytes) and not PY2:
for x, y in izip(a, b):
rv |= x ^ y
else:
for x, y in izip(a, b):
rv |= ord(x) ^ ord(y)
return rv == 0
def gen_salt(length):
"""Generate a random string of SALT_CHARS with specified ``length``."""
if length <= 0:
raise ValueError('requested salt of length <= 0')
return ''.join(_sys_rng.choice(SALT_CHARS) for _ in range_type(length))
def _hash_internal(method, salt, password):
"""Internal password hash helper. Supports plaintext without salt,
unsalted and salted passwords. In case salted passwords are used
hmac is used.
"""
if method == 'plain':
return password, method
if isinstance(password, text_type):
password = password.encode('utf-8')
if method.startswith('pbkdf2:'):
args = method[7:].split(':')
if len(args) not in (1, 2):
raise ValueError('Invalid number of arguments for PBKDF2')
method = args.pop(0)
iterations = args and int(args[0] or 0) or DEFAULT_PBKDF2_ITERATIONS
is_pbkdf2 = True
actual_method = 'pbkdf2:%s:%d' % (method, iterations)
else:
is_pbkdf2 = False
actual_method = method
hash_func = _hash_funcs.get(method)
if hash_func is None:
raise TypeError('invalid method %r' % method)
if is_pbkdf2:
if not salt:
raise ValueError('Salt is required for PBKDF2')
rv = pbkdf2_hex(password, salt, iterations,
hashfunc=hash_func)
elif salt:
if isinstance(salt, text_type):
salt = salt.encode('utf-8')
rv = hmac.HMAC(salt, password, hash_func).hexdigest()
else:
h = hash_func()
h.update(password)
rv = h.hexdigest()
return rv, actual_method
def generate_password_hash(password, method='pbkdf2:sha1', salt_length=8):
"""Hash a password with the given method and salt with with a string of
the given length. The format of the string returned includes the method
that was used so that :func:`check_password_hash` can check the hash.
The format for the hashed string looks like this::
method$salt$hash
This method can **not** generate unsalted passwords but it is possible
to set the method to plain to enforce plaintext passwords. If a salt
is used, hmac is used internally to salt the password.
If PBKDF2 is wanted it can be enabled by setting the method to
``pbkdf2:method:iterations`` where iterations is optional::
pbkdf2:sha1:2000$salt$hash
pbkdf2:sha1$salt$hash
:param password: the password to hash
:param method: the hash method to use (one that hashlib supports), can
optionally be in the format ``pbkdf2:<method>[:iterations]``
to enable PBKDF2.
:param salt_length: the length of the salt in letters
"""
salt = method != 'plain' and gen_salt(salt_length) or ''
h, actual_method = _hash_internal(method, salt, password)
return '%s$%s$%s' % (actual_method, salt, h)
def check_password_hash(pwhash, password):
"""check a password against a given salted and hashed password value.
In order to support unsalted legacy passwords this method supports
plain text passwords, md5 and sha1 hashes (both salted and unsalted).
Returns `True` if the password matched, `False` otherwise.
:param pwhash: a hashed string like returned by
:func:`generate_password_hash`
:param password: the plaintext password to compare against the hash
"""
if pwhash.count('$') < 2:
return False
method, salt, hashval = pwhash.split('$', 2)
return safe_str_cmp(_hash_internal(method, salt, password)[0], hashval)
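# Illustrative sketch (not part of the module): hashing a password and
# verifying it later; the method string follows the pbkdf2:<method>:<iterations>
# format documented above.
#
#     pw_hash = generate_password_hash("correct horse", method="pbkdf2:sha1:2000")
#     check_password_hash(pw_hash, "correct horse")   # True
#     check_password_hash(pw_hash, "battery staple")  # False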
def safe_join(directory, filename):
"""Safely join `directory` and `filename`. If this cannot be done,
this function returns ``None``.
:param directory: the base directory.
:param filename: the untrusted filename relative to that directory.
"""
filename = posixpath.normpath(filename)
for sep in _os_alt_seps:
if sep in filename:
return None
if os.path.isabs(filename) or filename.startswith('../'):
return None
return os.path.join(directory, filename)
| apache-2.0 |
VitalPet/odoo | addons/crm/crm.py | 4 | 10606 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import time
from lxml import etree
from openerp.osv import fields
from openerp.osv import osv
from openerp import tools
from openerp.tools.translate import _
MAX_LEVEL = 15
AVAILABLE_STATES = [
('draft', 'New'),
('cancel', 'Cancelled'),
('open', 'In Progress'),
('pending', 'Pending'),
('done', 'Closed')
]
AVAILABLE_PRIORITIES = [
('1', 'Highest'),
('2', 'High'),
('3', 'Normal'),
('4', 'Low'),
('5', 'Lowest'),
]
class crm_case_channel(osv.osv):
_name = "crm.case.channel"
_description = "Channels"
_order = 'name'
_columns = {
'name': fields.char('Channel Name', size=64, required=True),
'active': fields.boolean('Active'),
}
_defaults = {
'active': lambda *a: 1,
}
class crm_case_stage(osv.osv):
""" Model for case stages. This models the main stages of a document
management flow. Main CRM objects (leads, opportunities, project
issues, ...) will now use only stages, instead of state and stages.
Stages are for example used to display the kanban view of records.
"""
_name = "crm.case.stage"
_description = "Stage of case"
_rec_name = 'name'
_order = "sequence"
_columns = {
'name': fields.char('Stage Name', size=64, required=True, translate=True),
'sequence': fields.integer('Sequence', help="Used to order stages. Lower is better."),
'probability': fields.float('Probability (%)', required=True, help="This percentage depicts the default/average probability of the Case for this stage to be a success"),
'on_change': fields.boolean('Change Probability Automatically', help="Setting this stage will change the probability automatically on the opportunity."),
'requirements': fields.text('Requirements'),
'section_ids':fields.many2many('crm.case.section', 'section_stage_rel', 'stage_id', 'section_id', string='Sections',
help="Link between stages and sales teams. When set, this limitate the current stage to the selected sales teams."),
'state': fields.selection(AVAILABLE_STATES, 'Related Status', required=True,
help="The status of your document will automatically change regarding the selected stage. " \
"For example, if a stage is related to the status 'Close', when your document reaches this stage, it is automatically closed."),
'case_default': fields.boolean('Default to New Sales Team',
help="If you check this field, this stage will be proposed by default on each sales team. It will not assign this stage to existing teams."),
'fold': fields.boolean('Fold by Default',
help="This stage is not visible, for example in status bar or kanban view, when there are no records in that stage to display."),
'type': fields.selection([ ('lead','Lead'),
('opportunity', 'Opportunity'),
('both', 'Both')],
string='Type', size=16, required=True,
help="This field is used to distinguish stages related to Leads from stages related to Opportunities, or to specify stages available for both types."),
}
_defaults = {
'sequence': lambda *args: 1,
'probability': lambda *args: 0.0,
'state': 'open',
'fold': False,
'type': 'both',
'case_default': True,
}
class crm_case_section(osv.osv):
""" Model for sales teams. """
_name = "crm.case.section"
_inherits = {'mail.alias': 'alias_id'}
_inherit = "mail.thread"
_description = "Sales Teams"
_order = "complete_name"
def get_full_name(self, cr, uid, ids, field_name, arg, context=None):
return dict(self.name_get(cr, uid, ids, context=context))
_columns = {
'name': fields.char('Sales Team', size=64, required=True, translate=True),
'complete_name': fields.function(get_full_name, type='char', size=256, readonly=True, store=True),
'code': fields.char('Code', size=8),
'active': fields.boolean('Active', help="If the active field is set to "\
"false, it will allow you to hide the sales team without removing it."),
'change_responsible': fields.boolean('Reassign Escalated', help="When escalating to this team, override the salesman with the team leader."),
'user_id': fields.many2one('res.users', 'Team Leader'),
'member_ids':fields.many2many('res.users', 'sale_member_rel', 'section_id', 'member_id', 'Team Members'),
'reply_to': fields.char('Reply-To', size=64, help="The email address put in the 'Reply-To' of all emails sent by OpenERP about cases in this sales team"),
'parent_id': fields.many2one('crm.case.section', 'Parent Team'),
'child_ids': fields.one2many('crm.case.section', 'parent_id', 'Child Teams'),
'resource_calendar_id': fields.many2one('resource.calendar', "Working Time", help="Used to compute open days"),
'note': fields.text('Description'),
'working_hours': fields.float('Working Hours', digits=(16,2 )),
'stage_ids': fields.many2many('crm.case.stage', 'section_stage_rel', 'section_id', 'stage_id', 'Stages'),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="The email address associated with this team. New emails received will automatically "
"create new leads assigned to the team."),
}
def _get_stage_common(self, cr, uid, context):
ids = self.pool.get('crm.case.stage').search(cr, uid, [('case_default','=',1)], context=context)
return ids
_defaults = {
'active': 1,
'stage_ids': _get_stage_common,
'alias_domain': False, # always hide alias during creation
}
_sql_constraints = [
('code_uniq', 'unique (code)', 'The code of the sales team must be unique !')
]
_constraints = [
(osv.osv._check_recursion, 'Error ! You cannot create recursive Sales team.', ['parent_id'])
]
def name_get(self, cr, uid, ids, context=None):
"""Overrides orm name_get method"""
if not isinstance(ids, list) :
ids = [ids]
res = []
if not ids:
return res
reads = self.read(cr, uid, ids, ['name', 'parent_id'], context)
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1] + ' / ' + name
res.append((record['id'], name))
return res
def create(self, cr, uid, vals, context=None):
mail_alias = self.pool.get('mail.alias')
if not vals.get('alias_id'):
vals.pop('alias_name', None) # prevent errors during copy()
alias_id = mail_alias.create_unique_alias(cr, uid,
{'alias_name': vals['name']},
model_name="crm.lead",
context=context)
vals['alias_id'] = alias_id
res = super(crm_case_section, self).create(cr, uid, vals, context)
mail_alias.write(cr, uid, [vals['alias_id']], {'alias_defaults': {'section_id': res, 'type':'lead'}}, context)
return res
def unlink(self, cr, uid, ids, context=None):
# Cascade-delete mail aliases as well, as they should not exist without the sales team.
mail_alias = self.pool.get('mail.alias')
alias_ids = [team.alias_id.id for team in self.browse(cr, uid, ids, context=context) if team.alias_id ]
res = super(crm_case_section, self).unlink(cr, uid, ids, context=context)
mail_alias.unlink(cr, uid, alias_ids, context=context)
return res
class crm_case_categ(osv.osv):
""" Category of Case """
_name = "crm.case.categ"
_description = "Category of Case"
_columns = {
'name': fields.char('Name', size=64, required=True, translate=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
'object_id': fields.many2one('ir.model', 'Object Name'),
}
def _find_object_id(self, cr, uid, context=None):
"""Finds id for case object"""
context = context or {}
object_id = context.get('object_id', False)
ids = self.pool.get('ir.model').search(cr, uid, ['|',('id', '=', object_id),('model', '=', context.get('object_name', False))])
return ids and ids[0] or False
_defaults = {
'object_id' : _find_object_id
}
class crm_case_resource_type(osv.osv):
""" Resource Type of case """
_name = "crm.case.resource.type"
_description = "Campaign"
_rec_name = "name"
_columns = {
'name': fields.char('Campaign Name', size=64, required=True, translate=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
def _links_get(self, cr, uid, context=None):
"""Gets links value for reference field"""
obj = self.pool.get('res.request.link')
ids = obj.search(cr, uid, [])
res = obj.read(cr, uid, ids, ['object', 'name'], context)
return [(r['object'], r['name']) for r in res]
class crm_payment_mode(osv.osv):
""" Payment Mode for Fund """
_name = "crm.payment.mode"
_description = "CRM Payment Mode"
_columns = {
'name': fields.char('Name', size=64, required=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Jionglun/-w16b_test | static/Brython3.1.3-20150514-095342/Lib/numbers.py | 883 | 10398 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for numbers, according to PEP 3141.
TODO: Fill out more detailed documentation on the operators."""
from abc import ABCMeta, abstractmethod
__all__ = ["Number", "Complex", "Real", "Rational", "Integral"]
class Number(metaclass=ABCMeta):
"""All numbers inherit from this class.
If you just want to check if an argument x is a number, without
caring what kind, use isinstance(x, Number).
"""
__slots__ = ()
# Concrete numeric types must provide their own hash implementation
__hash__ = None
## Notes on Decimal
## ----------------
## Decimal has all of the methods specified by the Real abc, but it should
## not be registered as a Real because decimals do not interoperate with
## binary floats (i.e. Decimal('3.14') + 2.71828 is undefined). But,
## abstract reals are expected to interoperate (i.e. R1 + R2 should be
## expected to work if R1 and R2 are both Reals).
class Complex(Number):
"""Complex defines the operations that work on the builtin complex type.
In short, those are: a conversion to complex, .real, .imag, +, -,
*, /, abs(), .conjugate, ==, and !=.
If it is given heterogeneous arguments, and doesn't have special
knowledge about them, it should fall back to the builtin complex
type as described below.
"""
__slots__ = ()
@abstractmethod
def __complex__(self):
"""Return a builtin complex instance. Called for complex(self)."""
def __bool__(self):
"""True if self != 0. Called for bool(self)."""
return self != 0
@property
@abstractmethod
def real(self):
"""Retrieve the real component of this number.
This should subclass Real.
"""
raise NotImplementedError
@property
@abstractmethod
def imag(self):
"""Retrieve the imaginary component of this number.
This should subclass Real.
"""
raise NotImplementedError
@abstractmethod
def __add__(self, other):
"""self + other"""
raise NotImplementedError
@abstractmethod
def __radd__(self, other):
"""other + self"""
raise NotImplementedError
@abstractmethod
def __neg__(self):
"""-self"""
raise NotImplementedError
@abstractmethod
def __pos__(self):
"""+self"""
raise NotImplementedError
def __sub__(self, other):
"""self - other"""
return self + -other
def __rsub__(self, other):
"""other - self"""
return -self + other
@abstractmethod
def __mul__(self, other):
"""self * other"""
raise NotImplementedError
@abstractmethod
def __rmul__(self, other):
"""other * self"""
raise NotImplementedError
@abstractmethod
def __truediv__(self, other):
"""self / other: Should promote to float when necessary."""
raise NotImplementedError
@abstractmethod
def __rtruediv__(self, other):
"""other / self"""
raise NotImplementedError
@abstractmethod
def __pow__(self, exponent):
"""self**exponent; should promote to float or complex when necessary."""
raise NotImplementedError
@abstractmethod
def __rpow__(self, base):
"""base ** self"""
raise NotImplementedError
@abstractmethod
def __abs__(self):
"""Returns the Real distance from 0. Called for abs(self)."""
raise NotImplementedError
@abstractmethod
def conjugate(self):
"""(x+y*i).conjugate() returns (x-y*i)."""
raise NotImplementedError
@abstractmethod
def __eq__(self, other):
"""self == other"""
raise NotImplementedError
def __ne__(self, other):
"""self != other"""
# The default __ne__ doesn't negate __eq__ until 3.0.
return not (self == other)
Complex.register(complex)
class Real(Complex):
"""To Complex, Real adds the operations that work on real numbers.
In short, those are: a conversion to float, trunc(), divmod,
%, <, <=, >, and >=.
Real also provides defaults for the derived operations.
"""
__slots__ = ()
@abstractmethod
def __float__(self):
"""Any Real can be converted to a native float object.
Called for float(self)."""
raise NotImplementedError
@abstractmethod
def __trunc__(self):
"""trunc(self): Truncates self to an Integral.
Returns an Integral i such that:
* i>0 iff self>0;
* abs(i) <= abs(self);
* for any Integral j satisfying the first two conditions,
abs(i) >= abs(j) [i.e. i has "maximal" abs among those].
i.e. "truncate towards 0".
"""
raise NotImplementedError
@abstractmethod
def __floor__(self):
"""Finds the greatest Integral <= self."""
raise NotImplementedError
@abstractmethod
def __ceil__(self):
"""Finds the least Integral >= self."""
raise NotImplementedError
@abstractmethod
def __round__(self, ndigits=None):
"""Rounds self to ndigits decimal places, defaulting to 0.
If ndigits is omitted or None, returns an Integral, otherwise
returns a Real. Rounds half toward even.
"""
raise NotImplementedError
def __divmod__(self, other):
"""divmod(self, other): The pair (self // other, self % other).
Sometimes this can be computed faster than the pair of
operations.
"""
return (self // other, self % other)
def __rdivmod__(self, other):
"""divmod(other, self): The pair (self // other, self % other).
Sometimes this can be computed faster than the pair of
operations.
"""
return (other // self, other % self)
@abstractmethod
def __floordiv__(self, other):
"""self // other: The floor() of self/other."""
raise NotImplementedError
@abstractmethod
def __rfloordiv__(self, other):
"""other // self: The floor() of other/self."""
raise NotImplementedError
@abstractmethod
def __mod__(self, other):
"""self % other"""
raise NotImplementedError
@abstractmethod
def __rmod__(self, other):
"""other % self"""
raise NotImplementedError
@abstractmethod
def __lt__(self, other):
"""self < other
< on Reals defines a total ordering, except perhaps for NaN."""
raise NotImplementedError
@abstractmethod
def __le__(self, other):
"""self <= other"""
raise NotImplementedError
# Concrete implementations of Complex abstract methods.
def __complex__(self):
"""complex(self) == complex(float(self), 0)"""
return complex(float(self))
@property
def real(self):
"""Real numbers are their real component."""
return +self
@property
def imag(self):
"""Real numbers have no imaginary component."""
return 0
def conjugate(self):
"""Conjugate is a no-op for Reals."""
return +self
Real.register(float)
class Rational(Real):
""".numerator and .denominator should be in lowest terms."""
__slots__ = ()
@property
@abstractmethod
def numerator(self):
raise NotImplementedError
@property
@abstractmethod
def denominator(self):
raise NotImplementedError
# Concrete implementation of Real's conversion to float.
def __float__(self):
"""float(self) = self.numerator / self.denominator
It's important that this conversion use the integer's "true"
division rather than casting one side to float before dividing
so that ratios of huge integers convert without overflowing.
"""
return self.numerator / self.denominator
class Integral(Rational):
"""Integral adds a conversion to int and the bit-string operations."""
__slots__ = ()
@abstractmethod
def __int__(self):
"""int(self)"""
raise NotImplementedError
def __index__(self):
"""Called whenever an index is needed, such as in slicing"""
return int(self)
@abstractmethod
def __pow__(self, exponent, modulus=None):
"""self ** exponent % modulus, but maybe faster.
Accept the modulus argument if you want to support the
3-argument version of pow(). Raise a TypeError if exponent < 0
or any argument isn't Integral. Otherwise, just implement the
2-argument version described in Complex.
"""
raise NotImplementedError
@abstractmethod
def __lshift__(self, other):
"""self << other"""
raise NotImplementedError
@abstractmethod
def __rlshift__(self, other):
"""other << self"""
raise NotImplementedError
@abstractmethod
def __rshift__(self, other):
"""self >> other"""
raise NotImplementedError
@abstractmethod
def __rrshift__(self, other):
"""other >> self"""
raise NotImplementedError
@abstractmethod
def __and__(self, other):
"""self & other"""
raise NotImplementedError
@abstractmethod
def __rand__(self, other):
"""other & self"""
raise NotImplementedError
@abstractmethod
def __xor__(self, other):
"""self ^ other"""
raise NotImplementedError
@abstractmethod
def __rxor__(self, other):
"""other ^ self"""
raise NotImplementedError
@abstractmethod
def __or__(self, other):
"""self | other"""
raise NotImplementedError
@abstractmethod
def __ror__(self, other):
"""other | self"""
raise NotImplementedError
@abstractmethod
def __invert__(self):
"""~self"""
raise NotImplementedError
# Concrete implementations of Rational and Real abstract methods.
def __float__(self):
"""float(self) == float(int(self))"""
return float(int(self))
@property
def numerator(self):
"""Integers are their own numerators."""
return +self
@property
def denominator(self):
"""Integers have a denominator of 1."""
return 1
Integral.register(int)
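# Illustrative sketch (not part of the module): the ABCs above form the numeric
# tower, so isinstance() checks behave as follows for the built-ins registered
# in this file.
#
#     import numbers
#     isinstance(3, numbers.Integral)     # True  (int is registered above)
#     isinstance(3, numbers.Rational)     # True  (Integral subclasses Rational)
#     isinstance(1.5, numbers.Integral)   # False
#     isinstance(1.5, numbers.Real)       # True  (float is registered above)
#     isinstance(1j, numbers.Real)        # False
#     isinstance(1j, numbers.Complex)     # True  (complex is registered above)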
| agpl-3.0 |
back-to/streamlink | src/streamlink_cli/output.py | 1 | 8609 | import logging
import os
import shlex
import subprocess
import sys
from time import sleep
from streamlink.utils.encoding import get_filesystem_encoding, maybe_encode, maybe_decode
from .compat import is_win32, stdout
from .constants import DEFAULT_PLAYER_ARGUMENTS, SUPPORTED_PLAYERS
from .utils import ignored
if is_win32:
import msvcrt
log = logging.getLogger("streamlink.cli.output")
class Output(object):
def __init__(self):
self.opened = False
def open(self):
self._open()
self.opened = True
def close(self):
if self.opened:
self._close()
self.opened = False
def write(self, data):
if not self.opened:
raise IOError("Output is not opened")
return self._write(data)
def _open(self):
pass
def _close(self):
pass
def _write(self, data):
pass
class FileOutput(Output):
def __init__(self, filename=None, fd=None):
super(FileOutput, self).__init__()
self.filename = filename
self.fd = fd
def _open(self):
if self.filename:
self.fd = open(self.filename, "wb")
if is_win32:
msvcrt.setmode(self.fd.fileno(), os.O_BINARY)
def _close(self):
if self.fd is not stdout:
self.fd.close()
def _write(self, data):
self.fd.write(data)
class PlayerOutput(Output):
PLAYER_TERMINATE_TIMEOUT = 10.0
def __init__(self, cmd, args=DEFAULT_PLAYER_ARGUMENTS, filename=None, quiet=True, kill=True, call=False, http=None,
namedpipe=None, title=None):
super(PlayerOutput, self).__init__()
self.cmd = cmd
self.args = args
self.kill = kill
self.call = call
self.quiet = quiet
self.filename = filename
self.namedpipe = namedpipe
self.http = http
self.title = title
self.player = None
self.player_name = self.supported_player(self.cmd)
if self.namedpipe or self.filename or self.http:
self.stdin = sys.stdin
else:
self.stdin = subprocess.PIPE
if self.quiet:
self.stdout = open(os.devnull, "w")
self.stderr = open(os.devnull, "w")
else:
self.stdout = sys.stdout
self.stderr = sys.stderr
@property
def running(self):
sleep(0.5)
return self.player.poll() is None
@classmethod
def supported_player(cls, cmd):
"""
Check if the current player supports adding a title
:param cmd: command to test
:return: name of the player|None
"""
if not is_win32:
# under a POSIX system use shlex to find the actual command
# under windows this is not an issue because executables end in .exe
cmd = shlex.split(cmd)[0]
cmd = os.path.basename(cmd.lower())
for player, possiblecmds in SUPPORTED_PLAYERS.items():
for possiblecmd in possiblecmds:
if cmd.startswith(possiblecmd):
return player
@classmethod
def _mpv_title_escape(cls, title_string):
# mpv has a "disable property-expansion" token which must be handled in order to accurately represent $$ in title
if r'\$>' in title_string:
processed_title = ""
double_dollars = True
i = dollars = 0
while i < len(title_string):
if double_dollars:
if title_string[i] == "\\":
if title_string[i + 1] == "$":
processed_title += "$"
dollars += 1
i += 1
if title_string[i + 1] == ">" and dollars % 2 == 1:
double_dollars = False
processed_title += ">"
i += 1
else:
processed_title += "\\"
elif title_string[i] == "$":
processed_title += "$$"
else:
dollars = 0
processed_title += title_string[i]
else:
if title_string[i:i + 2] == "\\$":
processed_title += "$"
i += 1
else:
processed_title += title_string[i]
i += 1
return processed_title
else:
# not possible for property-expansion to be disabled, happy days
return title_string.replace("$", "$$").replace(r'\$$', "$")
def _create_arguments(self):
if self.namedpipe:
filename = self.namedpipe.path
elif self.filename:
filename = self.filename
elif self.http:
filename = self.http.url
else:
filename = "-"
args = self.args.format(filename=filename)
cmd = self.cmd
extra_args = []
if self.title is not None:
# vlc
if self.player_name == "vlc":
# see https://wiki.videolan.org/Documentation:Format_String/, allow escaping with \$
self.title = self.title.replace("$", "$$").replace(r'\$$', "$")
extra_args.extend(["--input-title-format", self.title])
# mpv
if self.player_name == "mpv":
# see https://mpv.io/manual/stable/#property-expansion, allow escaping with \$, respect mpv's $>
self.title = self._mpv_title_escape(self.title)
extra_args.extend(["--title", self.title])
# player command
if is_win32:
eargs = maybe_decode(subprocess.list2cmdline(extra_args))
# do not insert an extra " " when there are no extra_args
return maybe_encode(u' '.join([cmd] + ([eargs] if eargs else []) + [args]),
encoding=get_filesystem_encoding())
return shlex.split(cmd) + extra_args + shlex.split(args)
def _open(self):
try:
if self.call and self.filename:
self._open_call()
else:
self._open_subprocess()
finally:
if self.quiet:
# Output streams no longer needed in parent process
self.stdout.close()
self.stderr.close()
def _open_call(self):
args = self._create_arguments()
log.debug(u"Calling: {0}".format(subprocess.list2cmdline(args)))
subprocess.call(args,
stdout=self.stdout,
stderr=self.stderr)
def _open_subprocess(self):
# Force bufsize=0 on all Python versions to avoid writing the
# unflushed buffer when closing a broken input pipe
args = self._create_arguments()
log.debug(u"Opening subprocess: {0}".format(subprocess.list2cmdline(args)))
self.player = subprocess.Popen(args,
stdin=self.stdin, bufsize=0,
stdout=self.stdout,
stderr=self.stderr)
# Wait 0.5 seconds to see if program exited prematurely
if not self.running:
raise OSError("Process exited prematurely")
if self.namedpipe:
self.namedpipe.open("wb")
elif self.http:
self.http.open()
def _close(self):
# Close input to the player first to signal the end of the
# stream and allow the player to terminate of its own accord
if self.namedpipe:
self.namedpipe.close()
elif self.http:
self.http.close()
elif not self.filename:
self.player.stdin.close()
if self.kill:
with ignored(Exception):
self.player.terminate()
if not is_win32:
t, timeout = 0.0, self.PLAYER_TERMINATE_TIMEOUT
while self.player.poll() is None and t < timeout:
sleep(0.5)
t += 0.5
if not self.player.returncode:
self.player.kill()
self.player.wait()
def _write(self, data):
if self.namedpipe:
self.namedpipe.write(data)
elif self.http:
self.http.write(data)
else:
self.player.stdin.write(data)
__all__ = ["PlayerOutput", "FileOutput"]
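# Illustrative usage sketch (not part of the module; the player command and
# the byte source are assumptions):
#
#     output = PlayerOutput("mpv", title="Example stream")
#     output.open()
#     try:
#         for chunk in stream_chunks:   # hypothetical iterator of bytes
#             output.write(chunk)
#     finally:
#         output.close()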
| bsd-2-clause |
SancharovK/_local_engine | tools/b43-tools/files/b43-fwsquash.py | 494 | 4767 | #!/usr/bin/env python
#
# b43 firmware file squasher
# Removes unnecessary firmware files
#
# Copyright (c) 2009 Michael Buesch <[email protected]>
#
# Licensed under the GNU/GPL version 2 or (at your option) any later version.
#
import sys
import os
def usage():
print("Usage: %s PHYTYPES COREREVS /path/to/extracted/firmware" % sys.argv[0])
print("")
print("PHYTYPES is a comma separated list of:")
print("A => A-PHY")
print("AG => Dual A-PHY G-PHY")
print("G => G-PHY")
print("LP => LP-PHY")
print("N => N-PHY")
print("HT => HT-PHY")
print("LCN => LCN-PHY")
print("LCN40 => LCN40-PHY")
print("AC => AC-PHY")
print("")
print("COREREVS is a comma separated list of core revision numbers.")
if len(sys.argv) != 4:
usage()
sys.exit(1)
phytypes = sys.argv[1]
corerevs = sys.argv[2]
fwpath = sys.argv[3]
phytypes = phytypes.split(',')
try:
    corerevs = [int(r) for r in corerevs.split(',')]
except ValueError:
print("ERROR: \"%s\" is not a valid COREREVS string\n" % corerevs)
usage()
sys.exit(1)
fwfiles = os.listdir(fwpath)
fwfiles = [f for f in fwfiles if f.endswith(".fw")]
if not fwfiles:
print("ERROR: No firmware files found in %s" % fwpath)
sys.exit(1)
required_fwfiles = []
def revs_match(revs_a, revs_b):
for rev in revs_a:
if rev in revs_b:
return True
return False
def phytypes_match(types_a, types_b):
for type in types_a:
type = type.strip().upper()
if type in types_b:
return True
return False
revmapping = {
"ucode2.fw" : ( (2,3,), ("G",), ),
"ucode4.fw" : ( (4,), ("G",), ),
"ucode5.fw" : ( (5,6,7,8,9,10,), ("G","A","AG",), ),
"ucode11.fw" : ( (11,12,), ("N",), ),
"ucode13.fw" : ( (13,), ("LP","G",), ),
"ucode14.fw" : ( (14,), ("LP",), ),
"ucode15.fw" : ( (15,), ("LP",), ),
"ucode16_mimo.fw" : ( (16,17,18,19,23,), ("N",), ),
# "ucode16_lp.fw" : ( (16,17,18,19,), ("LP",), ),
"ucode24_lcn.fw" : ( (24,), ("LCN",), ),
"ucode25_mimo.fw" : ( (25,28,), ("N",), ),
"ucode25_lcn.fw" : ( (25,28,), ("LCN",), ),
"ucode26_mimo.fw" : ( (26,), ("HT",), ),
"ucode29_mimo.fw" : ( (29,), ("HT",), ),
"ucode30_mimo.fw" : ( (30,), ("N",), ),
"ucode33_lcn40.fw" : ( (33,), ("LCN40",), ),
"ucode40.fw" : ( (40,), ("AC",), ),
"ucode42.fw" : ( (42,), ("AC",), ),
"pcm4.fw" : ( (1,2,3,4,), ("G",), ),
"pcm5.fw" : ( (5,6,7,8,9,10,), ("G","A","AG",), ),
}
initvalmapping = {
"a0g1initvals5.fw" : ( (5,6,7,8,9,10,), ("AG",), ),
"a0g0initvals5.fw" : ( (5,6,7,8,9,10,), ("A", "AG",), ),
"b0g0initvals2.fw" : ( (2,4,), ("G",), ),
"b0g0initvals5.fw" : ( (5,6,7,8,9,10,), ("G",), ),
"b0g0initvals13.fw" : ( (13,), ("G",), ),
"n0initvals11.fw" : ( (11,12,), ("N",), ),
"n0initvals16.fw" : ( (16,17,18,23,), ("N",), ),
"n0initvals24.fw" : ( (24,), ("N",), ),
"n0initvals25.fw" : ( (25,28,), ("N",), ),
"n16initvals30.fw" : ( (30,), ("N",), ),
"lp0initvals13.fw" : ( (13,), ("LP",), ),
"lp0initvals14.fw" : ( (14,), ("LP",), ),
"lp0initvals15.fw" : ( (15,), ("LP",), ),
# "lp0initvals16.fw" : ( (16,17,18,), ("LP",), ),
"lcn0initvals24.fw" : ( (24,), ("LCN",), ),
"ht0initvals26.fw" : ( (26,), ("HT",), ),
"ht0initvals29.fw" : ( (29,), ("HT",), ),
"lcn400initvals33.fw" : ( (33,), ("LCN40",), ),
"ac0initvals40.fw" : ( (40,), ("AC",), ),
"ac1initvals42.fw" : ( (42,), ("AC",), ),
"a0g1bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("AG",), ),
"a0g0bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("A", "AG"), ),
"b0g0bsinitvals5.fw" : ( (5,6,7,8,9,10,), ("G",), ),
"n0bsinitvals11.fw" : ( (11,12,), ("N",), ),
"n0bsinitvals16.fw" : ( (16,17,18,23,), ("N",), ),
"n0bsinitvals24.fw" : ( (24,), ("N",), ),
"n0bsinitvals25.fw" : ( (25,28,), ("N",), ),
"n16bsinitvals30.fw" : ( (30,), ("N",), ),
"lp0bsinitvals13.fw" : ( (13,), ("LP",), ),
"lp0bsinitvals14.fw" : ( (14,), ("LP",), ),
"lp0bsinitvals15.fw" : ( (15,), ("LP",), ),
# "lp0bsinitvals16.fw" : ( (16,17,18,), ("LP",), ),
"lcn0bsinitvals24.fw" : ( (24,), ("LCN",), ),
"ht0bsinitvals26.fw" : ( (26,), ("HT",), ),
"ht0bsinitvals29.fw" : ( (29,), ("HT",), ),
"lcn400bsinitvals33.fw" : ( (33,), ("LCN40",), ),
"ac0bsinitvals40.fw" : ( (40,), ("AC",), ),
"ac1bsinitvals42.fw" : ( (42,), ("AC",), ),
}
for f in fwfiles:
if f in revmapping:
if revs_match(corerevs, revmapping[f][0]) and\
phytypes_match(phytypes, revmapping[f][1]):
required_fwfiles += [f]
continue
if f in initvalmapping:
if revs_match(corerevs, initvalmapping[f][0]) and\
phytypes_match(phytypes, initvalmapping[f][1]):
required_fwfiles += [f]
continue
print("WARNING: Firmware file %s not found in the mapping lists" % f)
for f in fwfiles:
if f not in required_fwfiles:
print("Deleting %s" % f)
os.unlink(fwpath + '/' + f)
| gpl-2.0 |
partofthething/home-assistant | homeassistant/components/dwd_weather_warnings/sensor.py | 11 | 6218 | """
Support for getting statistical data from the DWD Weather Warnings service.
Data is fetched from DWD:
https://rcccm.dwd.de/DE/wetter/warnungen_aktuell/objekt_einbindung/objekteinbindung.html
Warnungen vor extremem Unwetter (Stufe 4)
Unwetterwarnungen (Stufe 3)
Warnungen vor markantem Wetter (Stufe 2)
Wetterwarnungen (Stufe 1)
"""
from datetime import timedelta
import logging
from dwdwfsapi import DwdWeatherWarningsAPI
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_MONITORED_CONDITIONS, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by DWD"
ATTR_REGION_NAME = "region_name"
ATTR_REGION_ID = "region_id"
ATTR_LAST_UPDATE = "last_update"
ATTR_WARNING_COUNT = "warning_count"
API_ATTR_WARNING_NAME = "event"
API_ATTR_WARNING_TYPE = "event_code"
API_ATTR_WARNING_LEVEL = "level"
API_ATTR_WARNING_HEADLINE = "headline"
API_ATTR_WARNING_DESCRIPTION = "description"
API_ATTR_WARNING_INSTRUCTION = "instruction"
API_ATTR_WARNING_START = "start_time"
API_ATTR_WARNING_END = "end_time"
API_ATTR_WARNING_PARAMETERS = "parameters"
API_ATTR_WARNING_COLOR = "color"
DEFAULT_NAME = "DWD-Weather-Warnings"
CONF_REGION_NAME = "region_name"
CURRENT_WARNING_SENSOR = "current_warning_level"
ADVANCE_WARNING_SENSOR = "advance_warning_level"
SCAN_INTERVAL = timedelta(minutes=15)
MONITORED_CONDITIONS = {
CURRENT_WARNING_SENSOR: [
"Current Warning Level",
None,
"mdi:close-octagon-outline",
],
ADVANCE_WARNING_SENSOR: [
"Advance Warning Level",
None,
"mdi:close-octagon-outline",
],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_REGION_NAME): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_MONITORED_CONDITIONS, default=list(MONITORED_CONDITIONS)
): vol.All(cv.ensure_list, [vol.In(MONITORED_CONDITIONS)]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the DWD-Weather-Warnings sensor."""
name = config.get(CONF_NAME)
region_name = config.get(CONF_REGION_NAME)
api = WrappedDwDWWAPI(DwdWeatherWarningsAPI(region_name))
sensors = []
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
sensors.append(DwdWeatherWarningsSensor(api, name, sensor_type))
add_entities(sensors, True)
class DwdWeatherWarningsSensor(Entity):
"""Representation of a DWD-Weather-Warnings sensor."""
def __init__(self, api, name, sensor_type):
"""Initialize a DWD-Weather-Warnings sensor."""
self._api = api
self._name = name
self._sensor_type = sensor_type
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name} {MONITORED_CONDITIONS[self._sensor_type][0]}"
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return MONITORED_CONDITIONS[self._sensor_type][2]
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return MONITORED_CONDITIONS[self._sensor_type][1]
@property
def state(self):
"""Return the state of the device."""
if self._sensor_type == CURRENT_WARNING_SENSOR:
return self._api.api.current_warning_level
return self._api.api.expected_warning_level
@property
def device_state_attributes(self):
"""Return the state attributes of the DWD-Weather-Warnings."""
data = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_REGION_NAME: self._api.api.warncell_name,
ATTR_REGION_ID: self._api.api.warncell_id,
ATTR_LAST_UPDATE: self._api.api.last_update,
}
if self._sensor_type == CURRENT_WARNING_SENSOR:
searched_warnings = self._api.api.current_warnings
else:
searched_warnings = self._api.api.expected_warnings
data[ATTR_WARNING_COUNT] = len(searched_warnings)
for i, warning in enumerate(searched_warnings, 1):
data[f"warning_{i}_name"] = warning[API_ATTR_WARNING_NAME]
data[f"warning_{i}_type"] = warning[API_ATTR_WARNING_TYPE]
data[f"warning_{i}_level"] = warning[API_ATTR_WARNING_LEVEL]
data[f"warning_{i}_headline"] = warning[API_ATTR_WARNING_HEADLINE]
data[f"warning_{i}_description"] = warning[API_ATTR_WARNING_DESCRIPTION]
data[f"warning_{i}_instruction"] = warning[API_ATTR_WARNING_INSTRUCTION]
data[f"warning_{i}_start"] = warning[API_ATTR_WARNING_START]
data[f"warning_{i}_end"] = warning[API_ATTR_WARNING_END]
data[f"warning_{i}_parameters"] = warning[API_ATTR_WARNING_PARAMETERS]
data[f"warning_{i}_color"] = warning[API_ATTR_WARNING_COLOR]
# Dictionary for the attribute containing the complete warning
warning_copy = warning.copy()
warning_copy[API_ATTR_WARNING_START] = data[f"warning_{i}_start"]
warning_copy[API_ATTR_WARNING_END] = data[f"warning_{i}_end"]
data[f"warning_{i}"] = warning_copy
return data
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._api.api.data_valid
def update(self):
"""Get the latest data from the DWD-Weather-Warnings API."""
_LOGGER.debug(
"Update requested for %s (%s) by %s",
self._api.api.warncell_name,
self._api.api.warncell_id,
self._sensor_type,
)
self._api.update()
class WrappedDwDWWAPI:
"""Wrapper for the DWD-Weather-Warnings api."""
def __init__(self, api):
"""Initialize a DWD-Weather-Warnings wrapper."""
self.api = api
@Throttle(SCAN_INTERVAL)
def update(self):
"""Get the latest data from the DWD-Weather-Warnings API."""
self.api.update()
_LOGGER.debug("Update performed")
| mit |
axelkennedal/dissen | dissenEnv/lib/python3.5/site-packages/django/db/transaction.py | 238 | 11366 | from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, Error, ProgrammingError, connections,
)
from django.utils.decorators import ContextDecorator
class TransactionManagementError(ProgrammingError):
"""
This exception is thrown when transaction management is used improperly.
"""
pass
def get_connection(using=None):
"""
Get a database connection by name, or the default database connection
if no name is provided. This is a private API.
"""
if using is None:
using = DEFAULT_DB_ALIAS
return connections[using]
def get_autocommit(using=None):
"""
Get the autocommit status of the connection.
"""
return get_connection(using).get_autocommit()
def set_autocommit(autocommit, using=None):
"""
Set the autocommit status of the connection.
"""
return get_connection(using).set_autocommit(autocommit)
def commit(using=None):
"""
Commits a transaction.
"""
get_connection(using).commit()
def rollback(using=None):
"""
Rolls back a transaction.
"""
get_connection(using).rollback()
def savepoint(using=None):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
return get_connection(using).savepoint()
def savepoint_rollback(sid, using=None):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_rollback(sid)
def savepoint_commit(sid, using=None):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_commit(sid)
def clean_savepoints(using=None):
"""
Resets the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
def get_rollback(using=None):
"""
Gets the "needs rollback" flag -- for *advanced use* only.
"""
return get_connection(using).get_rollback()
def set_rollback(rollback, using=None):
"""
Sets or unsets the "needs rollback" flag -- for *advanced use* only.
When `rollback` is `True`, it triggers a rollback when exiting the
innermost enclosing atomic block that has `savepoint=True` (that's the
default). Use this to force a rollback without raising an exception.
When `rollback` is `False`, it prevents such a rollback. Use this only
after rolling back to a known-good state! Otherwise, you break the atomic
block and data corruption may occur.
"""
return get_connection(using).set_rollback(rollback)
def on_commit(func, using=None):
"""
Register `func` to be called when the current transaction is committed.
If the current transaction is rolled back, `func` will not be called.
"""
get_connection(using).on_commit(func)
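# Illustrative sketch (not part of the module): registering a callback that
# only runs if the surrounding transaction commits; send_welcome_email is a
# hypothetical callable.
#
#     from functools import partial
#     transaction.on_commit(partial(send_welcome_email, user_id=42))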
#################################
# Decorators / context managers #
#################################
class Atomic(ContextDecorator):
"""
This class guarantees the atomic execution of a given block.
An instance can be used either as a decorator or as a context manager.
When it's used as a decorator, __call__ wraps the execution of the
decorated function in the instance itself, used as a context manager.
When it's used as a context manager, __enter__ creates a transaction or a
savepoint, depending on whether a transaction is already in progress, and
__exit__ commits the transaction or releases the savepoint on normal exit,
and rolls back the transaction or to the savepoint on exceptions.
It's possible to disable the creation of savepoints if the goal is to
ensure that some code runs within a transaction without creating overhead.
A stack of savepoints identifiers is maintained as an attribute of the
connection. None denotes the absence of a savepoint.
This allows reentrancy even if the same AtomicWrapper is reused. For
example, it's possible to define `oa = @atomic('other')` and use `@oa` or
`with oa:` multiple times.
Since database connections are thread-local, this is thread-safe.
This is a private API.
"""
def __init__(self, using, savepoint):
self.using = using
self.savepoint = savepoint
def __enter__(self):
connection = get_connection(self.using)
if not connection.in_atomic_block:
# Reset state when entering an outermost atomic block.
connection.commit_on_exit = True
connection.needs_rollback = False
if not connection.get_autocommit():
# Some database adapters (namely sqlite3) don't handle
# transactions and savepoints properly when autocommit is off.
# Turning autocommit back on isn't an option; it would trigger
# a premature commit. Give up if that happens.
if connection.features.autocommits_when_autocommit_is_off:
raise TransactionManagementError(
"Your database backend doesn't behave properly when "
"autocommit is off. Turn it on before using 'atomic'.")
# Pretend we're already in an atomic block to bypass the code
# that disables autocommit to enter a transaction, and make a
# note to deal with this case in __exit__.
connection.in_atomic_block = True
connection.commit_on_exit = False
if connection.in_atomic_block:
# We're already in a transaction; create a savepoint, unless we
# were told not to or we're already waiting for a rollback. The
# second condition avoids creating useless savepoints and prevents
# overwriting needs_rollback until the rollback is performed.
if self.savepoint and not connection.needs_rollback:
sid = connection.savepoint()
connection.savepoint_ids.append(sid)
else:
connection.savepoint_ids.append(None)
else:
connection.set_autocommit(False, force_begin_transaction_with_broken_autocommit=True)
connection.in_atomic_block = True
def __exit__(self, exc_type, exc_value, traceback):
connection = get_connection(self.using)
if connection.savepoint_ids:
sid = connection.savepoint_ids.pop()
else:
# Prematurely unset this flag to allow using commit or rollback.
connection.in_atomic_block = False
try:
if connection.closed_in_transaction:
# The database will perform a rollback by itself.
# Wait until we exit the outermost block.
pass
elif exc_type is None and not connection.needs_rollback:
if connection.in_atomic_block:
# Release savepoint if there is one
if sid is not None:
try:
connection.savepoint_commit(sid)
except DatabaseError:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
raise
else:
# Commit transaction
try:
connection.commit()
except DatabaseError:
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
raise
else:
# This flag will be set to True again if there isn't a savepoint
# allowing to perform the rollback at this level.
connection.needs_rollback = False
if connection.in_atomic_block:
# Roll back to savepoint if there is one, mark for rollback
# otherwise.
if sid is None:
connection.needs_rollback = True
else:
try:
connection.savepoint_rollback(sid)
# The savepoint won't be reused. Release it to
# minimize overhead for the database server.
connection.savepoint_commit(sid)
except Error:
# If rolling back to a savepoint fails, mark for
# rollback at a higher level and avoid shadowing
# the original exception.
connection.needs_rollback = True
else:
# Roll back transaction
try:
connection.rollback()
except Error:
# An error during rollback means that something
# went wrong with the connection. Drop it.
connection.close()
finally:
# Outermost block exit when autocommit was enabled.
if not connection.in_atomic_block:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.set_autocommit(True)
# Outermost block exit when autocommit was disabled.
elif not connection.savepoint_ids and not connection.commit_on_exit:
if connection.closed_in_transaction:
connection.connection = None
else:
connection.in_atomic_block = False
def atomic(using=None, savepoint=True):
# Bare decorator: @atomic -- although the first argument is called
# `using`, it's actually the function being decorated.
if callable(using):
return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
# Decorator: @atomic(...) or context manager: with atomic(...): ...
else:
return Atomic(using, savepoint)
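# --- Illustrative sketch (added by the editor, not part of the original module) ---
# `atomic` works both as a decorator and as a context manager; the view and helper
# names below are hypothetical.
#
#     from django.db import transaction
#
#     @transaction.atomic
#     def create_order(request):
#         do_stuff()                          # runs inside a single transaction
#
#     def create_order_with_savepoint(request):
#         with transaction.atomic():          # outermost block opens the transaction
#             do_stuff()
#             with transaction.atomic():      # nested block only creates a savepoint
#                 do_more_stuff()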
def _non_atomic_requests(view, using):
try:
view._non_atomic_requests.add(using)
except AttributeError:
view._non_atomic_requests = {using}
return view
def non_atomic_requests(using=None):
if callable(using):
return _non_atomic_requests(using, DEFAULT_DB_ALIAS)
else:
if using is None:
using = DEFAULT_DB_ALIAS
return lambda view: _non_atomic_requests(view, using)
| mit |
rpwagner/tiled-display | flapp/movie/imageMovieReader.py | 1 | 7164 | import os
import Image
import traceback
from flapp.movie.utils import GetMovieFPS, GetMovieDuration
import mmap
import resource
class ImageMovieReader:
ST_STOP = 0
ST_PLAY = 1
ST_PAUSE = 2
def __init__(self, quiet = False):
self.path = ""
self.fileLoaded = False
self.playingFrame = None
self.state = self.ST_STOP
self.currentFrameIndex = 0
# self.duration = 0.0
self.defaultFps = 24.0
self.allowFrameSkip=False # whether frames should be skipped to stay in time
self.fps = self.defaultFps
self.discoverFps=True # whether fps should be discovered automatically.
# optional since currently done externally.
self.movieTime = 0.0
self.supportedFormats = [".png", ".jpg"]
self.quiet = quiet
def setPath(self, path):
self.path = path
setFile = setPath
def getSize(self):
return (self.width, self.height)
def preloadFrames(self):
# Allow more open files than the default
#resource.setrlimit(resource.RLIMIT_NOFILE, (-1,-1)) # -1 means the max possible
#print "FILE LIMIT:", resource.getrlimit(resource.RLIMIT_NOFILE)
softLimit, hardLimit = resource.getrlimit(resource.RLIMIT_NOFILE)
limit = min(softLimit, hardLimit) - 20
self.files = self.files[:limit-20]
self.preloadedFrames = []
print "preloading ", len(self.files), "files."
for i in range(len(self.files)):
path = os.path.join(self.path, self.files[i])
#self.preloadedFrames.append(open(path).read())
f = open(path, "r+b")
memFile = mmap.mmap(f.fileno(), 0)
memFile.seek(0)
# memFile.close()
self.preloadedFrames.append( memFile )
f.close()
def readOneFrame(self, index):
if self.usePreload:
print "INDEX:", index
img = Image.open(self.preloadedFrames[index])
return img
else:
path = os.path.join(self.path, self.files[index % len(self.files)])
img = Image.open(path)
return img
getFrameByIndex=readOneFrame
def getNextFrame(self):
return self.readOneFrame(self.currentFrameIndex)
def loadFile(self, path=None, preload=False):
if path:
self.path = path
if not os.path.exists(self.path):
raise Exception("Path does not exist: %s" % self.path )
allFiles = os.listdir(self.path)
self.files = []
for f in allFiles:
if os.path.splitext(f.lower())[1] in self.supportedFormats:
self.files.append(f)
self.files.sort()
print "Number of frames in movie:", len(self.files)
self.usePreload=preload
if self.usePreload:
self.preloadFrames()
self.frame = self.readOneFrame(0)
if self.frame.mode == "P":
self.frame = self.frame.convert("RGBA")
# print dir(self.stream.GetFrameNo(0))
self.width, self.height = self.frame.size
self.fileLoaded = True
self.movieTime = 0.0
self.currentFrameIndex = 0 # frame 0 gotten a few lines above
# try to read duration and FPS. Should eventually be done internally
# with additions to pyffmpeg.
self.fps = self.defaultFps
if self.discoverFps:
try:
				# The reader keeps its source location in self.path; self.filename is never set.
				self.fps = GetMovieFPS(self.path)
except:
print "Warning: unable to determine movie FPS, using default."
traceback.print_exc()
#try:
# self.duration = GetMovieDuration(self.filename)
#except:
# print "Warning: unable to determine movie duration (not necessary)"
def has_alpha(self):
return False # for now, maybe later check surface or movie if we play a transparent movie.
def play(self):
self.state = self.ST_PLAY
start = play
def getNumImageChannels(self):
return 4;
def rewindMovie(self):
self.movieTime = 0.0
self.currentFrameIndex = 0
if not self.quiet:
print "Movie loop restarted"
def getNumFrames(self):
return len(self.files)
def getFrame(self):
try:
#img = self.stream.GetFrameTime(self.movieTime)
# given movieTime and fps
targetFrame = int(self.fps * self.movieTime)
#print "Current frame, target frame, movieTime:", self.currentFrameIndex, targetFrame, self.movieTime
if targetFrame > self.currentFrameIndex:
if (False == self.allowFrameSkip) or targetFrame-self.currentFrameIndex < 4 :
# it can take two frames worth of time to seek, so only do
# it if playback is more than a frame or two too slow.
#print "next frame"
img = self.getNextFrame()
self.currentFrameIndex += 1
# import time
# time.sleep(0.5) # for debugging
else:
#print "seek frame:", self.currentFrameIndex, targetFrame,"seek offset:", targetFrame - self.currentFrameIndex
self.currentFrameIndex = targetFrame
img = self.getFrameByIndex(self.currentFrameIndex)
# print "Got frame no:", self.currentFrameIndex, targetFrame, img
else:
#print "keep frame", self.currentFrameIndex, targetFrame
img = None
		except IOError:
			# A failed read restarts the loop; make sure img is defined for the check below.
			img = None
			self.rewindMovie()
if self.currentFrameIndex >= len(self.files):
self.rewindMovie()
# print "exiting getFrame, movieTime:", self.movieTime
if img != None:
# img = img.transpose(Image.FLIP_TOP_BOTTOM)
return img.tostring()
else:
return None
#return img
"""
if frameNumber != self.playingFrame: # new frame so change the displayed image
self.rawFrameData = pygame.image.tostring(self.moviePygameSurface,'RGB',1)
self.rawFrameData = [x for x in self.rawFrameData]
self.rawFrameData = "".join(self.rawFrameData)
#print "raw data len:", len(self.rawFrameData)
#print "Raw data:", [ord(x) for x in self.rawFrameData]
#imagestr = [chr(int(x/3./1022./762.*255 / (x%3+1))) for x in xrange(1022*762*3)]
#imagestr = "".join(imagestr)
#return imagestr
return self.rawFrameData
"""
def update(self, secs, app):
if not self.fileLoaded:
self.loadFile()
print "LOADED MOVIE FILE"
return
if self.fileLoaded and self.state == self.ST_PLAY:
self.movieTime += secs
def getMovieTime(self):
return self.movieTime
def setMovieTime(self, movieTime):
self.movieTime = movieTime
def setAllowFrameSkip(self, boolValue=True):
self.allowFrameSkip = bool(boolValue)
def setFps(self, fps):
self.fps = fps
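# --- Illustrative sketch (added by the editor, not part of the original module) ---
# A typical driving loop for this reader, assuming a directory of numbered .png/.jpg
# frames; the directory name and the 30 ms tick below are arbitrary.
#
#     reader = ImageMovieReader()
#     reader.setPath("/data/frames")
#     reader.loadFile(preload=False)
#     reader.play()
#     while True:
#         reader.update(0.03, app=None)   # advance the movie clock by one tick
#         raw = reader.getFrame()         # raw image bytes, or None if the frame is unchanged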
| apache-2.0 |
chainer/chainer | chainer/functions/activation/relu.py | 3 | 4736 | import numpy
import chainer
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
import chainerx
if cuda.available:
_relu_grad2_kernel = cuda.elementwise(
'T y, T gy', 'T gx',
'gx = y > 0 ? gy : (T)0', 'relu_bwd')
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
_mode = cuda.cuda.cudnn.CUDNN_ACTIVATION_RELU # type: ignore
class ReLU(function_node.FunctionNode):
"""Rectified Linear Unit."""
is_elementwise = True
_use_cudnn = False
def check_type_forward(self, in_types):
type_check._argname(in_types, ('x',))
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_chainerx(self, inputs):
x, = inputs
return chainerx.maximum(x, 0),
def forward_cpu(self, inputs):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
return self.forward_ideep(inputs)
x, = inputs
y = numpy.maximum(x, 0, dtype=x.dtype)
self.retain_outputs((0,))
return utils.force_array(y),
def forward_ideep(self, inputs):
x, = inputs
y = intel64.ideep.relu.Forward(intel64.ideep.array(x))
self.retain_outputs((0,))
return y,
def forward_gpu(self, inputs):
x, = inputs
if chainer.should_use_cudnn('>=auto') and x.flags.c_contiguous:
self._use_cudnn = True
y = cudnn.activation_forward(x, _mode)
else:
y = cuda.cupy.maximum(x, 0, dtype=x.dtype)
self.retain_outputs((0,))
return y,
def backward(self, indexes, grad_outputs):
gy, = grad_outputs
y, = self.get_retained_outputs()
y_arr = y.raw_array
if self._use_cudnn and chainer.should_use_cudnn('>=auto'):
# cuDNN implementation
return ReLUGradCudnn(y_arr).apply((gy,))
# Generic implementation
return ReLUGrad2(y_arr).apply((gy,))
class ReLUGrad2(function_node.FunctionNode):
"""Computes the gradient of the ReLU function.
This function takes 2 variables b and c, and
computes f(b, c) = sign(b) * c with backpropagation
where operations are done in elementwise manner
    and sign(x) = 1 when x > 0 and 0 otherwise.
As the gradient of f with respect to b is 0,
we do not backpropagate errors toward b for computational efficiency.
"""
def __init__(self, b):
super(ReLUGrad2, self).__init__()
self.b = b
def forward_cpu(self, inputs):
if (intel64.should_use_ideep('>=auto')
and intel64.inputs_all_ready(inputs)):
return self.forward_ideep(inputs)
gy, = inputs
gx = gy * (self.b > 0)
return utils.force_array(gx, dtype=gy.dtype),
def forward_ideep(self, inputs):
gy, = inputs
gx = intel64.ideep.relu.Backward(
intel64.ideep.array(self.b),
intel64.ideep.array(gy))
return gx,
def forward_gpu(self, inputs):
gx = _relu_grad2_kernel(self.b, inputs[0])
return gx,
def backward(self, indexes, grad_outputs):
return ReLUGrad2(self.b).apply(grad_outputs)
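# --- Illustrative sketch (added by the editor, not part of the original module) ---
# The gradient rule implemented above, spelled out in plain NumPy: the incoming
# gradient is passed through only where the retained output y is positive.
#
#     import numpy as np
#     y = np.array([0.0, 2.0, 0.0, 1.5], dtype=np.float32)   # y = relu(x)
#     gy = np.ones_like(y)
#     gx = gy * (y > 0)                                       # -> [0., 1., 0., 1.]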
class ReLUGradCudnn(function_node.FunctionNode):
"""Computes the gradient of the ReLU function.
This function takes 3 variables a, b, and c, and
computes f(a, b, c) = sign(b) * c with backpropagation
    where operations are done in elementwise manner
    and sign(x) = 1 if x > 0 and 0 otherwise.
As the gradient of f with respect to a and b are 0,
we do not backpropagate errors toward them for computational efficiency.
"""
is_elementwise = True
def __init__(self, y):
super(ReLUGradCudnn, self).__init__()
self.y = y
def forward(self, inputs):
gy, = inputs
return cudnn.activation_backward(self.y, self.y, gy, _mode),
def backward(self, indexes, grad_outputs):
return ReLUGrad2(self.y).apply(grad_outputs)
def relu(x):
"""Rectified Linear Unit function.
.. math:: f(x)=\\max(0, x).
Args:
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
Returns:
~chainer.Variable: Output variable. A
:math:`(s_1, s_2, ..., s_N)`-shaped float array.
.. admonition:: Example
>>> x = np.array([[-1, 0], [2, -3], [-2, 1]], np.float32)
>>> np.any(x < 0)
True
>>> y = F.relu(x)
>>> np.any(y.array < 0)
False
>>> y.shape
(3, 2)
"""
y, = ReLU().apply((x,))
return y
| mit |
Kilhog/odoo | addons/website/models/res_config.py | 240 | 2660 |
from openerp.osv import fields, osv
class website_config_settings(osv.osv_memory):
_name = 'website.config.settings'
_inherit = 'res.config.settings'
_columns = {
'website_id': fields.many2one('website', string="website", required=True),
'website_name': fields.related('website_id', 'name', type="char", string="Website Name"),
'language_ids': fields.related('website_id', 'language_ids', type='many2many', relation='res.lang', string='Languages'),
'default_lang_id': fields.related('website_id', 'default_lang_id', type='many2one', relation='res.lang', string='Default language'),
'default_lang_code': fields.related('website_id', 'default_lang_code', type="char", string="Default language code"),
'google_analytics_key': fields.related('website_id', 'google_analytics_key', type="char", string='Google Analytics Key'),
'social_twitter': fields.related('website_id', 'social_twitter', type="char", string='Twitter Account'),
'social_facebook': fields.related('website_id', 'social_facebook', type="char", string='Facebook Account'),
'social_github': fields.related('website_id', 'social_github', type="char", string='GitHub Account'),
'social_linkedin': fields.related('website_id', 'social_linkedin', type="char", string='LinkedIn Account'),
'social_youtube': fields.related('website_id', 'social_youtube', type="char", string='Youtube Account'),
'social_googleplus': fields.related('website_id', 'social_googleplus', type="char", string='Google+ Account'),
}
def on_change_website_id(self, cr, uid, ids, website_id, context=None):
website_data = self.pool.get('website').read(cr, uid, [website_id], [], context=context)[0]
values = {'website_name': website_data['name']}
for fname, v in website_data.items():
if fname in self._columns:
values[fname] = v[0] if v and self._columns[fname]._type == 'many2one' else v
return {'value' : values}
# FIXME in trunk for god sake. Change the fields above to fields.char instead of fields.related,
# and create the function set_website who will set the value on the website_id
# create does not forward the values to the related many2one. Write does.
def create(self, cr, uid, vals, context=None):
config_id = super(website_config_settings, self).create(cr, uid, vals, context=context)
self.write(cr, uid, config_id, vals, context=context)
return config_id
_defaults = {
'website_id': lambda self,cr,uid,c: self.pool.get('website').search(cr, uid, [], context=c)[0],
}
| agpl-3.0 |
julienvaslet/beer | shell/shell.py | 1 | 16453 | # -*- coding: utf-8 -*-
import sys
import re
import os
try:
import termios
import tty
from . import keys_unix as keys
SHELL_SYSTEM = "unix"
except ImportError:
import msvcrt
from . import keys_windows as keys
SHELL_SYSTEM = "windows"
# Switch to UTF-8 output encoding
# And reset the terminal window
if sys.stdout.encoding != "cp65001":
os.system( "echo off" )
os.system( "chcp 65001" )
sys.stdout.write( "\x1b[A" )
sys.stdout.flush()
from . import commands
from language import Language
WORD_SEPARATORS = [ " ", ",", ".", ";", ":", "!", "+", "-", "*", "/", "\\", "=", "(", ")", "{", "}", "[", "]", "^", "&", "|", ">", "<" ]
class Shell():
"""Represents an interactive command interpreter.
Verbosities are:
- 0: quiet
- 1: normal
- 2: user debug
- 3: shell debug
Attributes:
- (str) _title: Title of shell prompts.
- (int) _width: Width of the shell (default: 79).
- (bool) _running: Status of the shell session.
- (int) _verbosity: Level of verbosity (default: 1).
- (dict) _commands: Dictionary of registered commands.
"""
def __init__( self, title="", verbosity=1 ):
"""Initialize a new shell."""
# Load localized strings
Language.load( "shell.ini" )
self._title = title
self._width = 79
self._running = False
self._verbosity = verbosity
self._commands = {}
self.add_command( commands.Exit() )
self.add_command( commands.Help() )
def error( self, message, code=0 ):
"""Prints an error message to the standard error output.
Prints an error message to the standard error output. If an error code
greater than 0 is passed, it is prefixed to the message.
Parameters:
- (str) message: The error message to print.
- (int) code: The error code associated to the message (default: 0)
"""
if code > 0:
message = Language.get( Shell, "error_number" ) % ( code, message )
print( "[!] %s" % message, file=sys.stderr )
def log( self, message, level=1 ):
"""Prints an informative message to the shell output.
Prints an informative message to the shell output. If the level is
		higher than the shell verbosity, the message is ignored.
Parameters:
- (str) message: The message to print.
- (int) level: The level of verbosity of the message (default: 1).
"""
if level <= self._verbosity:
self.print( message, left_text="[*]", lpad=4 )
def warn( self, message, level=1 ):
"""Prints a warning message to the shell output.
Prints a warning message to the shell output. If the level is
		higher than the shell verbosity, the message is ignored.
Parameters:
- (str) message: The warning message to print.
- (int) level: The level of verbosity of the message (default: 1).
"""
if level <= self._verbosity:
self.print( message, left_text="[!]", lpad=4 )
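	# --- Illustrative sketch (added by the editor, not part of the original class) ---
	# log() and warn() only emit when their level is at or below the shell verbosity,
	# e.g. with the default verbosity=1:
	#
	#     shell = Shell(title="demo")
	#     shell.log("always shown")          # level 1 <= 1 -> printed with a "[*]" gutter
	#     shell.log("debug only", level=2)   # level 2 >  1 -> ignored
	#     shell.warn("careful")              # printed with a "[!]" gutter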
def exit( self ):
"""Ends the shell session."""
self._running = False
def getch( self ):
"""Reads a character from user standard input.
Reads a character or a sequence from the user standard input without
printing it. Sequences are read to correctly get HOME, END, TAB, etc.
keys and unicode characters.
Returns:
- bytes -- the raw bytes sequence read.
"""
sequence = b""
escape_regex = re.compile( keys.escape_regex )
if SHELL_SYSTEM == "unix":
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr( fd )
new_settings = termios.tcgetattr( fd )
new_settings[3] = new_settings[3] & ~termios.ICANON & ~termios.ECHO
new_settings[6][termios.VMIN] = 1
new_settings[6][termios.VTIME] = 0
termios.tcsetattr( fd, termios.TCSANOW, new_settings )
try:
complete = False
while not complete:
sequence += os.read( fd, 1 )
if not escape_regex.fullmatch( sequence ):
complete = True
finally:
termios.tcsetattr( fd, termios.TCSADRAIN, old_settings )
# Windows case
else:
complete = False
while not complete:
s = msvcrt.getch()
if s == b'\r':
s = b'\n'
sequence += s
if not escape_regex.fullmatch( sequence ):
complete = True
return sequence
def autocomplete( self, line ):
"""Gets the autocompletion choices according to the current command line.
Parameters:
- (str) line: The current command line.
Returns:
- list -- the current available choices.
"""
choices = []
args = self.parse_line( line, keep_trailing_space=True )
if len(args) == 1:
for command_name in self._commands:
if command_name[:len(args[0])] == args[0]:
choices.append( command_name )
elif len(args) > 1:
command = self.get_command( args[0] )
if command != None:
choices = command.autocomplete( self, args )
return choices
def parse_line( self, line, keep_trailing_space=False ):
"""Parses the specified command line into an arguments array.
Parses the specified command line into an arguments array. If the
keep_trailing_space boolean is set and the command line ends with spaces,
an empty string is added to the arguments list.
Parameters:
- (str) line: The command line.
- (bool) keep_trailing_space: Keep trailing spaces (default: False).
Returns:
- list -- the arguments.
"""
args = []
matches = re.findall( r'"([^"]*)"|([^\s]+)', line )
for match in matches:
args.append( match[0] if len( match[0] ) else match[1] )
if keep_trailing_space:
if re.search( r"[^\s]+\s+$", line ) and keep_trailing_space:
args.append( "" )
return args
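	# --- Illustrative sketch (added by the editor, not part of the original class) ---
	# parse_line() splits on whitespace but keeps double-quoted substrings together,
	# and keep_trailing_space appends an empty argument when the line ends in spaces:
	#
	#     shell.parse_line('add "pale ale" 4.5')
	#     # -> ['add', 'pale ale', '4.5']
	#     shell.parse_line('add ', keep_trailing_space=True)
	#     # -> ['add', '']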
def input( self, prompt="" ):
"""Reads a command line from the user input.
Reads a command line from the user input. Arrow keys, Home, End and Tab
keys are intercepted to provide input navigation and autocompletion.
Parameters:
- (str) prompt: The prompt message.
Returns:
- str -- the read command line.
"""
line = ""
line_index = 0
last_line_index = 0
last_length = 0
line_read = False
rewrite_line = False
should_beep = False
new_line = True
while not line_read:
# Print the line to the console
if new_line:
output = prompt + line + "\b" * (len(line) - line_index)
os.write( sys.stdout.fileno(), output.encode() )
new_line = False
elif rewrite_line:
output = ("\b" * last_line_index) + line
if last_length > len(line):
output += " " * (last_length - len(line))
output += "\b" * (last_length - len(line))
output += "\b" * (len(line) - line_index)
os.write( sys.stdout.fileno(), output.encode() )
# Emits console beep
elif should_beep:
os.write( sys.stdout.fileno(), b"\x07" )
rewrite_line = False
should_beep = False
last_line_index = line_index
last_length = len(line)
key = self.getch()
# End of line
if key == keys.ENTER:
line_read = True
# Home
elif key == keys.HOME:
line_index = 0
rewrite_line = True
# End
elif key == keys.END:
line_index = len(line)
rewrite_line = True
# Tabulation
elif key == keys.TABULATION:
choices = self.autocomplete( line )
if len(choices) > 0:
# Prettify line
args = self.parse_line( line, keep_trailing_space=True )
line = " ".join( args )
characters_to_be_replaced = 1
while characters_to_be_replaced <= len(line) and line[-characters_to_be_replaced:].lower() != choices[0][:characters_to_be_replaced].lower():
characters_to_be_replaced += 1
					# The end of the line does not overlap the choice at all
if characters_to_be_replaced > len(line):
characters_to_be_replaced = 0
# Check for the full replacement (repetitive character/pattern cases)
else:
while characters_to_be_replaced + 1 < len(line) and line[-(characters_to_be_replaced + 1):].lower() == choices[0][:characters_to_be_replaced + 1].lower():
characters_to_be_replaced += 1
if len(choices) == 1:
#print( "\n", choices, line, len( line), characters_to_be_replaced )
line = line[:(len(line)-characters_to_be_replaced)] + choices[0] + " "
line_index = len(line)
rewrite_line = True
else:
# Partial autocompletion
similar_characters = 0
while similar_characters < len(choices[0]):
is_similar = True
for choice in choices:
if choice[similar_characters] != choices[0][similar_characters]:
is_similar = False
break
if is_similar:
similar_characters += 1
else:
break
if similar_characters > 1 and similar_characters > characters_to_be_replaced:
line = line[:(len(line)-characters_to_be_replaced)] + choices[0][:similar_characters]
line_index = len(line)
rewrite_line = True
# Prints available choices
max_choice_length = 0
for choice in choices:
if len(choice) > max_choice_length:
max_choice_length = len(choice)
max_choice_length += 2
choice_line_length = 0
output = "\n"
for choice in choices:
if choice_line_length + max_choice_length > self._width:
choice_line_length = 0
output += "\n"
output += choice.ljust( max_choice_length )
choice_line_length += max_choice_length
output += "\n"
os.write( sys.stdout.fileno(), output.encode() )
new_line = True
else:
should_beep = True
# Up
#elif key == "\x1b[A":
# print( "UUUUP" )
# Down
#elif key == "\x1b[B":
# print( "DOOOWN" )
# Left
elif key == keys.LEFT:
if line_index > 0:
line_index -= 1
rewrite_line = True
else:
should_beep = True
# Ctrl+Left
# Jump at the beginning of the word
elif key == keys.CTRL_LEFT:
# Purge separators
while line_index > 0 and line[line_index - 1] in WORD_SEPARATORS:
line_index -= 1
# Jump at the beginning of current word
while line_index > 0 and line[line_index - 1] not in WORD_SEPARATORS:
line_index -= 1
rewrite_line = True
# Right
elif key == keys.RIGHT:
if line_index < len(line):
line_index += 1
rewrite_line = True
else:
should_beep = True
# Ctrl+Right
# Jump at the end of the word
elif key == keys.CTRL_RIGHT:
# Purge separators
while line_index < len(line) and line[line_index] in WORD_SEPARATORS:
line_index += 1
# Jump at the next separator
while line_index < len(line) and line[line_index] not in WORD_SEPARATORS:
line_index += 1
rewrite_line = True
# Backspace
elif key == keys.BACKSPACE:
if len(line) > 0 and line_index > 0:
line = line[:line_index - 1] + line[line_index:]
line_index -= 1
rewrite_line = True
else:
should_beep = True
# Delete
elif key == keys.DELETE:
if len(line) > 0 and line_index < len(line):
line = line[:line_index] + line[line_index + 1:]
rewrite_line = True
else:
should_beep = True
# Printable character
# Temporary test to catch unicode sequences
elif (len(key) == 1 and ord(key) >= 32) or (len(key) == 2 and key[0] in [keys.UNICODE1[0], keys.UNICODE2[0]]):
try:
key = key.decode( "utf-8", "replace" )
line = line[:line_index] + key + line[line_index:]
line_index += 1
rewrite_line = True
except UnicodeDecodeError:
continue
os.write( sys.stdout.fileno(), b"\n" )
return line
def print( self, message, end="\n", left_text="", lpad=0, break_words=False, justify_text=True ):
"""Prints a message to the shell output.
Prints a message to the shell output. This message could be left-padded
in order to indent it and a message can be added into the left-padding.
Parameters:
- (str) message: The message to print.
- (str) end: The end of message separator (default "\n").
- (str) left_text: The text printed in the left padding (default: "").
- (int) lpad: The left padding width (default: 0).
- (bool) break_words: Break too long words at end of lines (default: False).
- (bool) justify_text: Change the words spacing to fit the shell width (default: True).
Returns:
- int -- the number of lines printed.
"""
line_length = self._width - lpad
lines_printed = 0
for msg in re.split( r"\n", message ):
if len( msg ) == 0:
print( "", end=end )
lines_printed += 1
else:
i = 0
while i < len( msg ):
pad = left_text if i == 0 else ""
current_line_length = line_length if i + line_length < len(msg) else len(msg) - i
if not break_words and i + current_line_length < len(msg):
while current_line_length > 0 and msg[i+current_line_length] != " ":
current_line_length -= 1
						# Line does not contain any spaces; words are broken.
if current_line_length == 0:
current_line_length = line_length
line = msg[i:i+current_line_length].strip()
i += current_line_length
					# Justify is active, the line is narrower than the shell width, and it is not the last line.
if justify_text and len(line) < line_length and i < len(msg):
spaces_to_add = line_length - len(line)
spaces = len(re.findall( r"\s+", line ))
extended_spaces = len(re.findall( r"[,\.\?!;:]\s", line ))
if spaces > 0:
# Extra spaces are first equally distributed
if spaces_to_add > spaces:
space_width = spaces_to_add // spaces
spaces_to_add = spaces_to_add % spaces
line = re.sub( r"(\s+)", r"\1%s" % ( " " * space_width ), line )
# Remaining spaces
if spaces_to_add > 0:
if extended_spaces > 0:
line = re.sub( r"([,\.\?!;:]\s)", r"\1 ", line, count=spaces_to_add )
spaces_to_add -= extended_spaces
								# Any spaces still left over are added one at a time (unevenly).
if spaces_to_add > 0:
line = re.sub( r"(\s+)", r"\1 ", line, count=spaces_to_add )
print( "%s%s" % ( pad.ljust( lpad ), line ), end=end )
lines_printed += 1
return lines_printed
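	# --- Illustrative sketch (added by the editor, not part of the original class) ---
	# print() wraps the message to self._width - lpad characters per line, justifies
	# every wrapped line except the last by widening existing spaces, and shows
	# left_text only in the gutter of the first line:
	#
	#     shell.print("a fairly long informational message that needs wrapping",
	#                 left_text="[*]", lpad=4)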
def add_command( self, command ):
"""Adds a command to the shell.
Parameters:
- (shell.commands.Command) command: The command to add.
"""
if isinstance( command, commands.Command ):
if command.name in self._commands:
self.log( Language.get( Shell, "replacing_command" ) % command.name, level=3 )
else:
self.log( Language.get( Shell, "loading_command" ) % command.name, level=3 )
self._commands[command.name] = command
for alias in command.aliases:
if alias not in self._commands:
self.log( Language.get( Shell, "adding_alias" ) % ( alias, command.name ), level=3 )
self._commands[alias] = command.name
else:
self.log( Language.get( Shell, "ignoring_alias" ) % alias, level=3 )
else:
self.error( Language.get( Shell, "command_not_loaded" ) )
def get_command( self, command_name ):
command = None
if command_name in self._commands:
# Avoid cyclic-dependencies
tested_names = []
while isinstance( self._commands[command_name], str ) and command_name not in tested_names:
tested_names.append( command_name )
command_name = self._commands[command_name]
if isinstance( self._commands[command_name], commands.Command ):
command = self._commands[command_name]
return command
def execute( self, args=[] ):
"""Executes a parsed command line.
Parameters:
- (list) args: The parsed command line.
"""
command = self.get_command( args[0] )
if command != None:
command.run( self, args )
else:
self.error( Language.get( Shell, "unknown_command" ) % args[0] )
def run( self, args=[] ):
"""Launches the shell session.
Parameters:
- (list) args: Arguments passed to the shell.
"""
# Shell mode
if len(args) == 0:
self._running = True
while self._running:
try:
commandline = self.input( "%s > " % self._title )
args = self.parse_line( commandline )
if len( args ) == 0:
continue
self.execute( args )
except KeyboardInterrupt:
print()
self.log( Language.get( Shell, "interrupt_by_user" ), level=0 )
self.exit()
# Single command execution
else:
self.execute( args )
| gpl-3.0 |
qskycolor/viewfinder | backend/www/test/update_user_photo_test.py | 13 | 4568 | # Copyright 2013 Viewfinder Inc. All Rights Reserved.
"""Tests update_user_photo method.
"""
__author__ = ['[email protected] (Ben Darnell)']
import time
from copy import deepcopy
from viewfinder.backend.base import util
from viewfinder.backend.db.db_client import DBKey
from viewfinder.backend.db.episode import Episode
from viewfinder.backend.db.photo import Photo
from viewfinder.backend.db.user_photo import UserPhoto
from viewfinder.backend.www.test import service_base_test
class UpdateUserPhotoTestCase(service_base_test.ServiceBaseTestCase):
def testUpdateUserPhoto(self):
"""Assign different asset keys to the same photo from different users."""
episode_id, photo_id = self._UploadEpisodeWithPhoto()
# Assign an asset key for user 1
self._tester.UpdateUserPhoto(self._cookie, photo_id, asset_keys=['a/#asset-key-1'])
# User 2 doesn't own the photo (and doesn't even have access to it!) but can still set asset keys.
self._tester.UpdateUserPhoto(self._cookie2, photo_id, asset_keys=['a/#asset-key-2'])
# User 2 can't read the episode yet.
self.assertEqual([],
self._tester.QueryEpisodes(self._cookie2,
[{'episode_id': episode_id, 'get_photos': True}])['episodes'])
# Share the episode with user 2, and then try fetching the asset key
vp_id, new_ep_ids = self._tester.ShareNew(self._cookie, [(episode_id, [photo_id])], [self._user2.user_id])
self.assertEqual(sorted(self._GetAssetKeys(self._cookie2, new_ep_ids[0])), ['a/#asset-key-2'])
def testReplaceUserPhoto(self):
"""Change the asset keys associated with a user/photo."""
episode_id, photo_id = self._UploadEpisodeWithPhoto()
self._tester.UpdateUserPhoto(self._cookie, photo_id, asset_keys=['a/#asset-key-1'])
self.assertEqual(self._GetAssetKeys(self._cookie, episode_id), ['a/#asset-key-1'])
self._tester.UpdateUserPhoto(self._cookie, photo_id, asset_keys=['a/#asset-key-1', 'a/#asset-key-2'])
self.assertEqual(sorted(self._GetAssetKeys(self._cookie, episode_id)), ['a/#asset-key-1', 'a/#asset-key-2'])
# Asset keys are append-only; an empty update doesn't remove what's there.
self._tester.UpdateUserPhoto(self._cookie, photo_id, asset_keys=[])
self.assertEqual(sorted(self._GetAssetKeys(self._cookie, episode_id)), ['a/#asset-key-1', 'a/#asset-key-2'])
def _GetAssetKeys(self, cookie, ep_id):
episodes = self._tester.QueryEpisodes(cookie, [{'episode_id': ep_id, 'get_photos': True}])
photo = episodes['episodes'][0]['photos'][0]
return photo.get('asset_keys')
def _UploadEpisodeWithPhoto(self):
"""Create episode with photo and upload.
    Returns: (episode_id, photo_id) for the created episode and photo.
"""
timestamp = time.time()
episode_id = Episode.ConstructEpisodeId(timestamp, self._device_ids[0], 100)
ep_dict = {'episode_id': episode_id,
'timestamp': timestamp,
'title': 'Episode Title'}
photo_id = Photo.ConstructPhotoId(timestamp, self._device_ids[0], 100)
ph_dict = {'aspect_ratio': 1.3333,
'timestamp': timestamp,
'tn_md5': util.ComputeMD5Hex('thumbnail image data'),
'med_md5': util.ComputeMD5Hex('medium image data'),
'full_md5': util.ComputeMD5Hex('full image data'),
'orig_md5': util.ComputeMD5Hex('original image data'),
'tn_size': 5*1024,
'med_size': 10*1024,
'full_size': 150*1024,
'orig_size': 1200*1024,
'caption': 'a photo',
'photo_id': photo_id}
self._tester.UploadEpisode(self._cookie, ep_dict, [ph_dict])
return episode_id, photo_id
def _TestUpdateUserPhoto(tester, user_cookie, request_dict):
validator = tester.validator
user_id, device_id = tester.GetIdsFromCookie(user_cookie)
request_dict = deepcopy(request_dict)
actual_dict = tester.SendRequest('update_user_photo', user_cookie, request_dict)
existing = validator.GetModelObject(UserPhoto, DBKey(user_id, request_dict['photo_id']), must_exist=False)
if existing is None:
asset_keys = request_dict['asset_keys']
else:
asset_keys = set(request_dict['asset_keys'])
asset_keys.update(existing.asset_keys)
up_dict = {'user_id': user_id,
'photo_id': request_dict['photo_id'],
'asset_keys': asset_keys}
validator.ValidateUpdateDBObject(UserPhoto, **up_dict)
tester._CompareResponseDicts('update_user_photo', user_id, request_dict, {}, actual_dict)
return actual_dict
| apache-2.0 |
bobcyw/django | django/contrib/postgres/fields/array.py | 186 | 8424 | import json
from django.contrib.postgres import lookups
from django.contrib.postgres.forms import SimpleArrayField
from django.contrib.postgres.validators import ArrayMaxLengthValidator
from django.core import checks, exceptions
from django.db.models import Field, IntegerField, Transform
from django.utils import six
from django.utils.translation import string_concat, ugettext_lazy as _
from .utils import AttributeSetter
__all__ = ['ArrayField']
class ArrayField(Field):
empty_strings_allowed = False
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
'nested_array_mismatch': _('Nested arrays must have the same length.'),
}
def __init__(self, base_field, size=None, **kwargs):
self.base_field = base_field
self.size = size
if self.size:
self.default_validators = self.default_validators[:]
self.default_validators.append(ArrayMaxLengthValidator(self.size))
super(ArrayField, self).__init__(**kwargs)
def contribute_to_class(self, cls, name, **kwargs):
super(ArrayField, self).contribute_to_class(cls, name, **kwargs)
self.base_field.model = cls
def check(self, **kwargs):
errors = super(ArrayField, self).check(**kwargs)
if self.base_field.remote_field:
errors.append(
checks.Error(
'Base field for array cannot be a related field.',
hint=None,
obj=self,
id='postgres.E002'
)
)
else:
# Remove the field name checks as they are not needed here.
base_errors = self.base_field.check()
if base_errors:
messages = '\n '.join('%s (%s)' % (error.msg, error.id) for error in base_errors)
errors.append(
checks.Error(
'Base field for array has errors:\n %s' % messages,
hint=None,
obj=self,
id='postgres.E001'
)
)
return errors
def set_attributes_from_name(self, name):
super(ArrayField, self).set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
@property
def description(self):
return 'Array of %s' % self.base_field.description
def db_type(self, connection):
size = self.size or ''
return '%s[%s]' % (self.base_field.db_type(connection), size)
def get_db_prep_value(self, value, connection, prepared=False):
if isinstance(value, list) or isinstance(value, tuple):
return [self.base_field.get_db_prep_value(i, connection, prepared) for i in value]
return value
def deconstruct(self):
name, path, args, kwargs = super(ArrayField, self).deconstruct()
if path == 'django.contrib.postgres.fields.array.ArrayField':
path = 'django.contrib.postgres.fields.ArrayField'
kwargs.update({
'base_field': self.base_field,
'size': self.size,
})
return name, path, args, kwargs
def to_python(self, value):
if isinstance(value, six.string_types):
# Assume we're deserializing
vals = json.loads(value)
value = [self.base_field.to_python(val) for val in vals]
return value
def value_to_string(self, obj):
values = []
vals = self.value_from_object(obj)
base_field = self.base_field
for val in vals:
obj = AttributeSetter(base_field.attname, val)
values.append(base_field.value_to_string(obj))
return json.dumps(values)
def get_transform(self, name):
transform = super(ArrayField, self).get_transform(name)
if transform:
return transform
try:
index = int(name)
except ValueError:
pass
else:
index += 1 # postgres uses 1-indexing
return IndexTransformFactory(index, self.base_field)
try:
start, end = name.split('_')
start = int(start) + 1
end = int(end) # don't add one here because postgres slices are weird
except ValueError:
pass
else:
return SliceTransformFactory(start, end)
def validate(self, value, model_instance):
super(ArrayField, self).validate(value, model_instance)
for i, part in enumerate(value):
try:
self.base_field.validate(part, model_instance)
except exceptions.ValidationError as e:
raise exceptions.ValidationError(
string_concat(self.error_messages['item_invalid'], e.message),
code='item_invalid',
params={'nth': i},
)
if isinstance(self.base_field, ArrayField):
if len({len(i) for i in value}) > 1:
raise exceptions.ValidationError(
self.error_messages['nested_array_mismatch'],
code='nested_array_mismatch',
)
def run_validators(self, value):
super(ArrayField, self).run_validators(value)
for i, part in enumerate(value):
try:
self.base_field.run_validators(part)
except exceptions.ValidationError as e:
raise exceptions.ValidationError(
string_concat(self.error_messages['item_invalid'], ' '.join(e.messages)),
code='item_invalid',
params={'nth': i},
)
def formfield(self, **kwargs):
defaults = {
'form_class': SimpleArrayField,
'base_field': self.base_field.formfield(),
'max_length': self.size,
}
defaults.update(kwargs)
return super(ArrayField, self).formfield(**defaults)
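# --- Illustrative sketch (added by the editor, not part of the original module) ---
# A hypothetical model using ArrayField together with the lookups and transforms
# registered below; the model and field names are assumptions, not part of Django.
#
#     from django.contrib.postgres.fields import ArrayField
#     from django.db import models
#
#     class Post(models.Model):
#         tags = ArrayField(models.CharField(max_length=32), size=8)
#
#     Post.objects.filter(tags__contains=['django'])   # ArrayContains
#     Post.objects.filter(tags__len=3)                 # ArrayLenTransform
#     Post.objects.filter(tags__0='django')            # IndexTransform (1-indexed in SQL)
#     Post.objects.filter(tags__0_2=['a', 'b'])        # SliceTransform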
@ArrayField.register_lookup
class ArrayContains(lookups.DataContains):
def as_sql(self, qn, connection):
sql, params = super(ArrayContains, self).as_sql(qn, connection)
sql += '::%s' % self.lhs.output_field.db_type(connection)
return sql, params
@ArrayField.register_lookup
class ArrayContainedBy(lookups.ContainedBy):
def as_sql(self, qn, connection):
sql, params = super(ArrayContainedBy, self).as_sql(qn, connection)
sql += '::%s' % self.lhs.output_field.db_type(connection)
return sql, params
@ArrayField.register_lookup
class ArrayOverlap(lookups.Overlap):
def as_sql(self, qn, connection):
sql, params = super(ArrayOverlap, self).as_sql(qn, connection)
sql += '::%s' % self.lhs.output_field.db_type(connection)
return sql, params
@ArrayField.register_lookup
class ArrayLenTransform(Transform):
lookup_name = 'len'
output_field = IntegerField()
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return 'array_length(%s, 1)' % lhs, params
class IndexTransform(Transform):
def __init__(self, index, base_field, *args, **kwargs):
super(IndexTransform, self).__init__(*args, **kwargs)
self.index = index
self.base_field = base_field
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s]' % (lhs, self.index), params
@property
def output_field(self):
return self.base_field
class IndexTransformFactory(object):
def __init__(self, index, base_field):
self.index = index
self.base_field = base_field
def __call__(self, *args, **kwargs):
return IndexTransform(self.index, self.base_field, *args, **kwargs)
class SliceTransform(Transform):
def __init__(self, start, end, *args, **kwargs):
super(SliceTransform, self).__init__(*args, **kwargs)
self.start = start
self.end = end
def as_sql(self, compiler, connection):
lhs, params = compiler.compile(self.lhs)
return '%s[%s:%s]' % (lhs, self.start, self.end), params
class SliceTransformFactory(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __call__(self, *args, **kwargs):
return SliceTransform(self.start, self.end, *args, **kwargs)
| bsd-3-clause |
talishte/ctigre | env/lib/python2.7/site-packages/Crypto/Signature/__init__.py | 126 | 1202 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Digital signature protocols
A collection of standardized protocols to carry out digital signatures.
:undocumented: __revision__, __package__
"""
__all__ = [ 'PKCS1_v1_5', 'PKCS1_PSS' ]
__revision__ = "$Id$"
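# --- Illustrative sketch (added by the editor, not part of the original module) ---
# A minimal PKCS#1 v1.5 sign/verify round trip with the classic PyCrypto API; the
# message and key size are arbitrary.
#
#     from Crypto.Signature import PKCS1_v1_5
#     from Crypto.Hash import SHA256
#     from Crypto.PublicKey import RSA
#
#     key = RSA.generate(2048)
#     h = SHA256.new(b"message to sign")
#     signature = PKCS1_v1_5.new(key).sign(h)
#     assert PKCS1_v1_5.new(key.publickey()).verify(h, signature)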
| bsd-2-clause |
meteorcloudy/bazel | tools/jdk/proguard_whitelister_test.py | 13 | 2988 | # Lint as: python2, python3
# Copyright 2015 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
# Do not edit this line. Copybara replaces it with PY2 migration helper.
import six
from tools.jdk import proguard_whitelister
class ProguardConfigValidatorTest(unittest.TestCase):
def _CreateValidator(self, input_path, output_path):
return proguard_whitelister.ProguardConfigValidator(input_path, output_path)
def testValidConfig(self):
input_path = os.path.join(
os.path.dirname(__file__), "proguard_whitelister_test_input.pgcfg")
tmpdir = os.environ["TEST_TMPDIR"]
output_path = os.path.join(tmpdir, "proguard_whitelister_test_output.pgcfg")
# This will raise an exception if the config is invalid.
self._CreateValidator(input_path, output_path).ValidateAndWriteOutput()
with open(output_path) as output:
self.assertTrue(("# Merged from %s" % input_path) in output.read())
def _TestInvalidConfig(self, invalid_args, config):
tmpdir = os.environ["TEST_TMPDIR"]
input_path = os.path.join(tmpdir, "proguard_whitelister_test_input.pgcfg")
with open(input_path, "w") as f:
f.write(six.ensure_str(config))
output_path = os.path.join(tmpdir, "proguard_whitelister_test_output.pgcfg")
validator = self._CreateValidator(input_path, output_path)
try:
validator.ValidateAndWriteOutput()
self.fail()
except RuntimeError as e:
for invalid_arg in invalid_args:
self.assertTrue(six.ensure_str(invalid_arg) in str(e))
def testInvalidNoteConfig(self):
self._TestInvalidConfig(["-dontnote"], """\
# We don"t want libraries disabling notes globally.
-dontnote""")
def testInvalidWarnConfig(self):
self._TestInvalidConfig(["-dontwarn"], """\
# We don"t want libraries disabling warnings globally.
-dontwarn""")
def testInvalidOptimizationConfig(self):
self._TestInvalidConfig(["-optimizations"], """\
# We don"t want libraries disabling global optimizations.
-optimizations !class/merging/*,!code/allocation/variable""")
def testMultipleInvalidArgs(self):
self._TestInvalidConfig(["-optimizations", "-dontnote"], """\
# We don"t want libraries disabling global optimizations.
-optimizations !class/merging/*,!code/allocation/variable
-dontnote""")
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
vmg/hg-stable | hgext/convert/gnuarch.py | 94 | 12716 | # gnuarch.py - GNU Arch support for the convert extension
#
# Copyright 2008, 2009 Aleix Conchillo Flaque <[email protected]>
# and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from common import NoRepo, commandline, commit, converter_source
from mercurial.i18n import _
from mercurial import encoding, util
import os, shutil, tempfile, stat
from email.Parser import Parser
class gnuarch_source(converter_source, commandline):
class gnuarch_rev(object):
def __init__(self, rev):
self.rev = rev
self.summary = ''
self.date = None
self.author = ''
self.continuationof = None
self.add_files = []
self.mod_files = []
self.del_files = []
self.ren_files = {}
self.ren_dirs = {}
def __init__(self, ui, path, rev=None):
super(gnuarch_source, self).__init__(ui, path, rev=rev)
if not os.path.exists(os.path.join(path, '{arch}')):
raise NoRepo(_("%s does not look like a GNU Arch repository")
% path)
# Could use checktool, but we want to check for baz or tla.
self.execmd = None
if util.findexe('baz'):
self.execmd = 'baz'
else:
if util.findexe('tla'):
self.execmd = 'tla'
else:
raise util.Abort(_('cannot find a GNU Arch tool'))
commandline.__init__(self, ui, self.execmd)
self.path = os.path.realpath(path)
self.tmppath = None
self.treeversion = None
self.lastrev = None
self.changes = {}
self.parents = {}
self.tags = {}
self.catlogparser = Parser()
self.encoding = encoding.encoding
self.archives = []
def before(self):
# Get registered archives
self.archives = [i.rstrip('\n')
for i in self.runlines0('archives', '-n')]
if self.execmd == 'tla':
output = self.run0('tree-version', self.path)
else:
output = self.run0('tree-version', '-d', self.path)
self.treeversion = output.strip()
# Get name of temporary directory
version = self.treeversion.split('/')
self.tmppath = os.path.join(tempfile.gettempdir(),
'hg-%s' % version[1])
# Generate parents dictionary
self.parents[None] = []
treeversion = self.treeversion
child = None
while treeversion:
self.ui.status(_('analyzing tree version %s...\n') % treeversion)
archive = treeversion.split('/')[0]
if archive not in self.archives:
self.ui.status(_('tree analysis stopped because it points to '
'an unregistered archive %s...\n') % archive)
break
# Get the complete list of revisions for that tree version
output, status = self.runlines('revisions', '-r', '-f', treeversion)
self.checkexit(status, 'failed retrieving revisions for %s'
% treeversion)
# No new iteration unless a revision has a continuation-of header
treeversion = None
for l in output:
rev = l.strip()
self.changes[rev] = self.gnuarch_rev(rev)
self.parents[rev] = []
# Read author, date and summary
catlog, status = self.run('cat-log', '-d', self.path, rev)
if status:
catlog = self.run0('cat-archive-log', rev)
self._parsecatlog(catlog, rev)
# Populate the parents map
self.parents[child].append(rev)
# Keep track of the current revision as the child of the next
# revision scanned
child = rev
# Check if we have to follow the usual incremental history
# or if we have to 'jump' to a different treeversion given
# by the continuation-of header.
if self.changes[rev].continuationof:
treeversion = '--'.join(
self.changes[rev].continuationof.split('--')[:-1])
break
# If we reached a base-0 revision w/o any continuation-of
# header, it means the tree history ends here.
if rev[-6:] == 'base-0':
break
def after(self):
self.ui.debug('cleaning up %s\n' % self.tmppath)
shutil.rmtree(self.tmppath, ignore_errors=True)
def getheads(self):
return self.parents[None]
def getfile(self, name, rev):
if rev != self.lastrev:
raise util.Abort(_('internal calling inconsistency'))
# Raise IOError if necessary (i.e. deleted files).
if not os.path.lexists(os.path.join(self.tmppath, name)):
raise IOError
return self._getfile(name, rev)
def getchanges(self, rev):
self._update(rev)
changes = []
copies = {}
for f in self.changes[rev].add_files:
changes.append((f, rev))
for f in self.changes[rev].mod_files:
changes.append((f, rev))
for f in self.changes[rev].del_files:
changes.append((f, rev))
for src in self.changes[rev].ren_files:
to = self.changes[rev].ren_files[src]
changes.append((src, rev))
changes.append((to, rev))
copies[to] = src
for src in self.changes[rev].ren_dirs:
to = self.changes[rev].ren_dirs[src]
chgs, cps = self._rendirchanges(src, to)
changes += [(f, rev) for f in chgs]
copies.update(cps)
self.lastrev = rev
return sorted(set(changes)), copies
def getcommit(self, rev):
changes = self.changes[rev]
return commit(author=changes.author, date=changes.date,
desc=changes.summary, parents=self.parents[rev], rev=rev)
def gettags(self):
return self.tags
def _execute(self, cmd, *args, **kwargs):
cmdline = [self.execmd, cmd]
cmdline += args
cmdline = [util.shellquote(arg) for arg in cmdline]
cmdline += ['>', os.devnull, '2>', os.devnull]
cmdline = util.quotecommand(' '.join(cmdline))
self.ui.debug(cmdline, '\n')
return os.system(cmdline)
def _update(self, rev):
self.ui.debug('applying revision %s...\n' % rev)
changeset, status = self.runlines('replay', '-d', self.tmppath,
rev)
if status:
# Something went wrong while merging (baz or tla
# issue?), get latest revision and try from there
shutil.rmtree(self.tmppath, ignore_errors=True)
self._obtainrevision(rev)
else:
old_rev = self.parents[rev][0]
self.ui.debug('computing changeset between %s and %s...\n'
% (old_rev, rev))
self._parsechangeset(changeset, rev)
def _getfile(self, name, rev):
mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
if stat.S_ISLNK(mode):
data = os.readlink(os.path.join(self.tmppath, name))
mode = mode and 'l' or ''
else:
data = open(os.path.join(self.tmppath, name), 'rb').read()
mode = (mode & 0111) and 'x' or ''
return data, mode
def _exclude(self, name):
exclude = ['{arch}', '.arch-ids', '.arch-inventory']
for exc in exclude:
if name.find(exc) != -1:
return True
return False
def _readcontents(self, path):
files = []
contents = os.listdir(path)
while len(contents) > 0:
c = contents.pop()
p = os.path.join(path, c)
# os.walk could be used, but here we avoid internal GNU
# Arch files and directories, thus saving a lot time.
if not self._exclude(p):
if os.path.isdir(p):
contents += [os.path.join(c, f) for f in os.listdir(p)]
else:
files.append(c)
return files
def _rendirchanges(self, src, dest):
changes = []
copies = {}
files = self._readcontents(os.path.join(self.tmppath, dest))
for f in files:
s = os.path.join(src, f)
d = os.path.join(dest, f)
changes.append(s)
changes.append(d)
copies[d] = s
return changes, copies
def _obtainrevision(self, rev):
self.ui.debug('obtaining revision %s...\n' % rev)
output = self._execute('get', rev, self.tmppath)
self.checkexit(output)
self.ui.debug('analyzing revision %s...\n' % rev)
files = self._readcontents(self.tmppath)
self.changes[rev].add_files += files
def _stripbasepath(self, path):
if path.startswith('./'):
return path[2:]
return path
def _parsecatlog(self, data, rev):
try:
catlog = self.catlogparser.parsestr(data)
# Commit date
self.changes[rev].date = util.datestr(
util.strdate(catlog['Standard-date'],
'%Y-%m-%d %H:%M:%S'))
# Commit author
self.changes[rev].author = self.recode(catlog['Creator'])
# Commit description
self.changes[rev].summary = '\n\n'.join((catlog['Summary'],
catlog.get_payload()))
self.changes[rev].summary = self.recode(self.changes[rev].summary)
# Commit revision origin when dealing with a branch or tag
if 'Continuation-of' in catlog:
self.changes[rev].continuationof = self.recode(
catlog['Continuation-of'])
except Exception:
raise util.Abort(_('could not parse cat-log of %s') % rev)
def _parsechangeset(self, data, rev):
for l in data:
l = l.strip()
# Added file (ignore added directory)
if l.startswith('A') and not l.startswith('A/'):
file = self._stripbasepath(l[1:].strip())
if not self._exclude(file):
self.changes[rev].add_files.append(file)
# Deleted file (ignore deleted directory)
elif l.startswith('D') and not l.startswith('D/'):
file = self._stripbasepath(l[1:].strip())
if not self._exclude(file):
self.changes[rev].del_files.append(file)
# Modified binary file
elif l.startswith('Mb'):
file = self._stripbasepath(l[2:].strip())
if not self._exclude(file):
self.changes[rev].mod_files.append(file)
# Modified link
elif l.startswith('M->'):
file = self._stripbasepath(l[3:].strip())
if not self._exclude(file):
self.changes[rev].mod_files.append(file)
# Modified file
elif l.startswith('M'):
file = self._stripbasepath(l[1:].strip())
if not self._exclude(file):
self.changes[rev].mod_files.append(file)
# Renamed file (or link)
elif l.startswith('=>'):
files = l[2:].strip().split(' ')
if len(files) == 1:
files = l[2:].strip().split('\t')
src = self._stripbasepath(files[0])
dst = self._stripbasepath(files[1])
if not self._exclude(src) and not self._exclude(dst):
self.changes[rev].ren_files[src] = dst
# Conversion from file to link or from link to file (modified)
elif l.startswith('ch'):
file = self._stripbasepath(l[2:].strip())
if not self._exclude(file):
self.changes[rev].mod_files.append(file)
# Renamed directory
elif l.startswith('/>'):
dirs = l[2:].strip().split(' ')
if len(dirs) == 1:
dirs = l[2:].strip().split('\t')
src = self._stripbasepath(dirs[0])
dst = self._stripbasepath(dirs[1])
if not self._exclude(src) and not self._exclude(dst):
self.changes[rev].ren_dirs[src] = dst
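# --- Editor's note (not part of the original module) ---
# The convert extension selects this source automatically when the source tree
# contains an `{arch}` directory (see the NoRepo check in __init__), e.g.:
#
#     hg convert /path/to/arch-working-tree converted-hg-repo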
| gpl-2.0 |
harisibrahimkv/django | tests/inline_formsets/tests.py | 131 | 8414 | from django.forms.models import ModelForm, inlineformset_factory
from django.test import TestCase, skipUnlessDBFeature
from .models import Child, Parent, Poem, Poet, School
class DeletionTests(TestCase):
def test_deletion(self):
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True, fields="__all__")
poet = Poet.objects.create(name='test')
poem = poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '1',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': str(poem.pk),
'poem_set-0-poet': str(poet.pk),
'poem_set-0-name': 'test',
'poem_set-0-DELETE': 'on',
}
formset = PoemFormSet(data, instance=poet)
formset.save()
self.assertTrue(formset.is_valid())
self.assertEqual(Poem.objects.count(), 0)
def test_add_form_deletion_when_invalid(self):
"""
Make sure that an add form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True, fields="__all__")
poet = Poet.objects.create(name='test')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '0',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': '',
'poem_set-0-poem': '1',
'poem_set-0-name': 'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertIs(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 0)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertIs(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_change_form_deletion_when_invalid(self):
"""
Make sure that a change form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoemFormSet = inlineformset_factory(Poet, Poem, can_delete=True, fields="__all__")
poet = Poet.objects.create(name='test')
poem = poet.poem_set.create(name='test poem')
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '1',
'poem_set-MAX_NUM_FORMS': '0',
'poem_set-0-id': str(poem.id),
'poem_set-0-poem': str(poem.id),
'poem_set-0-name': 'x' * 1000,
}
formset = PoemFormSet(data, instance=poet)
# Make sure this form doesn't pass validation.
self.assertIs(formset.is_valid(), False)
self.assertEqual(Poem.objects.count(), 1)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['poem_set-0-DELETE'] = 'on'
formset = PoemFormSet(data, instance=poet)
self.assertIs(formset.is_valid(), True)
formset.save()
self.assertEqual(Poem.objects.count(), 0)
def test_save_new(self):
"""
Make sure inlineformsets respect commit=False
regression for #10750
"""
# exclude some required field from the forms
ChildFormSet = inlineformset_factory(School, Child, exclude=['father', 'mother'])
school = School.objects.create(name='test')
mother = Parent.objects.create(name='mother')
father = Parent.objects.create(name='father')
data = {
'child_set-TOTAL_FORMS': '1',
'child_set-INITIAL_FORMS': '0',
'child_set-MAX_NUM_FORMS': '0',
'child_set-0-name': 'child',
}
formset = ChildFormSet(data, instance=school)
self.assertIs(formset.is_valid(), True)
objects = formset.save(commit=False)
for obj in objects:
obj.mother = mother
obj.father = father
obj.save()
self.assertEqual(school.child_set.count(), 1)
class InlineFormsetFactoryTest(TestCase):
def test_inline_formset_factory(self):
"""
These should both work without a problem.
"""
inlineformset_factory(Parent, Child, fk_name='mother', fields="__all__")
inlineformset_factory(Parent, Child, fk_name='father', fields="__all__")
def test_exception_on_unspecified_foreign_key(self):
"""
Child has two ForeignKeys to Parent, so if we don't specify which one
to use for the inline formset, we should get an exception.
"""
msg = "'inline_formsets.Child' has more than one ForeignKey to 'inline_formsets.Parent'."
with self.assertRaisesMessage(ValueError, msg):
inlineformset_factory(Parent, Child)
def test_fk_name_not_foreign_key_field_from_child(self):
"""
If we specify fk_name, but it isn't a ForeignKey from the child model
to the parent model, we should get an exception.
"""
msg = "fk_name 'school' is not a ForeignKey to 'inline_formsets.Parent'."
with self.assertRaisesMessage(ValueError, msg):
inlineformset_factory(Parent, Child, fk_name='school')
def test_non_foreign_key_field(self):
"""
If the field specified in fk_name is not a ForeignKey, we should get an
exception.
"""
with self.assertRaisesMessage(ValueError, "'inline_formsets.Child' has no field named 'test'."):
inlineformset_factory(Parent, Child, fk_name='test')
def test_any_iterable_allowed_as_argument_to_exclude(self):
# Regression test for #9171.
inlineformset_factory(
Parent, Child, exclude=['school'], fk_name='mother'
)
inlineformset_factory(
Parent, Child, exclude=('school',), fk_name='mother'
)
@skipUnlessDBFeature('allows_auto_pk_0')
def test_zero_primary_key(self):
# Regression test for #21472
poet = Poet.objects.create(id=0, name='test')
poet.poem_set.create(name='test poem')
PoemFormSet = inlineformset_factory(Poet, Poem, fields="__all__", extra=0)
formset = PoemFormSet(None, instance=poet)
self.assertEqual(len(formset.forms), 1)
def test_unsaved_fk_validate_unique(self):
poet = Poet(name='unsaved')
PoemFormSet = inlineformset_factory(Poet, Poem, fields=['name'])
data = {
'poem_set-TOTAL_FORMS': '2',
'poem_set-INITIAL_FORMS': '0',
'poem_set-MAX_NUM_FORMS': '2',
'poem_set-0-name': 'Poem',
'poem_set-1-name': 'Poem',
}
formset = PoemFormSet(data, instance=poet)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please correct the duplicate data for name.'])
def test_fk_not_duplicated_in_form_fields(self):
"""
A foreign key name isn't duplicated in form._meta fields (#21332).
"""
poet = Poet.objects.create(name='test')
poet.poem_set.create(name='first test poem')
poet.poem_set.create(name='second test poem')
poet.poem_set.create(name='third test poem')
PoemFormSet = inlineformset_factory(Poet, Poem, fields=('name',), extra=0)
formset = PoemFormSet(None, instance=poet)
self.assertEqual(len(formset.forms), 3)
self.assertEqual(['name', 'poet'], PoemFormSet.form._meta.fields)
def test_fk_in_all_formset_forms(self):
"""
A foreign key field is in Meta for all forms in the formset (#26538).
"""
class PoemModelForm(ModelForm):
def __init__(self, *args, **kwargs):
assert 'poet' in self._meta.fields
super().__init__(*args, **kwargs)
poet = Poet.objects.create(name='test')
poet.poem_set.create(name='first test poem')
poet.poem_set.create(name='second test poem')
PoemFormSet = inlineformset_factory(Poet, Poem, form=PoemModelForm, fields=('name',), extra=0)
formset = PoemFormSet(None, instance=poet)
formset.forms # Trigger form instantiation to run the assert above.
| bsd-3-clause |
gordon-elliott/glod | src/glod/unittests/model/test_statement_item_collection.py | 1 | 5394 | __copyright__ = 'Copyright(c) Gordon Elliott 2017'
"""
"""
from datetime import date, timedelta
from decimal import Decimal
from unittest import TestCase
from glod.model.statement_item import StatementItem
from glod.model.statement_item_collection import StatementItemCollection
from glod.model.account import Account
class TestStatementItemCollection(TestCase):
def setUp(self):
super().setUp()
self.account = Account(4003, name='current', account_no='3983789')
self.one_day = timedelta(1)
self.today = date.today()
def test_remove_net_zero_items(self):
opening_balance = Decimal('1000.00')
final_credit = Decimal('1.01')
final_balance = Decimal('1002.02')
items = (
# earliest
StatementItem(self.account, self.today, 'details', 'EUR', None, Decimal('1.01'), opening_balance),
StatementItem(self.account, self.today + self.one_day * 1, 'details', 'EUR', Decimal('1.01'), None, Decimal('1001.01')),
StatementItem(self.account, self.today + self.one_day * 2, 'details', 'EUR', None, None, Decimal('1000.00')),
StatementItem(self.account, self.today + self.one_day * 3, 'details', 'EUR', None, None, Decimal('1000.00')),
StatementItem(self.account, self.today + self.one_day * 4, 'details', 'EUR', None, Decimal('1.01'), Decimal('1000.00')),
StatementItem(self.account, self.today + self.one_day * 5, 'details', 'EUR', None, None, Decimal('1001.01')),
StatementItem(self.account, self.today + self.one_day * 6, 'details', 'EUR', None, Decimal('1.01'), Decimal('1001.01')),
StatementItem(self.account, self.today + self.one_day * 7, 'details', 'EUR', None, final_credit, final_balance),
# latest
)
collection = StatementItemCollection(items)
deduped = list(collection.remove_net_zero_items())
self.assertEqual(5, len(deduped))
self.assertEqual(
final_balance + final_credit,
opening_balance + sum((item.net for item in deduped))
)
def test_remove_net_zero_items_two_accounts(self):
other_account = Account(4004, name='savings', account_no='9388729')
opening_balance = Decimal('1000.00')
items = (
# earliest
StatementItem(self.account, self.today, 'details', 'EUR', None, Decimal('1.01'), opening_balance),
StatementItem(self.account, self.today + self.one_day * 1, 'details', 'EUR', Decimal('1.01'), None, Decimal('1001.01')),
StatementItem(self.account, self.today + self.one_day * 2, 'details', 'EUR', None, None, Decimal('1000.00')),
StatementItem(other_account, self.today + self.one_day * 3, 'details', 'EUR', None, None, Decimal('1000.00')),
StatementItem(other_account, self.today + self.one_day * 4, 'details', 'EUR', None, Decimal('1.01'), Decimal('1000.00')),
# latest
)
collection = StatementItemCollection(items)
deduped = list(collection.remove_net_zero_items())
self.assertEqual(3, len(deduped))
def test_remove_net_zero_items_where_no_transactions_leave_balance_update(self):
other_account = Account(4004, name='savings', account_no='9388729')
opening_balance = Decimal('1000.00')
items = (
# earliest
StatementItem(self.account, self.today, 'details', 'EUR', None, Decimal('1.01'), opening_balance),
StatementItem(self.account, self.today + self.one_day * 1, 'details', 'EUR', Decimal('1.01'), None, Decimal('1001.01')),
StatementItem(self.account, self.today + self.one_day * 2, 'details', 'EUR', None, None, Decimal('1000.00')),
StatementItem(other_account, self.today, 'details', 'EUR', None, None, Decimal('1000.00')),
# latest
)
collection = StatementItemCollection(items)
deduped = list(collection.remove_net_zero_items())
self.assertEqual(3, len(deduped))
def test_most_common_month(self):
one_month = timedelta(days=31)
last_month = self.today - one_month
next_month = self.today + one_month
items = (
# earliest
StatementItem(self.account, last_month, 'details', 'EUR', None, Decimal('1.01'), Decimal('1000.00')),
StatementItem(self.account, last_month, 'details', 'EUR', Decimal('1.01'), None, Decimal('1001.01')),
StatementItem(self.account, self.today, 'details', 'EUR', None, None, Decimal('1000.00')),
StatementItem(self.account, self.today, 'details', 'EUR', None, None, Decimal('1000.00')),
StatementItem(self.account, self.today, 'details', 'EUR', None, Decimal('1.01'), Decimal('1000.00')),
StatementItem(self.account, self.today, 'details', 'EUR', None, None, Decimal('1001.01')),
StatementItem(self.account, self.today, 'details', 'EUR', None, Decimal('1.01'), Decimal('1001.01')),
StatementItem(self.account, next_month, 'details', 'EUR', None, Decimal('1.01'), Decimal('1002.02')),
# latest
)
collection = StatementItemCollection(items)
single_month = list(collection.only_most_common_months(1))
self.assertEqual(5, len(single_month))
| mit |
homeworkprod/dbb-ranking-parser | dbbrankingparser/document.py | 1 | 1573 | # -*- coding: utf-8 -*-
"""
dbbrankingparser.document
~~~~~~~~~~~~~~~~~~~~~~~~~
HTML document utilities
:Copyright: 2006-2016 Jochen Kupperschmidt
:License: MIT, see LICENSE for details.
"""
from lxml.html import document_fromstring
from .conversion import convert_attributes
def parse(html):
"""Yield ranks extracted from HTML document."""
trs = select_rank_rows(html)
return parse_rank_rows(trs)
def select_rank_rows(html):
"""Return the table rows that are expected to contain rank data."""
root = document_fromstring(html)
return root.xpath(
'body/form/table[@class="sportView"][2]/tr[position() > 1]')
def parse_rank_rows(trs):
"""Yield ranks extracted from table rows."""
for tr in trs:
rank = parse_rank_row(tr)
if rank:
yield rank
def parse_rank_row(tr):
"""Attempt to extract a single rank's properties from a table row."""
team_has_withdrawn = has_team_withdrawn(tr)
values = get_rank_values(tr, team_has_withdrawn)
if not values:
return None
attributes = convert_attributes(values)
attributes['withdrawn'] = team_has_withdrawn
return attributes
def has_team_withdrawn(tr):
"""Return `True` if the markup indicates that the team has withdrawn."""
return bool(tr.xpath('td[2]/nobr/strike'))
def get_rank_values(tr, team_has_withdrawn):
"""Return that row's cell values."""
xpath_expression = 'td/nobr/strike/text()' if team_has_withdrawn \
else 'td/nobr/text()'
return tr.xpath(xpath_expression)
| mit |
keith923/dotfiles | jupyter/jupyter_notebook_config.py | 1 | 21738 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
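## Example (illustrative; the domain below is a placeholder, not a recommended value):
#c.NotebookApp.allow_origin_pat = 'https://.*\.example\.org'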
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.filemanager.FileContentsManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Use minified JS file or not, mainly used during dev to avoid JS recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which messages can be sent on iopub before they
# are limited.
#c.NotebookApp.iopub_data_rate_limit = 0
## (msg/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 0
## The IP address the notebook server will listen on.
#c.NotebookApp.ip = 'localhost'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions. Entry values can
# be used to enable and disable the loading of the extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
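## Example (illustrative; 'myextension' is a placeholder module name):
#c.NotebookApp.nbserver_extensions = {'myextension': True}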
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = ''
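## Example (illustrative only; generate your own hash with the snippet above):
#c.NotebookApp.password = 'sha1:<salt>:<hashed-password>'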
## The port the notebook server will listen on.
#c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 1.0
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'd3x923'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved to a temporary file on disk and then, if
# successfully written, the temporary file replaces the old one. This procedure,
# namely 'atomic_writing', causes some bugs on file systems without operation
# order enforcement (like some networked filesystems). If set to False, the new
# notebook is written directly over the old one, which could fail (e.g. full
# filesystem or quota exceeded).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
#c.NotebookNotary.cache_size = 65535
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
| mit |
nvoron23/paragraph2vec | setup.py | 11 | 4911 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Run with:
sudo python ./setup.py install
"""
import os
import sys
if sys.version_info[:2] < (2, 5):
raise Exception('This version of gensim needs Python 2.5 or later. ')
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages, Extension
# Commonly used information
pkg_name = 'gensim'
pkg_ver = '0.10.1'
pkg_desc = 'Python framework for fast Vector Space Modelling'
# there is a bug in python2.5, preventing distutils from using any non-ascii characters :( http://bugs.python.org/issue2562
pkg_author = 'Radim Rehurek' # u'Radim Řehůřek', # <- should really be this...
pkg_author_email = '[email protected]'
pkg_url = 'http://radimrehurek.com/gensim'
pkg_download_url = 'http://pypi.python.org/pypi/gensim'
pkg_keywords = ('Singular Value Decomposition, SVD, Latent Semantic Indexing, '
                'LSA, LSI, Latent Dirichlet Allocation, LDA, '
                'Hierarchical Dirichlet Process, HDP, Random Projections, '
                'TFIDF, word2vec')
pkg_classifiers = [ # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 3.3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Text Processing :: Linguistic',
]
pkg_license = 'LGPL'
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
native_ext = False
setup(
name = pkg_name,
version = pkg_ver,
description = pkg_desc,
long_description = read('README.rst'),
packages = find_packages(exclude=[ pkg_name + '_addons', pkg_name + '_addons.*']),
author = pkg_author,
author_email = pkg_author_email,
url = pkg_url,
download_url = pkg_download_url,
keywords = pkg_keywords,
license = pkg_license,
platforms = 'any',
zip_safe = False,
classifiers = pkg_classifiers,
test_suite = "gensim.test",
install_requires = [
'scipy >= 0.7.0',
'six >= 1.2.0',
],
extras_require = {
'distributed': ['Pyro4 >= 4.8'],
},
include_package_data = True,
# lines below are commented out to avoid installing system-wide scripts
# scripts can be run by running `python -m module_name` instead: less
# flexible but more explicit and imo cleaner.
# entry_points = {
# 'console_scripts': [
# 'lsi_worker = gensim.models.lsi_worker:main',
# 'lsi_dispatcher = gensim.models.lsi_dispatcher:main',
# ],
# },
)
# Here comes the setup for cythonized native addon-extension.
# try:
# from Cython.Distutils import build_ext
# import numpy
# models_dir = os.path.join(os.path.dirname(__file__), 'gensim', 'models')
# ext_modules = [
# Extension('gensim_addons.models.word2vec_inner',
# ['gensim_addons/models/word2vec_inner.pyx'],
# include_dirs = [models_dir, numpy.get_include()])
# ]
# native_ext = True
# except ImportError:
# sys.stderr.write('''
# =========================================================
# Please install Cython (http://cython.org/), if you
# want to use the highly optimized version of word2vec.
# Usually you can install it (optional) using:
# pip install -U cython
# or
# easy_install -U cython
# or
# the package-management of your distribution.
# If you install Cython *after* installing gensim, the
# optimized version of word2vec will still be automatically
# generated, on the first use of word2vec.
# =========================================================
# ''')
# if native_ext:
# setup(
# name = pkg_name + '_addons',
# version = pkg_ver,
# description = pkg_desc,
# long_description = read('README.rst'),
# packages = find_packages(exclude=[ pkg_name, pkg_name + '.*']),
# author = pkg_author,
# author_email = pkg_author_email,
# url = pkg_url,
# download_url = pkg_download_url,
# keywords = pkg_keywords,
# license = pkg_license,
# platforms = 'any',
# zip_safe = False,
# classifiers = pkg_classifiers,
# install_requires = [
# 'gensim == ' + pkg_ver,
# ],
# include_package_data = True,
# cmdclass = {
# 'build_ext': build_ext
# },
# ext_modules = ext_modules,
# )
| lgpl-3.0 |
NeurodataWithoutBorders/api-python | nwb/combine_messages.py | 1 | 9956 | import re
import pprint
pp = pprint.PrettyPrinter(indent=4)
import sys
from sys import version_info # py3, for checking type of input
def combine_messages(messages):
""" Combines messages that have one or more integers in them, such as
"trial001" "trial002", into a single message like "trial# (#=1-2)".
This is to reduce the number of messages required to be displayed.
Operates by creating the following structure, named "ti" for "template info":
{
't2tn': {} - maps each template (containing "#") to a template number (tn)
'tn2t': [] - list of templates, indexed by the template number
'm2tns': {} - maps each message number (index in messages) to
array of template numbers (tns)
'tn2dm': {} - maps each template number to a dictionary that has as keys the digits
used to make the template, and with value the message number used to make the template
with those digits. i.e.:
    { tn1: {d1: m1, d2: m2}, tn2: {d3: m3, d4: m4}, tn3: { ...}}
where:
tn - template number
d: m - digits used to make template from message number m
    'tn2md': {} - maps each template number to a dictionary that has keys the message number
    and value the digits used to make the message. These reverse the key-values in 'tn2dm', e.g.:
    { tn1: {m1: d1, m2: d2}, tn2: {m3: d3, m4: d4}, tn3: { ...}}
where:
tn - template number
d: m - digits used to make template from message number m
This array is used to dynamically remove entries in 'tn2dm' as each message in a
template is displayed so that structure always has an accurate list of remaining messages.
'mout': [] - messages to display (output), formed by combining messages
'mfin': [] - set of message numbers "finished" (already included in mout).
}
This function works by first creating everything except mout and mfin, then
going through each message, finding the template numbers that have the most
digits, and using those to make the combined message.
"""
ti = {}
ti['t2tn'] = {}
ti['tn2t'] = []
ti['m2tns'] = {}
ti['tn2dm'] = {}
ti['tn2md'] = {}
# debug_msg = "/acquisition/timeseries/fov_15002_17/data"
# debug_mn = -1
for mn in range(len(messages)):
msg = messages[mn]
if version_info[0] > 2:
assert isinstance(msg, str), "in Python 3, messages must be str (unicode) type"
# if msg.startswith(debug_msg):
# debug_mn = mn
found_nums = re.findall("\d+", msg)
if not found_nums:
# no numbers found, don't process
continue
# remove any duplicates
found_nums = list(set(found_nums))
for digits in found_nums:
pattern = "(?<!\d)%s(?!\d)" % digits # substitute only if digits not surrounded by other digits
template = re.sub(pattern, "#", msg) # make template for this message and digits
if template not in ti['t2tn']:
tn = len(ti['tn2t']) # template number
ti['tn2t'].append(template) # add template to list of templates
ti['t2tn'][template] = tn # add entry to map of template to template number
else:
tn = ti['t2tn'][template]
# save template number (tn) in 'm2tns'
if mn not in ti['m2tns']:
ti['m2tns'][mn] = [tn,]
else:
ti['m2tns'][mn].append(tn)
# save template number, digits and message number in 'tn2dm'
idigits = int(digits)
if tn not in ti['tn2dm']:
ti['tn2dm'][tn] = {idigits: mn}
ti['tn2md'][tn] = {mn: idigits}
else:
                if idigits in ti['tn2dm'][tn]:
print ("duplicate message found: %s" % msg)
break
ti['tn2dm'][tn][idigits] = mn
ti['tn2md'][tn][mn] = idigits
# done building needed structures. Now generate 'output' (i.e. ti['mfin'] and ti['mout']
ti['mout'] = []
ti['mfin'] = set([])
for mn in range(len(messages)):
# if mn == debug_mn:
# print ("found mn %i '%s'" % (debug_mn, debug_msg))
# import pdb; pdb.set_trace()
if mn in ti['mfin']:
# message has already been displayed (using a template)
continue
if mn not in ti['m2tns']:
# no digits found in this message, just display as is
ti['mout'].append(messages[mn])
ti['mfin'].add(mn)
continue
# this message has at least one pattern. Find template with largest number of other messages
# that have not been displayed yet
# build list of pairs, (a, b); a - template number, b - number of messages in template
tn_nm_pairs = [ (tn, len(ti['tn2dm'][tn])) for tn in ti['m2tns'][mn] ]
# get those pairs that have the largest number of messages
ltn_nm_pairs = largest_pairs(tn_nm_pairs)
# nmax = 0
# for tn in ti['m2tns'][mn]:
# dm = ti['tn2dm'][tn]
# num_messages = len(ti['tn2dm'][tn]) # num messages associated with this template
# if num_messages > nmax:
# max_tn = [tn]
# nmax = num_messages
# elif num_messages == nmax:
# # multiple templates have the same number of messages, will need to select
# # one in a deterministic way
# max_tn.append(tn)
# # if no other messages use pattern, just display as is
# if nmax == 1:
if ltn_nm_pairs[0][1] == 1:
# only one messages uses pattern, just display as is
ti['mout'].append(messages[mn])
ti['mfin'].add(mn)
continue
# if len(max_tn) > 1:
if len(ltn_nm_pairs) == 1:
# only one template found that has maximal number of messages. use it.
max_tn = ltn_nm_pairs[0][0]
else:
# multiple templates have the same maximal number of messages. Select the one
# with the rightmost position of '#' in the template
# build list of pairs, (a,b): a - template number, b - index of '#' in template
tn_ix_pairs = [ (ltn_nm_pairs[i][0], ti['tn2t'][ltn_nm_pairs[i][0]].index('#'))
for i in range(len(ltn_nm_pairs))]
tn_ix_pairs = largest_pairs(tn_ix_pairs)
if len(tn_ix_pairs) > 1:
# should never happen since templates made for the same message cannot have
# the same position for the '#'
sys.exit("found multiple templates with same maximal number of messages and same template")
# use the template found
max_tn = tn_ix_pairs[0][0]
# other messages use this template. Get list message numbers and digits that share this template
s_digits = list(ti['tn2dm'][max_tn].keys()) # shared digits
s_mns = list(ti['tn2dm'][max_tn].values()) # shared message numbers
# update tn2dm to remove messages that will be displayed shortly (in this template)
for mn in s_mns:
for tn in ti['m2tns'][mn]:
idigit = ti['tn2md'][tn][mn]
del ti['tn2dm'][tn][idigit]
# make new message by combining shared digits with template
template = ti['tn2t'][max_tn]
# convert digits from string to int
# i_digits = sorted([int(i) for i in s_digits])
i_digits = sorted(s_digits)
# make string representing ranges of digits
prevn = i_digits[0] # initialize previous number to first
sr = str(prevn) # string of ranges being generated
in_range = False
for i in range(1, len(i_digits)):
newn = i_digits[i]
if newn == prevn + 1:
# in a range
in_range = True
else:
# not in a range. But if was previously save end of previous range
if in_range:
sr = "%s-%i" % (sr, prevn)
in_range = False
# save new number
sr = "%s,%i" % (sr, newn)
prevn = newn
# append final number if in range
if in_range:
sr = "%s-%i" % (sr, newn)
new_message = template + " (#=%s)" % sr
ti['mout'].append(new_message)
# add all messages that share this template to ti['mfin'] so they are not displayed again
ti['mfin'].update(s_mns)
# return list of combined messages
return ti['mout']
def largest_pairs(pairs):
""""Input is a list of two-element tuples, e.g. [(5, 4), (2, 7), ...]
Output is list of those, which have the largest 2nd element, e.g. [(2,7)]"""
largest = -1
for pair in pairs:
a, b = pair
if b > largest:
largest = b
lpairs = [pair]
elif b == largest:
lpairs.append(pair)
return lpairs
def test_combine_messages():
""" tests combine_messages function"""
messages = [
"some prefix trial-none",
"some prefix trial23",
"some prefix trial23/timestamps",
"some prefix trial23 timestamps",
"some prefix trial23\ntimestamps",
"some prefix 32-bits, trial32",
"some prefix 32-bits, trial33",
"some prefix 32-bits, trial34",
"some prefix 32-bits, trial35",
"some prefix trial-11",
"some prefix trial23 and trial23 again",
"some prefix trial27",
"some prefix trial27/timestamps",
"some prefix trial27 timestamps",
"some prefix trial27\ntimestamps",
"some prefix 32-bits, trial27",
"some prefix trial27 and trial27 again"]
cm = combine_messages(messages)
pp.pprint(cm)
if __name__ == '__main__':
test_combine_messages()
| bsd-3-clause |
wevote/WeVoteServer | apis_v1/tests/test_views_voter_location.py | 1 | 3410 | # -*- coding: UTF-8 -*-
from django.contrib.gis import geoip2
from django.urls import reverse
from django.test import Client, TestCase
from functools import wraps
import json
# def print_geoip_instructions_on_exc(unittest):
# @wraps(unittest)
# def wrapper(*args, **kwargs):
# try:
# unittest(*args, **kwargs)
# except geoip2.base.GeoIPException:
# print('\nDid you setup GeoIP on your local machine? See '
# 'https://github.com/wevote/WeVoteServer/blob/master/README_API_INSTALL.md#set-up-geoip\n')
# raise
# return wrapper
class WeVoteAPIsV1TestsVoterVoterLocation(TestCase):
databases = ["default", "readonly"]
@classmethod
def setUpTestData(cls):
cls.voter_location_url = reverse('apis_v1:voterLocationRetrieveFromIPView')
cls.client = Client()
# NOTE: Different location found between free and paid version of GeoIP
# @print_geoip_instructions_on_exc
def test_location_from_ip_success(self):
response = self.client.get(self.voter_location_url, {'ip_address': '69.181.21.132'})
json_data = json.loads(response.content.decode())
self.assertEqual(json_data['success'], True)
self.assertEqual(json_data['voter_location_found'], True)
def test_failure_no_ip_supplied(self):
response = self.client.get(self.voter_location_url, REMOTE_ADDR=None)
# self.assertEqual(response.status_code, 400)
# self.assertEqual(response.content.decode(), 'missing ip_address request parameter')
self.assertEqual(response.status_code, 200)
json_data = json.loads(response.content.decode())
self.assertEqual(json_data['success'], False)
self.assertEqual(json_data['status'], 'LOCATION_RETRIEVE_IP_ADDRESS_REQUEST_PARAMETER_MISSING')
self.assertEqual(json_data['voter_location_found'], False)
# @print_geoip_instructions_on_exc
def test_failure_invalid_ip(self):
response = self.client.get(self.voter_location_url, {'ip_address': '0.2.1.1'})
self.assertEqual(response.status_code, 200)
json_data = json.loads(response.content.decode())
self.assertEqual(json_data['success'], True)
self.assertEqual(json_data['voter_location_found'], False)
# NOTE: Different location found between free and paid version of GeoIP
# @print_geoip_instructions_on_exc
def test_get_ip_from_headers(self):
""" If an IP address is not in the request parameters, it will be parsed from the headers. """
response = self.client.get(self.voter_location_url, REMOTE_ADDR='69.181.21.132')
self.assertEqual(response.status_code, 200)
json_data = json.loads(response.content.decode())
self.assertEqual(json_data['success'], True)
self.assertEqual(json_data['voter_location_found'], True)
# NOTE: Different location found between free and paid version of GeoIP
# @print_geoip_instructions_on_exc
def test_x_forwarded_for_header_priority_over_remote_addr(self):
response = self.client.get(self.voter_location_url, HTTP_X_FORWARDED_FOR='69.181.21.132', REMOTE_ADDR='0.1.1.1')
self.assertEqual(response.status_code, 200)
json_data = json.loads(response.content.decode())
self.assertEqual(json_data['success'], True)
self.assertEqual(json_data['voter_location_found'], True)
| mit |
Linkid/numpy | numpy/core/code_generators/generate_numpy_api.py | 113 | 7629 | from __future__ import division, print_function
import os
import genapi
from genapi import \
TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
import numpy_api
# use annotated api when running under cpychecker
h_template = r"""
#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
typedef struct {
PyObject_HEAD
npy_bool obval;
} PyBoolScalarObject;
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#else
NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#endif
%s
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
#endif
#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
extern void **PyArray_API;
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
void **PyArray_API;
#else
static void **PyArray_API=NULL;
#endif
#endif
%s
#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
static int
_import_array(void)
{
int st;
PyObject *numpy = PyImport_ImportModule("numpy.core.multiarray");
PyObject *c_api = NULL;
if (numpy == NULL) {
PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import");
return -1;
}
c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
Py_DECREF(numpy);
if (c_api == NULL) {
PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
return -1;
}
#if PY_VERSION_HEX >= 0x03000000
if (!PyCapsule_CheckExact(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
#else
if (!PyCObject_Check(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);
#endif
Py_DECREF(c_api);
if (PyArray_API == NULL) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
return -1;
}
/* Perform runtime check of C API version */
if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"ABI version %%x but this version of numpy is %%x", \
(int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
return -1;
}
if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"API version %%x but this version of numpy is %%x", \
(int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
return -1;
}
/*
* Perform runtime check of endianness and check it matches the one set by
* the headers (npy_endian.h) as a safeguard
*/
st = PyArray_GetEndianness();
if (st == NPY_CPU_UNKNOWN_ENDIAN) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian");
return -1;
}
#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
if (st != NPY_CPU_BIG) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"big endian, but detected different endianness at runtime");
return -1;
}
#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
if (st != NPY_CPU_LITTLE) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"little endian, but detected different endianness at runtime");
return -1;
}
#endif
return 0;
}
#if PY_VERSION_HEX >= 0x03000000
#define NUMPY_IMPORT_ARRAY_RETVAL NULL
#else
#define NUMPY_IMPORT_ARRAY_RETVAL
#endif
#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } }
#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
#endif
#endif
"""
c_template = r"""
/* These pointers will be stored in the C-object for use in other
extension modules
*/
void *PyArray_API[] = {
%s
};
"""
c_api_header = """
===========
Numpy C-API
===========
"""
def generate_api(output_dir, force=False):
basename = 'multiarray_api'
h_file = os.path.join(output_dir, '__%s.h' % basename)
c_file = os.path.join(output_dir, '__%s.c' % basename)
d_file = os.path.join(output_dir, '%s.txt' % basename)
targets = (h_file, c_file, d_file)
sources = numpy_api.multiarray_api
if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])):
return targets
else:
do_generate_api(targets, sources)
return targets
def do_generate_api(targets, sources):
header_file = targets[0]
c_file = targets[1]
doc_file = targets[2]
global_vars = sources[0]
scalar_bool_values = sources[1]
types_api = sources[2]
multiarray_funcs = sources[3]
multiarray_api = sources[:]
module_list = []
extension_list = []
init_list = []
# Check multiarray api indexes
multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
genapi.check_api_dict(multiarray_api_index)
numpyapi_list = genapi.get_api_functions('NUMPY_API',
multiarray_funcs)
ordered_funcs_api = genapi.order_dict(multiarray_funcs)
# Create dict name -> *Api instance
api_name = 'PyArray_API'
multiarray_api_dict = {}
for f in numpyapi_list:
name = f.name
index = multiarray_funcs[name][0]
annotations = multiarray_funcs[name][1:]
multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations,
f.return_type,
f.args, api_name)
for name, val in global_vars.items():
index, type = val
multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)
for name, val in scalar_bool_values.items():
index = val[0]
multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)
for name, val in types_api.items():
index = val[0]
multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)
if len(multiarray_api_dict) != len(multiarray_api_index):
raise AssertionError("Multiarray API size mismatch %d %d" %
(len(multiarray_api_dict), len(multiarray_api_index)))
extension_list = []
for name, index in genapi.order_dict(multiarray_api_index):
api_item = multiarray_api_dict[name]
extension_list.append(api_item.define_from_array_api_string())
init_list.append(api_item.array_api_define())
module_list.append(api_item.internal_define())
# Write to header
fid = open(header_file, 'w')
s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
fid.write(s)
fid.close()
# Write to c-code
fid = open(c_file, 'w')
s = c_template % ',\n'.join(init_list)
fid.write(s)
fid.close()
# write to documentation
fid = open(doc_file, 'w')
fid.write(c_api_header)
for func in numpyapi_list:
fid.write(func.to_ReST())
fid.write('\n\n')
fid.close()
return targets
| bsd-3-clause |
tersmitten/ansible | lib/ansible/modules/clustering/consul_kv.py | 10 | 11558 | #!/usr/bin/python
#
# (c) 2015, Steve Gargan <[email protected]>
# (c) 2018 Genome Research Ltd.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: consul_kv
short_description: Manipulate entries in the key/value store of a consul cluster
description:
- Allows the retrieval, addition, modification and deletion of key/value entries in a
consul cluster via the agent. The entire contents of the record, including
the indices, flags and session are returned as C(value).
- If the C(key) represents a prefix then note that when a value is removed, the existing
value if any is returned as part of the results.
- See http://www.consul.io/docs/agent/http.html#kv for more details.
requirements:
- python-consul
- requests
version_added: "2.0"
author:
- Steve Gargan (@sgargan)
- Colin Nolan (@colin-nolan)
options:
state:
description:
- The action to take with the supplied key and value. If the state is 'present' and `value` is set, the key
contents will be set to the value supplied and `changed` will be set to `true` only if the value was
different to the current contents. If the state is 'present' and `value` is not set, the existing value
associated to the key will be returned. The state 'absent' will remove the key/value pair,
again 'changed' will be set to true only if the key actually existed
prior to the removal. An attempt can be made to obtain or free the
lock associated with a key/value pair with the states 'acquire' or
'release' respectively. a valid session must be supplied to make the
attempt changed will be true if the attempt is successful, false
otherwise.
choices: [ absent, acquire, present, release ]
default: present
key:
description:
- The key at which the value should be stored.
type: str
required: yes
value:
description:
- The value should be associated with the given key, required if C(state)
is C(present).
type: str
required: yes
recurse:
description:
- If the key represents a prefix, each entry with the prefix can be
retrieved by setting this to C(yes).
type: bool
default: 'no'
retrieve:
description:
- If the I(state) is C(present) and I(value) is set, perform a
read after setting the value and return this value.
default: True
type: bool
session:
description:
- The session that should be used to acquire or release a lock
associated with a key/value pair.
type: str
token:
description:
- The token key indentifying an ACL rule set that controls access to
the key value pair
type: str
cas:
description:
- Used when acquiring a lock with a session. If the C(cas) is C(0), then
Consul will only put the key if it does not already exist. If the
C(cas) value is non-zero, then the key is only set if the index matches
the ModifyIndex of that key.
type: str
flags:
description:
- Opaque positive integer value that can be passed when setting a value.
type: str
host:
description:
- Host of the consul agent.
type: str
default: localhost
port:
description:
- The port on which the consul agent is running.
type: int
default: 8500
scheme:
description:
- The protocol scheme on which the consul agent is running.
type: str
default: http
version_added: "2.1"
validate_certs:
description:
- Whether to verify the tls certificate of the consul agent.
type: bool
default: 'yes'
version_added: "2.1"
"""
EXAMPLES = '''
# If the key does not exist, the value associated to the "data" property in `retrieved_key` will be `None`
# If the key value is empty string, `retrieved_key["data"]["Value"]` will be `None`
- name: retrieve a value from the key/value store
consul_kv:
key: somekey
register: retrieved_key
- name: Add or update the value associated with a key in the key/value store
consul_kv:
key: somekey
value: somevalue
- name: Remove a key from the store
consul_kv:
key: somekey
state: absent
- name: Add a node to an arbitrary group via consul inventory (see consul.ini)
consul_kv:
key: ansible/groups/dc1/somenode
value: top_secret
- name: Register a key/value pair with an associated session
consul_kv:
key: stg/node/server_birthday
value: 20160509
session: "{{ sessionid }}"
state: acquire
'''
from ansible.module_utils._text import to_text
try:
import consul
from requests.exceptions import ConnectionError
python_consul_installed = True
except ImportError:
python_consul_installed = False
from ansible.module_utils.basic import AnsibleModule
# Note: although the python-consul documentation implies that using a key with a value of `None` with `put` has a
# special meaning (https://python-consul.readthedocs.io/en/latest/#consul-kv), if not set in the subsequently API call,
# the value just defaults to an empty string (https://www.consul.io/api/kv.html#create-update-key)
NOT_SET = None
def _has_value_changed(consul_client, key, target_value):
"""
Uses the given Consul client to determine if the value associated to the given key is different to the given target
value.
:param consul_client: Consul connected client
:param key: key in Consul
:param target_value: value to be associated to the key
:return: tuple where the first element is the value of the "X-Consul-Index" header and the second is `True` if the
value has changed (i.e. the stored value is not the target value)
"""
index, existing = consul_client.kv.get(key)
if not existing:
return index, True
try:
changed = to_text(existing['Value'], errors='surrogate_or_strict') != target_value
return index, changed
except UnicodeError:
# Existing value was not decodable but all values we set are valid utf-8
return index, True
def execute(module):
state = module.params.get('state')
if state == 'acquire' or state == 'release':
lock(module, state)
elif state == 'present':
if module.params.get('value') is NOT_SET:
get_value(module)
else:
set_value(module)
elif state == 'absent':
remove_value(module)
else:
        module.fail_json(msg="Unsupported state: %s" % (state, ))
def lock(module, state):
consul_api = get_consul_api(module)
session = module.params.get('session')
key = module.params.get('key')
value = module.params.get('value')
if not session:
        module.fail_json(
msg='%s of lock for %s requested but no session supplied' %
(state, key))
index, changed = _has_value_changed(consul_api, key, value)
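    # Only issue the acquire/release put when the stored value differs and we are not in
    # check mode; Consul piggybacks lock handling on an ordinary KV put via these flags.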
if changed and not module.check_mode:
if state == 'acquire':
changed = consul_api.kv.put(key, value,
cas=module.params.get('cas'),
acquire=session,
flags=module.params.get('flags'))
else:
changed = consul_api.kv.put(key, value,
cas=module.params.get('cas'),
release=session,
flags=module.params.get('flags'))
module.exit_json(changed=changed,
index=index,
key=key)
def get_value(module):
consul_api = get_consul_api(module)
key = module.params.get('key')
index, existing_value = consul_api.kv.get(key, recurse=module.params.get('recurse'))
module.exit_json(changed=False, index=index, data=existing_value)
def set_value(module):
consul_api = get_consul_api(module)
key = module.params.get('key')
value = module.params.get('value')
if value is NOT_SET:
raise AssertionError('Cannot set value of "%s" to `NOT_SET`' % key)
index, changed = _has_value_changed(consul_api, key, value)
if changed and not module.check_mode:
changed = consul_api.kv.put(key, value,
cas=module.params.get('cas'),
flags=module.params.get('flags'))
stored = None
if module.params.get('retrieve'):
index, stored = consul_api.kv.get(key)
module.exit_json(changed=changed,
index=index,
key=key,
data=stored)
def remove_value(module):
    ''' Remove the value associated with the given key. If the recurse parameter
    is set, any key prefixed with the given key will be removed. '''
consul_api = get_consul_api(module)
key = module.params.get('key')
index, existing = consul_api.kv.get(
key, recurse=module.params.get('recurse'))
changed = existing is not None
if changed and not module.check_mode:
consul_api.kv.delete(key, module.params.get('recurse'))
module.exit_json(changed=changed,
index=index,
key=key,
data=existing)
def get_consul_api(module, token=None):
return consul.Consul(host=module.params.get('host'),
port=module.params.get('port'),
scheme=module.params.get('scheme'),
verify=module.params.get('validate_certs'),
token=module.params.get('token'))
def test_dependencies(module):
if not python_consul_installed:
module.fail_json(msg="python-consul required for this module. "
"see https://python-consul.readthedocs.io/en/latest/#installation")
def main():
module = AnsibleModule(
argument_spec=dict(
cas=dict(type='str'),
flags=dict(type='str'),
key=dict(type='str', required=True),
host=dict(type='str', default='localhost'),
scheme=dict(type='str', default='http'),
validate_certs=dict(type='bool', default=True),
port=dict(type='int', default=8500),
recurse=dict(type='bool'),
retrieve=dict(type='bool', default=True),
state=dict(type='str', default='present', choices=['absent', 'acquire', 'present', 'release']),
token=dict(type='str', no_log=True),
value=dict(type='str', default=NOT_SET),
session=dict(type='str'),
),
supports_check_mode=True
)
test_dependencies(module)
try:
execute(module)
except ConnectionError as e:
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
module.params.get('host'), module.params.get('port'), e))
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
bowlofstew/scylla | configure.py | 12 | 31100 | #!/usr/bin/python3
#
# Copyright 2015 Cloudius Systems
#
#
# This file is part of Scylla.
#
# Scylla is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scylla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
#
import os, os.path, textwrap, argparse, sys, shlex, subprocess, tempfile, re
from distutils.spawn import find_executable
configure_args = str.join(' ', [shlex.quote(x) for x in sys.argv[1:]])
def get_flags():
with open('/proc/cpuinfo') as f:
for line in f:
if line.strip():
if line.rstrip('\n').startswith('flags'):
return re.sub(r'^flags\s+: ', '', line).split()
def add_tristate(arg_parser, name, dest, help):
arg_parser.add_argument('--enable-' + name, dest = dest, action = 'store_true', default = None,
help = 'Enable ' + help)
arg_parser.add_argument('--disable-' + name, dest = dest, action = 'store_false', default = None,
help = 'Disable ' + help)
def apply_tristate(var, test, note, missing):
if (var is None) or var:
if test():
return True
elif var == True:
print(missing)
sys.exit(1)
else:
print(note)
return False
return False
def pkg_config(option, package):
output = subprocess.check_output(['pkg-config', option, package])
return output.decode('utf-8').strip()
def try_compile(compiler, source = '', flags = []):
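    # Feature probe: compile the snippet as C++ to /dev/null and report whether the compiler accepts it.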
with tempfile.NamedTemporaryFile() as sfile:
sfile.file.write(bytes(source, 'utf-8'))
sfile.file.flush()
return subprocess.call([compiler, '-x', 'c++', '-o', '/dev/null', '-c', sfile.name] + flags,
stdout = subprocess.DEVNULL,
stderr = subprocess.DEVNULL) == 0
def warning_supported(warning, compiler):
# gcc ignores -Wno-x even if it is not supported
adjusted = re.sub('^-Wno-', '-W', warning)
return try_compile(flags = [adjusted], compiler = compiler)
def debug_flag(compiler):
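    # Probe whether the compiler can emit debug info for C++14 auto return type
    # deduction; if it cannot, fall back to building without debug information.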
src_with_auto = textwrap.dedent('''\
template <typename T>
struct x { auto f() {} };
x<int> a;
''')
if try_compile(source = src_with_auto, flags = ['-g', '-std=gnu++1y'], compiler = compiler):
return '-g'
else:
print('Note: debug information disabled; upgrade your compiler')
return ''
class Thrift(object):
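    """Describes the C++ headers, sources and object files that the thrift
    compiler generates for a single .thrift source file."""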
def __init__(self, source, service):
self.source = source
self.service = service
def generated(self, gen_dir):
basename = os.path.splitext(os.path.basename(self.source))[0]
files = [basename + '_' + ext
for ext in ['types.cpp', 'types.h', 'constants.cpp', 'constants.h']]
files += [self.service + ext
for ext in ['.cpp', '.h']]
return [os.path.join(gen_dir, file) for file in files]
def headers(self, gen_dir):
return [x for x in self.generated(gen_dir) if x.endswith('.h')]
def sources(self, gen_dir):
return [x for x in self.generated(gen_dir) if x.endswith('.cpp')]
def objects(self, gen_dir):
return [x.replace('.cpp', '.o') for x in self.sources(gen_dir)]
def endswith(self, end):
return self.source.endswith(end)
class Antlr3Grammar(object):
def __init__(self, source):
self.source = source
def generated(self, gen_dir):
basename = os.path.splitext(self.source)[0]
files = [basename + ext
for ext in ['Lexer.cpp', 'Lexer.hpp', 'Parser.cpp', 'Parser.hpp']]
return [os.path.join(gen_dir, file) for file in files]
def headers(self, gen_dir):
return [x for x in self.generated(gen_dir) if x.endswith('.hpp')]
def sources(self, gen_dir):
return [x for x in self.generated(gen_dir) if x.endswith('.cpp')]
def objects(self, gen_dir):
return [x.replace('.cpp', '.o') for x in self.sources(gen_dir)]
def endswith(self, end):
return self.source.endswith(end)
modes = {
'debug': {
'sanitize': '-fsanitize=address -fsanitize=leak -fsanitize=undefined',
'sanitize_libs': '-lubsan -lasan',
'opt': '-O0 -DDEBUG -DDEBUG_SHARED_PTR -DDEFAULT_ALLOCATOR',
'libs': '',
},
'release': {
'sanitize': '',
'sanitize_libs': '',
'opt': '-O2',
'libs': '',
},
}
urchin_tests = [
'tests/mutation_test',
'tests/range_test',
'tests/types_test',
'tests/keys_test',
'tests/partitioner_test',
'tests/frozen_mutation_test',
'tests/perf/perf_mutation',
'tests/lsa_async_eviction_test',
'tests/lsa_sync_eviction_test',
'tests/row_cache_alloc_stress',
'tests/perf_row_cache_update',
'tests/perf/perf_hash',
'tests/perf/perf_cql_parser',
'tests/perf/perf_simple_query',
'tests/perf/perf_sstable',
'tests/cql_query_test',
'tests/storage_proxy_test',
'tests/mutation_reader_test',
'tests/mutation_query_test',
'tests/row_cache_test',
'tests/test-serialization',
'tests/sstable_test',
'tests/sstable_mutation_test',
'tests/memtable_test',
'tests/commitlog_test',
'tests/cartesian_product_test',
'tests/hash_test',
'tests/serializer_test',
'tests/map_difference_test',
'tests/message',
'tests/gossip',
'tests/gossip_test',
'tests/compound_test',
'tests/config_test',
'tests/gossiping_property_file_snitch_test',
'tests/snitch_reset_test',
'tests/network_topology_strategy_test',
'tests/query_processor_test',
'tests/batchlog_manager_test',
'tests/bytes_ostream_test',
'tests/UUID_test',
'tests/murmur_hash_test',
'tests/allocation_strategy_test',
'tests/logalloc_test',
'tests/managed_vector_test',
'tests/crc_test',
]
apps = [
'scylla',
]
tests = urchin_tests
all_artifacts = apps + tests
arg_parser = argparse.ArgumentParser('Configure scylla')
arg_parser.add_argument('--static', dest = 'static', action = 'store_const', default = '',
const = '-static',
                        help = 'Static link (useful for running on hosts outside the build environment)')
arg_parser.add_argument('--pie', dest = 'pie', action = 'store_true',
help = 'Build position-independent executable (PIE)')
arg_parser.add_argument('--so', dest = 'so', action = 'store_true',
help = 'Build shared object (SO) instead of executable')
arg_parser.add_argument('--mode', action='store', choices=list(modes.keys()) + ['all'], default='all')
arg_parser.add_argument('--with', dest='artifacts', action='append', choices=all_artifacts, default=[])
arg_parser.add_argument('--cflags', action = 'store', dest = 'user_cflags', default = '',
help = 'Extra flags for the C++ compiler')
arg_parser.add_argument('--ldflags', action = 'store', dest = 'user_ldflags', default = '',
help = 'Extra flags for the linker')
arg_parser.add_argument('--compiler', action = 'store', dest = 'cxx', default = 'g++',
help = 'C++ compiler path')
arg_parser.add_argument('--with-osv', action = 'store', dest = 'with_osv', default = '',
help = 'Shortcut for compile for OSv')
arg_parser.add_argument('--enable-dpdk', action = 'store_true', dest = 'dpdk', default = False,
help = 'Enable dpdk (from seastar dpdk sources)')
arg_parser.add_argument('--dpdk-target', action = 'store', dest = 'dpdk_target', default = '',
help = 'Path to DPDK SDK target location (e.g. <DPDK SDK dir>/x86_64-native-linuxapp-gcc)')
arg_parser.add_argument('--debuginfo', action = 'store', dest = 'debuginfo', type = int, default = 1,
                        help = 'Enable(1)/disable(0) compiler debug information generation')
add_tristate(arg_parser, name = 'hwloc', dest = 'hwloc', help = 'hwloc support')
add_tristate(arg_parser, name = 'xen', dest = 'xen', help = 'Xen support')
args = arg_parser.parse_args()
defines = []
urchin_libs = '-llz4 -lsnappy -lz -lboost_thread -lcryptopp -lrt -lyaml-cpp -lboost_date_time'
extra_cxxflags = {}
cassandra_interface = Thrift(source = 'interface/cassandra.thrift', service = 'Cassandra')
urchin_core = (['database.cc',
'schema.cc',
'bytes.cc',
'mutation.cc',
'row_cache.cc',
'frozen_mutation.cc',
'memtable.cc',
'release.cc',
'utils/logalloc.cc',
'utils/large_bitset.cc',
'mutation_partition.cc',
'mutation_partition_view.cc',
'mutation_partition_serializer.cc',
'mutation_reader.cc',
'mutation_query.cc',
'keys.cc',
'sstables/sstables.cc',
'sstables/compress.cc',
'sstables/row.cc',
'sstables/key.cc',
'sstables/partition.cc',
'sstables/filter.cc',
'sstables/compaction.cc',
'log.cc',
'transport/event.cc',
'transport/event_notifier.cc',
'transport/server.cc',
'cql3/abstract_marker.cc',
'cql3/attributes.cc',
'cql3/cf_name.cc',
'cql3/cql3_type.cc',
'cql3/operation.cc',
'cql3/index_name.cc',
'cql3/keyspace_element_name.cc',
'cql3/lists.cc',
'cql3/sets.cc',
'cql3/maps.cc',
'cql3/functions/functions.cc',
'cql3/statements/cf_prop_defs.cc',
'cql3/statements/create_table_statement.cc',
'cql3/statements/drop_keyspace_statement.cc',
'cql3/statements/drop_table_statement.cc',
'cql3/statements/schema_altering_statement.cc',
'cql3/statements/ks_prop_defs.cc',
'cql3/statements/modification_statement.cc',
'cql3/statements/update_statement.cc',
'cql3/statements/delete_statement.cc',
'cql3/statements/batch_statement.cc',
'cql3/statements/select_statement.cc',
'cql3/statements/use_statement.cc',
'cql3/statements/index_prop_defs.cc',
'cql3/statements/index_target.cc',
'cql3/statements/create_index_statement.cc',
'cql3/update_parameters.cc',
'cql3/ut_name.cc',
'thrift/handler.cc',
'thrift/server.cc',
'thrift/thrift_validation.cc',
'utils/runtime.cc',
'utils/murmur_hash.cc',
'utils/uuid.cc',
'utils/big_decimal.cc',
'types.cc',
'validation.cc',
'service/migration_manager.cc',
'service/storage_proxy.cc',
'cql3/operator.cc',
'cql3/relation.cc',
'cql3/column_identifier.cc',
'cql3/constants.cc',
'cql3/query_processor.cc',
'cql3/query_options.cc',
'cql3/single_column_relation.cc',
'cql3/token_relation.cc',
'cql3/column_condition.cc',
'cql3/user_types.cc',
'cql3/untyped_result_set.cc',
'cql3/selection/abstract_function_selector.cc',
'cql3/selection/simple_selector.cc',
'cql3/selection/selectable.cc',
'cql3/selection/selector_factories.cc',
'cql3/selection/selection.cc',
'cql3/selection/selector.cc',
'cql3/restrictions/statement_restrictions.cc',
'db/consistency_level.cc',
'db/system_keyspace.cc',
'db/schema_tables.cc',
'db/commitlog/commitlog.cc',
'db/commitlog/commitlog_replayer.cc',
'db/serializer.cc',
'db/config.cc',
'db/index/secondary_index.cc',
'db/marshal/type_parser.cc',
'db/batchlog_manager.cc',
'io/io.cc',
'utils/utils.cc',
'utils/UUID_gen.cc',
'utils/i_filter.cc',
'utils/bloom_filter.cc',
'utils/bloom_calculations.cc',
'utils/rate_limiter.cc',
'utils/compaction_manager.cc',
'utils/file_lock.cc',
'gms/version_generator.cc',
'gms/versioned_value.cc',
'gms/gossiper.cc',
'gms/failure_detector.cc',
'gms/gossip_digest_syn.cc',
'gms/gossip_digest_ack.cc',
'gms/gossip_digest_ack2.cc',
'gms/endpoint_state.cc',
'dht/i_partitioner.cc',
'dht/murmur3_partitioner.cc',
'dht/byte_ordered_partitioner.cc',
'dht/boot_strapper.cc',
'unimplemented.cc',
'query.cc',
'query-result-set.cc',
'locator/abstract_replication_strategy.cc',
'locator/simple_strategy.cc',
'locator/local_strategy.cc',
'locator/network_topology_strategy.cc',
'locator/token_metadata.cc',
'locator/locator.cc',
'locator/snitch_base.cc',
'locator/simple_snitch.cc',
'locator/rack_inferring_snitch.cc',
'locator/gossiping_property_file_snitch.cc',
'message/messaging_service.cc',
'service/migration_task.cc',
'service/storage_service.cc',
'streaming/streaming.cc',
'streaming/stream_task.cc',
'streaming/stream_session.cc',
'streaming/stream_request.cc',
'streaming/stream_summary.cc',
'streaming/stream_transfer_task.cc',
'streaming/stream_receive_task.cc',
'streaming/stream_plan.cc',
'streaming/progress_info.cc',
'streaming/session_info.cc',
'streaming/stream_coordinator.cc',
'streaming/stream_manager.cc',
'streaming/stream_result_future.cc',
'streaming/messages/stream_init_message.cc',
'streaming/messages/retry_message.cc',
'streaming/messages/received_message.cc',
'streaming/messages/prepare_message.cc',
'streaming/messages/file_message_header.cc',
'streaming/messages/outgoing_file_message.cc',
'streaming/messages/incoming_file_message.cc',
'gc_clock.cc',
'partition_slice_builder.cc',
'init.cc',
'repair/repair.cc',
]
+ [Antlr3Grammar('cql3/Cql.g')]
+ [Thrift('interface/cassandra.thrift', 'Cassandra')]
)
api = ['api/api.cc',
'api/api-doc/storage_service.json',
'api/api-doc/lsa.json',
'api/storage_service.cc',
'api/api-doc/commitlog.json',
'api/commitlog.cc',
'api/api-doc/gossiper.json',
'api/gossiper.cc',
'api/api-doc/failure_detector.json',
'api/failure_detector.cc',
'api/api-doc/column_family.json',
'api/column_family.cc',
'api/messaging_service.cc',
'api/api-doc/messaging_service.json',
'api/api-doc/storage_proxy.json',
'api/storage_proxy.cc',
'api/api-doc/cache_service.json',
'api/cache_service.cc',
'api/api-doc/collectd.json',
'api/collectd.cc',
'api/api-doc/endpoint_snitch_info.json',
'api/endpoint_snitch.cc',
'api/api-doc/compaction_manager.json',
'api/compaction_manager.cc',
'api/api-doc/hinted_handoff.json',
'api/hinted_handoff.cc',
'api/api-doc/utils.json',
'api/lsa.cc',
'api/api-doc/stream_manager.json',
'api/stream_manager.cc',
]
urchin_tests_dependencies = urchin_core + [
'tests/cql_test_env.cc',
'tests/cql_assertions.cc',
'tests/result_set_assertions.cc',
'tests/mutation_source_test.cc',
]
urchin_tests_seastar_deps = [
'seastar/tests/test-utils.cc',
'seastar/tests/test_runner.cc',
]
deps = {
'scylla': ['main.cc'] + urchin_core + api,
}
tests_not_using_seastar_test_framework = set([
'tests/types_test',
'tests/keys_test',
'tests/partitioner_test',
'tests/map_difference_test',
'tests/frozen_mutation_test',
'tests/perf/perf_mutation',
'tests/lsa_async_eviction_test',
'tests/lsa_sync_eviction_test',
'tests/row_cache_alloc_stress',
'tests/perf_row_cache_update',
'tests/cartesian_product_test',
'tests/perf/perf_hash',
'tests/perf/perf_cql_parser',
'tests/message',
'tests/perf/perf_simple_query',
'tests/test-serialization',
'tests/gossip',
'tests/compound_test',
'tests/range_test',
'tests/crc_test',
'tests/perf/perf_sstable',
'tests/managed_vector_test',
])
for t in tests_not_using_seastar_test_framework:
if not t in urchin_tests:
raise Exception("Test %s not found in urchin_tests" % (t))
for t in urchin_tests:
deps[t] = urchin_tests_dependencies + [t + '.cc']
if t not in tests_not_using_seastar_test_framework:
deps[t] += urchin_tests_seastar_deps
deps['tests/sstable_test'] += ['tests/sstable_datafile_test.cc']
deps['tests/bytes_ostream_test'] = ['tests/bytes_ostream_test.cc']
deps['tests/UUID_test'] = ['utils/UUID_gen.cc', 'tests/UUID_test.cc']
deps['tests/murmur_hash_test'] = ['bytes.cc', 'utils/murmur_hash.cc', 'tests/murmur_hash_test.cc']
deps['tests/allocation_strategy_test'] = ['tests/allocation_strategy_test.cc', 'utils/logalloc.cc', 'log.cc']
warnings = [
'-Wno-mismatched-tags', # clang-only
'-Wno-maybe-uninitialized', # false positives on gcc 5
]
warnings = [w
for w in warnings
if warning_supported(warning = w, compiler = args.cxx)]
warnings = ' '.join(warnings)
dbgflag = debug_flag(args.cxx) if args.debuginfo else ''
if args.so:
args.pie = '-shared'
args.fpie = '-fpic'
elif args.pie:
args.pie = '-pie'
args.fpie = '-fpie'
else:
args.pie = ''
args.fpie = ''
defines = ' '.join(['-D' + d for d in defines])
globals().update(vars(args))
total_memory = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
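# Allow roughly one concurrent link job per 7 GB of RAM (at least one), since linking is the most memory-hungry step.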
link_pool_depth = max(int(total_memory / 7e9), 1)
build_modes = modes if args.mode == 'all' else [args.mode]
build_artifacts = all_artifacts if not args.artifacts else args.artifacts
status = subprocess.call("./SCYLLA-VERSION-GEN")
if status != 0:
print('Version file generation failed')
sys.exit(1)
file = open('build/SCYLLA-VERSION-FILE', 'r')
scylla_version = file.read().strip()
file = open('build/SCYLLA-RELEASE-FILE', 'r')
scylla_release = file.read().strip()
extra_cxxflags["release.cc"] = "-DSCYLLA_VERSION=\"\\\"" + scylla_version + "\\\"\" -DSCYLLA_RELEASE=\"\\\"" + scylla_release + "\\\"\""
seastar_flags = ['--disable-xen']
if args.dpdk:
# fake dependencies on dpdk, so that it is built before anything else
seastar_flags += ['--enable-dpdk']
elif args.dpdk_target:
seastar_flags += ['--dpdk-target', args.dpdk_target]
seastar_flags += ['--compiler', args.cxx, '--cflags=-march=nehalem']
status = subprocess.call(['./configure.py'] + seastar_flags, cwd = 'seastar')
if status != 0:
print('Seastar configuration failed')
sys.exit(1)
pc = { mode : 'build/{}/seastar.pc'.format(mode) for mode in build_modes }
ninja = find_executable('ninja') or find_executable('ninja-build')
if not ninja:
print('Ninja executable (ninja or ninja-build) not found on PATH\n')
sys.exit(1)
status = subprocess.call([ninja] + list(pc.values()), cwd = 'seastar')
if status:
print('Failed to generate {}\n'.format(pc))
sys.exit(1)
for mode in build_modes:
cfg = dict([line.strip().split(': ', 1)
for line in open('seastar/' + pc[mode])
if ': ' in line])
modes[mode]['seastar_cflags'] = cfg['Cflags']
modes[mode]['seastar_libs'] = cfg['Libs']
seastar_deps = 'practically_anything_can_change_so_lets_run_it_every_time_and_restat.'
args.user_cflags += " " + pkg_config("--cflags", "jsoncpp")
libs = "-lyaml-cpp -llz4 -lz -lsnappy " + pkg_config("--libs", "jsoncpp") + ' -lboost_filesystem'
user_cflags = args.user_cflags
outdir = 'build'
buildfile = 'build.ninja'
os.makedirs(outdir, exist_ok = True)
do_sanitize = True
if args.static:
do_sanitize = False
with open(buildfile, 'w') as f:
f.write(textwrap.dedent('''\
configure_args = {configure_args}
builddir = {outdir}
cxx = {cxx}
cxxflags = {user_cflags} {warnings} {defines}
ldflags = {user_ldflags}
libs = {libs}
pool link_pool
depth = {link_pool_depth}
rule ragel
command = ragel -G2 -o $out $in
description = RAGEL $out
rule gen
command = echo -e $text > $out
description = GEN $out
rule swagger
command = seastar/json/json2code.py -f $in -o $out
description = SWAGGER $out
rule ninja
command = {ninja} -C $subdir $target
restat = 1
description = NINJA $out
''').format(**globals()))
for mode in build_modes:
modeval = modes[mode]
f.write(textwrap.dedent('''\
cxxflags_{mode} = -I. -I $builddir/{mode}/gen -I seastar -I seastar/build/{mode}/gen
rule cxx.{mode}
command = $cxx -MMD -MT $out -MF $out.d {seastar_cflags} $cxxflags $cxxflags_{mode} -c -o $out $in
description = CXX $out
depfile = $out.d
rule link.{mode}
command = $cxx $cxxflags_{mode} $ldflags {seastar_libs} -o $out $in $libs $libs_{mode}
description = LINK $out
pool = link_pool
rule link_stripped.{mode}
command = $cxx $cxxflags_{mode} -s $ldflags {seastar_libs} -o $out $in $libs $libs_{mode}
description = LINK (stripped) $out
pool = link_pool
rule ar.{mode}
command = rm -f $out; ar cr $out $in; ranlib $out
description = AR $out
rule thrift.{mode}
command = thrift -gen cpp:cob_style -out $builddir/{mode}/gen $in
description = THRIFT $in
rule antlr3.{mode}
command = sed -e '/^#if 0/,/^#endif/d' $in > $builddir/{mode}/gen/$in && antlr3 $builddir/{mode}/gen/$in && sed -i 's/^\\( *\)\\(ImplTraits::CommonTokenType\\* [a-zA-Z0-9_]* = NULL;\\)$$/\\1const \\2/' build/{mode}/gen/${{stem}}Parser.cpp
description = ANTLR3 $in
''').format(mode = mode, **modeval))
f.write('build {mode}: phony {artifacts}\n'.format(mode = mode,
artifacts = str.join(' ', ('$builddir/' + mode + '/' + x for x in build_artifacts))))
compiles = {}
ragels = {}
swaggers = {}
thrifts = set()
antlr3_grammars = set()
for binary in build_artifacts:
srcs = deps[binary]
objs = ['$builddir/' + mode + '/' + src.replace('.cc', '.o')
for src in srcs
if src.endswith('.cc')]
has_thrift = False
for dep in deps[binary]:
if isinstance(dep, Thrift):
has_thrift = True
objs += dep.objects('$builddir/' + mode + '/gen')
if isinstance(dep, Antlr3Grammar):
objs += dep.objects('$builddir/' + mode + '/gen')
if binary.endswith('.pc'):
vars = modeval.copy()
vars.update(globals())
pc = textwrap.dedent('''\
Name: Seastar
URL: http://seastar-project.org/
Description: Advanced C++ framework for high-performance server applications on modern hardware.
Version: 1.0
Libs: -L{srcdir}/{builddir} -Wl,--whole-archive -lseastar -Wl,--no-whole-archive {dbgflag} -Wl,--no-as-needed {static} {pie} -fvisibility=hidden -pthread {user_ldflags} {libs} {sanitize_libs}
Cflags: -std=gnu++1y {dbgflag} {fpie} -Wall -Werror -fvisibility=hidden -pthread -I{srcdir} -I{srcdir}/{builddir}/gen {user_cflags} {warnings} {defines} {sanitize} {opt}
''').format(builddir = 'build/' + mode, srcdir = os.getcwd(), **vars)
f.write('build $builddir/{}/{}: gen\n text = {}\n'.format(mode, binary, repr(pc)))
elif binary.endswith('.a'):
f.write('build $builddir/{}/{}: ar.{} {}\n'.format(mode, binary, mode, str.join(' ', objs)))
else:
if binary.startswith('tests/'):
# Our code's debugging information is huge, and multiplied
# by many tests yields ridiculous amounts of disk space.
# So we strip the tests by default; The user can very
# quickly re-link the test unstripped by adding a "_g"
# to the test name, e.g., "ninja build/release/testname_g"
f.write('build $builddir/{}/{}: link_stripped.{} {} {}\n'.format(mode, binary, mode, str.join(' ', objs),
'seastar/build/{}/libseastar.a'.format(mode)))
if has_thrift:
f.write(' libs = -lthrift -lboost_system $libs\n')
f.write('build $builddir/{}/{}_g: link.{} {} {}\n'.format(mode, binary, mode, str.join(' ', objs),
'seastar/build/{}/libseastar.a'.format(mode)))
else:
f.write('build $builddir/{}/{}: link.{} {} {}\n'.format(mode, binary, mode, str.join(' ', objs),
'seastar/build/{}/libseastar.a'.format(mode)))
if has_thrift:
f.write(' libs = -lthrift -lboost_system $libs\n')
for src in srcs:
if src.endswith('.cc'):
obj = '$builddir/' + mode + '/' + src.replace('.cc', '.o')
compiles[obj] = src
elif src.endswith('.rl'):
hh = '$builddir/' + mode + '/gen/' + src.replace('.rl', '.hh')
ragels[hh] = src
elif src.endswith('.json'):
hh = '$builddir/' + mode + '/gen/' + src + '.hh'
swaggers[hh] = src
elif src.endswith('.thrift'):
thrifts.add(src)
elif src.endswith('.g'):
antlr3_grammars.add(src)
else:
raise Exception('No rule for ' + src)
for obj in compiles:
src = compiles[obj]
gen_headers = list(ragels.keys())
gen_headers += ['seastar/build/{}/http/request_parser.hh'.format(mode)]
for th in thrifts:
gen_headers += th.headers('$builddir/{}/gen'.format(mode))
for g in antlr3_grammars:
gen_headers += g.headers('$builddir/{}/gen'.format(mode))
gen_headers += list(swaggers.keys())
f.write('build {}: cxx.{} {} || {} \n'.format(obj, mode, src, ' '.join(gen_headers)))
if src in extra_cxxflags:
f.write(' cxxflags = {seastar_cflags} $cxxflags $cxxflags_{mode} {extra_cxxflags}\n'.format(mode = mode, extra_cxxflags = extra_cxxflags[src], **modeval))
for hh in ragels:
src = ragels[hh]
f.write('build {}: ragel {}\n'.format(hh, src))
for hh in swaggers:
src = swaggers[hh]
f.write('build {}: swagger {}\n'.format(hh,src))
for thrift in thrifts:
outs = ' '.join(thrift.generated('$builddir/{}/gen'.format(mode)))
f.write('build {}: thrift.{} {}\n'.format(outs, mode, thrift.source))
for cc in thrift.sources('$builddir/{}/gen'.format(mode)):
obj = cc.replace('.cpp', '.o')
f.write('build {}: cxx.{} {}\n'.format(obj, mode, cc))
for grammar in antlr3_grammars:
outs = ' '.join(grammar.generated('$builddir/{}/gen'.format(mode)))
f.write('build {}: antlr3.{} {}\n stem = {}\n'.format(outs, mode, grammar.source,
grammar.source.rsplit('.', 1)[0]))
for cc in grammar.sources('$builddir/{}/gen'.format(mode)):
obj = cc.replace('.cpp', '.o')
f.write('build {}: cxx.{} {}\n'.format(obj, mode, cc))
f.write('build seastar/build/{}/libseastar.a: ninja {}\n'.format(mode, seastar_deps))
f.write(' subdir = seastar\n')
f.write(' target = build/{}/libseastar.a\n'.format(mode))
f.write('build {}: phony\n'.format(seastar_deps))
f.write(textwrap.dedent('''\
rule configure
command = python3 configure.py $configure_args
generator = 1
build build.ninja: configure | configure.py
rule cscope
command = find -name '*.[chS]' -o -name "*.cc" -o -name "*.hh" | cscope -bq -i-
description = CSCOPE
build cscope: cscope
rule request_parser_hh
command = {ninja} -C seastar build/release/gen/http/request_parser.hh build/debug/gen/http/request_parser.hh
description = GEN seastar/http/request_parser.hh
build seastar/build/release/http/request_parser.hh seastar/build/debug/http/request_parser.hh: request_parser_hh
rule clean
command = rm -rf build
description = CLEAN
build clean: clean
default {modes_list}
''').format(modes_list = ' '.join(build_modes), **globals()))
| agpl-3.0 |
timberline-secondary/hackerspace | src/profile_manager/forms.py | 1 | 1126 | from django import forms
from .models import Profile
class ProfileForm(forms.ModelForm):
# this will be saved in User.email
email = forms.EmailField(required=False)
class Meta:
model = Profile
fields = ['preferred_name', 'preferred_internal_only',
'alias', 'avatar', 'grad_year', 'email',
'get_announcements_by_email', # 'get_notifications_by_email'
'visible_to_other_students', 'dark_theme', 'silent_mode', 'custom_stylesheet']
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
self.fields['grad_year'] = forms.ChoiceField(
choices=Profile.get_grad_year_choices()
)
self.fields['email'].initial = self.instance.user.email
    # The alias column is UNIQUE only when NOT NULL, so store empty aliases as None.
def clean_alias(self):
return self.cleaned_data['alias'] or None
def save(self, *args, **kwargs):
super(ProfileForm, self).save(*args, **kwargs)
user = self.instance.user
user.email = self.cleaned_data['email']
user.save()
return self.instance
| gpl-3.0 |
dcsg/dropman | dropman/commands/reboot.py | 1 | 2458 | # The MIT License (MIT)
# Copyright (c) 2016 Daniel Gomes <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software
# , and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions
# of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
"""The reboot command."""
import time
import digitalocean
from .base import Base
class Reboot(Base):
@staticmethod
def reboot(droplet_id, droplet, manager):
if droplet.status == 'active' or droplet.status == 'off':
droplet.reboot()
print("Droplet id/name: " + str(droplet.id) + "/" + droplet.name + " is being reboot...")
is_active = False
retries = 10
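            # Poll the droplet status once a minute, for at most ten minutes, until it reports 'active' again.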
while is_active is False and retries > 0:
time.sleep(60)
droplet = manager.get_droplet(droplet_id)
if droplet.status == 'active':
is_active = True
print("Droplet id/name: " + str(droplet.id) + "/" + droplet.name + " is now power up.")
return
retries -= 1
return
print("Droplet id/name: " + str(droplet.id) + "/" + droplet.name + " is not running.")
def run(self, api_token):
droplet_id = self.options['<id>']
manager = digitalocean.Manager(token=api_token)
droplet = None
try:
droplet = manager.get_droplet(droplet_id)
except digitalocean.baseapi.DataReadError as err:
print(err)
if droplet is not None:
Reboot.reboot(droplet_id, droplet, manager)
| mit |
akshatharaj/django | tests/delete/tests.py | 222 | 18346 | from __future__ import unicode_literals
from math import ceil
from django.db import IntegrityError, connection, models
from django.db.models.sql.constants import GET_ITERATOR_CHUNK_SIZE
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils.six.moves import range
from .models import (
A, M, MR, R, S, T, Avatar, Base, Child, HiddenUser, HiddenUserProfile,
M2MFrom, M2MTo, MRNull, Parent, RChild, User, create_a, get_default_r,
)
class OnDeleteTests(TestCase):
def setUp(self):
self.DEFAULT = get_default_r()
def test_auto(self):
a = create_a('auto')
a.auto.delete()
self.assertFalse(A.objects.filter(name='auto').exists())
def test_auto_nullable(self):
a = create_a('auto_nullable')
a.auto_nullable.delete()
self.assertFalse(A.objects.filter(name='auto_nullable').exists())
def test_setvalue(self):
a = create_a('setvalue')
a.setvalue.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setvalue.pk)
def test_setnull(self):
a = create_a('setnull')
a.setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.setnull)
def test_setdefault(self):
a = create_a('setdefault')
a.setdefault.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(self.DEFAULT, a.setdefault.pk)
def test_setdefault_none(self):
a = create_a('setdefault_none')
a.setdefault_none.delete()
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.setdefault_none)
def test_cascade(self):
a = create_a('cascade')
a.cascade.delete()
self.assertFalse(A.objects.filter(name='cascade').exists())
def test_cascade_nullable(self):
a = create_a('cascade_nullable')
a.cascade_nullable.delete()
self.assertFalse(A.objects.filter(name='cascade_nullable').exists())
def test_protect(self):
a = create_a('protect')
self.assertRaises(IntegrityError, a.protect.delete)
def test_do_nothing(self):
# Testing DO_NOTHING is a bit harder: It would raise IntegrityError for a normal model,
# so we connect to pre_delete and set the fk to a known value.
replacement_r = R.objects.create()
def check_do_nothing(sender, **kwargs):
obj = kwargs['instance']
obj.donothing_set.update(donothing=replacement_r)
models.signals.pre_delete.connect(check_do_nothing)
a = create_a('do_nothing')
a.donothing.delete()
a = A.objects.get(pk=a.pk)
self.assertEqual(replacement_r, a.donothing)
models.signals.pre_delete.disconnect(check_do_nothing)
def test_do_nothing_qscount(self):
"""
Test that a models.DO_NOTHING relation doesn't trigger a query.
"""
b = Base.objects.create()
with self.assertNumQueries(1):
# RelToBase should not be queried.
b.delete()
self.assertEqual(Base.objects.count(), 0)
def test_inheritance_cascade_up(self):
child = RChild.objects.create()
child.delete()
self.assertFalse(R.objects.filter(pk=child.pk).exists())
def test_inheritance_cascade_down(self):
child = RChild.objects.create()
parent = child.r_ptr
parent.delete()
self.assertFalse(RChild.objects.filter(pk=child.pk).exists())
def test_cascade_from_child(self):
a = create_a('child')
a.child.delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(R.objects.filter(pk=a.child_id).exists())
def test_cascade_from_parent(self):
a = create_a('child')
R.objects.get(pk=a.child_id).delete()
self.assertFalse(A.objects.filter(name='child').exists())
self.assertFalse(RChild.objects.filter(pk=a.child_id).exists())
def test_setnull_from_child(self):
a = create_a('child_setnull')
a.child_setnull.delete()
self.assertFalse(R.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.child_setnull)
def test_setnull_from_parent(self):
a = create_a('child_setnull')
R.objects.get(pk=a.child_setnull_id).delete()
self.assertFalse(RChild.objects.filter(pk=a.child_setnull_id).exists())
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.child_setnull)
def test_o2o_setnull(self):
a = create_a('o2o_setnull')
a.o2o_setnull.delete()
a = A.objects.get(pk=a.pk)
self.assertIsNone(a.o2o_setnull)
class DeletionTests(TestCase):
def test_m2m(self):
m = M.objects.create()
r = R.objects.create()
MR.objects.create(m=m, r=r)
r.delete()
self.assertFalse(MR.objects.exists())
r = R.objects.create()
MR.objects.create(m=m, r=r)
m.delete()
self.assertFalse(MR.objects.exists())
m = M.objects.create()
r = R.objects.create()
m.m2m.add(r)
r.delete()
through = M._meta.get_field('m2m').remote_field.through
self.assertFalse(through.objects.exists())
r = R.objects.create()
m.m2m.add(r)
m.delete()
self.assertFalse(through.objects.exists())
m = M.objects.create()
r = R.objects.create()
MRNull.objects.create(m=m, r=r)
r.delete()
        self.assertTrue(MRNull.objects.exists())
self.assertFalse(m.m2m_through_null.exists())
def test_bulk(self):
s = S.objects.create(r=R.objects.create())
for i in range(2 * GET_ITERATOR_CHUNK_SIZE):
T.objects.create(s=s)
# 1 (select related `T` instances)
# + 1 (select related `U` instances)
# + 2 (delete `T` instances in batches)
# + 1 (delete `s`)
self.assertNumQueries(5, s.delete)
self.assertFalse(S.objects.exists())
def test_instance_update(self):
deleted = []
related_setnull_sets = []
def pre_delete(sender, **kwargs):
obj = kwargs['instance']
deleted.append(obj)
if isinstance(obj, R):
related_setnull_sets.append(list(a.pk for a in obj.setnull_set.all()))
models.signals.pre_delete.connect(pre_delete)
a = create_a('update_setnull')
a.setnull.delete()
a = create_a('update_cascade')
a.cascade.delete()
for obj in deleted:
self.assertIsNone(obj.pk)
for pk_list in related_setnull_sets:
for a in A.objects.filter(id__in=pk_list):
self.assertIsNone(a.setnull)
models.signals.pre_delete.disconnect(pre_delete)
def test_deletion_order(self):
pre_delete_order = []
post_delete_order = []
def log_post_delete(sender, **kwargs):
pre_delete_order.append((sender, kwargs['instance'].pk))
def log_pre_delete(sender, **kwargs):
post_delete_order.append((sender, kwargs['instance'].pk))
models.signals.post_delete.connect(log_post_delete)
models.signals.pre_delete.connect(log_pre_delete)
r = R.objects.create(pk=1)
s1 = S.objects.create(pk=1, r=r)
s2 = S.objects.create(pk=2, r=r)
T.objects.create(pk=1, s=s1)
T.objects.create(pk=2, s=s2)
r.delete()
self.assertEqual(
pre_delete_order, [(T, 2), (T, 1), (S, 2), (S, 1), (R, 1)]
)
self.assertEqual(
post_delete_order, [(T, 1), (T, 2), (S, 1), (S, 2), (R, 1)]
)
models.signals.post_delete.disconnect(log_post_delete)
models.signals.pre_delete.disconnect(log_pre_delete)
def test_relational_post_delete_signals_happen_before_parent_object(self):
deletions = []
def log_post_delete(instance, **kwargs):
self.assertTrue(R.objects.filter(pk=instance.r_id))
self.assertIs(type(instance), S)
deletions.append(instance.id)
r = R.objects.create(pk=1)
S.objects.create(pk=1, r=r)
models.signals.post_delete.connect(log_post_delete, sender=S)
try:
r.delete()
finally:
models.signals.post_delete.disconnect(log_post_delete)
self.assertEqual(len(deletions), 1)
self.assertEqual(deletions[0], 1)
@skipUnlessDBFeature("can_defer_constraint_checks")
def test_can_defer_constraint_checks(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
a = Avatar.objects.get(pk=u.avatar_id)
# 1 query to find the users for the avatar.
# 1 query to delete the user
# 1 query to delete the avatar
# The important thing is that when we can defer constraint checks there
# is no need to do an UPDATE on User.avatar to null it out.
# Attach a signal to make sure we will not do fast_deletes.
calls = []
def noop(*args, **kwargs):
calls.append('')
models.signals.post_delete.connect(noop, sender=User)
self.assertNumQueries(3, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
self.assertEqual(len(calls), 1)
models.signals.post_delete.disconnect(noop, sender=User)
@skipIfDBFeature("can_defer_constraint_checks")
def test_cannot_defer_constraint_checks(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
# Attach a signal to make sure we will not do fast_deletes.
calls = []
def noop(*args, **kwargs):
calls.append('')
models.signals.post_delete.connect(noop, sender=User)
a = Avatar.objects.get(pk=u.avatar_id)
# The below doesn't make sense... Why do we need to null out
# user.avatar if we are going to delete the user immediately after it,
# and there are no more cascades.
# 1 query to find the users for the avatar.
# 1 query to delete the user
# 1 query to null out user.avatar, because we can't defer the constraint
# 1 query to delete the avatar
self.assertNumQueries(4, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
self.assertEqual(len(calls), 1)
models.signals.post_delete.disconnect(noop, sender=User)
def test_hidden_related(self):
r = R.objects.create()
h = HiddenUser.objects.create(r=r)
HiddenUserProfile.objects.create(user=h)
r.delete()
self.assertEqual(HiddenUserProfile.objects.count(), 0)
def test_large_delete(self):
TEST_SIZE = 2000
objs = [Avatar() for i in range(0, TEST_SIZE)]
Avatar.objects.bulk_create(objs)
# Calculate the number of queries needed.
batch_size = connection.ops.bulk_batch_size(['pk'], objs)
# The related fetches are done in batches.
batches = int(ceil(float(len(objs)) / batch_size))
# One query for Avatar.objects.all() and then one related fast delete for
# each batch.
fetches_to_mem = 1 + batches
# The Avatar objects are going to be deleted in batches of GET_ITERATOR_CHUNK_SIZE
queries = fetches_to_mem + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE
self.assertNumQueries(queries, Avatar.objects.all().delete)
self.assertFalse(Avatar.objects.exists())
def test_large_delete_related(self):
TEST_SIZE = 2000
s = S.objects.create(r=R.objects.create())
for i in range(TEST_SIZE):
T.objects.create(s=s)
batch_size = max(connection.ops.bulk_batch_size(['pk'], range(TEST_SIZE)), 1)
# TEST_SIZE // batch_size (select related `T` instances)
# + 1 (select related `U` instances)
# + TEST_SIZE // GET_ITERATOR_CHUNK_SIZE (delete `T` instances in batches)
# + 1 (delete `s`)
expected_num_queries = (ceil(TEST_SIZE // batch_size) +
ceil(TEST_SIZE // GET_ITERATOR_CHUNK_SIZE) + 2)
self.assertNumQueries(expected_num_queries, s.delete)
self.assertFalse(S.objects.exists())
self.assertFalse(T.objects.exists())
def test_delete_with_keeping_parents(self):
child = RChild.objects.create()
parent_id = child.r_ptr_id
child.delete(keep_parents=True)
self.assertFalse(RChild.objects.filter(id=child.id).exists())
self.assertTrue(R.objects.filter(id=parent_id).exists())
def test_queryset_delete_returns_num_rows(self):
"""
QuerySet.delete() should return the number of deleted rows and a
dictionary with the number of deletions for each object type.
"""
Avatar.objects.bulk_create([Avatar(desc='a'), Avatar(desc='b'), Avatar(desc='c')])
avatars_count = Avatar.objects.count()
deleted, rows_count = Avatar.objects.all().delete()
self.assertEqual(deleted, avatars_count)
# more complex example with multiple object types
r = R.objects.create()
h1 = HiddenUser.objects.create(r=r)
HiddenUser.objects.create(r=r)
HiddenUserProfile.objects.create(user=h1)
existed_objs = {
R._meta.label: R.objects.count(),
HiddenUser._meta.label: HiddenUser.objects.count(),
A._meta.label: A.objects.count(),
MR._meta.label: MR.objects.count(),
HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
}
deleted, deleted_objs = R.objects.all().delete()
for k, v in existed_objs.items():
self.assertEqual(deleted_objs[k], v)
def test_model_delete_returns_num_rows(self):
"""
Model.delete() should return the number of deleted rows and a
dictionary with the number of deletions for each object type.
"""
r = R.objects.create()
h1 = HiddenUser.objects.create(r=r)
h2 = HiddenUser.objects.create(r=r)
HiddenUser.objects.create(r=r)
HiddenUserProfile.objects.create(user=h1)
HiddenUserProfile.objects.create(user=h2)
m1 = M.objects.create()
m2 = M.objects.create()
MR.objects.create(r=r, m=m1)
r.m_set.add(m1)
r.m_set.add(m2)
r.save()
existed_objs = {
R._meta.label: R.objects.count(),
HiddenUser._meta.label: HiddenUser.objects.count(),
A._meta.label: A.objects.count(),
MR._meta.label: MR.objects.count(),
HiddenUserProfile._meta.label: HiddenUserProfile.objects.count(),
M.m2m.through._meta.label: M.m2m.through.objects.count(),
}
deleted, deleted_objs = r.delete()
self.assertEqual(deleted, sum(existed_objs.values()))
for k, v in existed_objs.items():
self.assertEqual(deleted_objs[k], v)
class FastDeleteTests(TestCase):
def test_fast_delete_fk(self):
u = User.objects.create(
avatar=Avatar.objects.create()
)
a = Avatar.objects.get(pk=u.avatar_id)
# 1 query to fast-delete the user
# 1 query to delete the avatar
self.assertNumQueries(2, a.delete)
self.assertFalse(User.objects.exists())
self.assertFalse(Avatar.objects.exists())
def test_fast_delete_m2m(self):
t = M2MTo.objects.create()
f = M2MFrom.objects.create()
f.m2m.add(t)
# 1 to delete f, 1 to fast-delete m2m for f
self.assertNumQueries(2, f.delete)
def test_fast_delete_revm2m(self):
t = M2MTo.objects.create()
f = M2MFrom.objects.create()
f.m2m.add(t)
# 1 to delete t, 1 to fast-delete t's m_set
self.assertNumQueries(2, f.delete)
def test_fast_delete_qs(self):
u1 = User.objects.create()
u2 = User.objects.create()
self.assertNumQueries(1, User.objects.filter(pk=u1.pk).delete)
self.assertEqual(User.objects.count(), 1)
self.assertTrue(User.objects.filter(pk=u2.pk).exists())
def test_fast_delete_joined_qs(self):
a = Avatar.objects.create(desc='a')
User.objects.create(avatar=a)
u2 = User.objects.create()
expected_queries = 1 if connection.features.update_can_self_select else 2
self.assertNumQueries(expected_queries,
User.objects.filter(avatar__desc='a').delete)
self.assertEqual(User.objects.count(), 1)
self.assertTrue(User.objects.filter(pk=u2.pk).exists())
def test_fast_delete_inheritance(self):
c = Child.objects.create()
p = Parent.objects.create()
# 1 for self, 1 for parent
# However, this doesn't work as child.parent access creates a query,
# and this means we will be generating extra queries (a lot for large
# querysets). This is not a fast-delete problem.
# self.assertNumQueries(2, c.delete)
c.delete()
self.assertFalse(Child.objects.exists())
self.assertEqual(Parent.objects.count(), 1)
self.assertEqual(Parent.objects.filter(pk=p.pk).count(), 1)
# 1 for self delete, 1 for fast delete of empty "child" qs.
self.assertNumQueries(2, p.delete)
self.assertFalse(Parent.objects.exists())
# 1 for self delete, 1 for fast delete of empty "child" qs.
c = Child.objects.create()
p = c.parent_ptr
self.assertNumQueries(2, p.delete)
self.assertFalse(Parent.objects.exists())
self.assertFalse(Child.objects.exists())
def test_fast_delete_large_batch(self):
User.objects.bulk_create(User() for i in range(0, 2000))
# No problems here - we aren't going to cascade, so we will fast
# delete the objects in a single query.
self.assertNumQueries(1, User.objects.all().delete)
a = Avatar.objects.create(desc='a')
User.objects.bulk_create(User(avatar=a) for i in range(0, 2000))
# We don't hit parameter amount limits for a, so just one query for
# that + fast delete of the related objs.
self.assertNumQueries(2, a.delete)
self.assertEqual(User.objects.count(), 0)
| bsd-3-clause |
lukeiwanski/tensorflow | tensorflow/python/data/kernel_tests/concatenate_dataset_op_test.py | 42 | 5552 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import test
class ConcatenateDatasetTest(test.TestCase):
def testConcatenateDataset(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 15),
np.array([37.0, 38.0, 39.0, 40.0]))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15),
np.array([37.0, 38.0, 39.0, 40.0, 41.0]))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
concatenated = input_dataset.concatenate(dataset_to_concatenate)
self.assertEqual(concatenated.output_shapes, (tensor_shape.TensorShape(
[20]), tensor_shape.TensorShape([15]), tensor_shape.TensorShape([])))
iterator = concatenated.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(9):
result = sess.run(get_next)
if i < 4:
for component, result_component in zip(input_components, result):
self.assertAllEqual(component[i], result_component)
else:
for component, result_component in zip(to_concatenate_components,
result):
self.assertAllEqual(component[i - 4], result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testConcatenateDatasetDifferentShape(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
concatenated = input_dataset.concatenate(dataset_to_concatenate)
self.assertEqual(
[ts.as_list()
for ts in nest.flatten(concatenated.output_shapes)], [[20], [None]])
iterator = concatenated.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(9):
result = sess.run(get_next)
if i < 4:
for component, result_component in zip(input_components, result):
self.assertAllEqual(component[i], result_component)
else:
for component, result_component in zip(to_concatenate_components,
result):
self.assertAllEqual(component[i - 4], result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testConcatenateDatasetDifferentStructure(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1], [2], [3], [4], [5]]), 20),
np.tile(np.array([[12], [13], [14], [15], [16]]), 15),
np.array([37.0, 38.0, 39.0, 40.0, 41.0]))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
with self.assertRaisesRegexp(ValueError,
"don't have the same number of elements"):
input_dataset.concatenate(dataset_to_concatenate)
def testConcatenateDatasetDifferentType(self):
input_components = (
np.tile(np.array([[1], [2], [3], [4]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 4))
to_concatenate_components = (
np.tile(np.array([[1.0], [2.0], [3.0], [4.0]]), 5),
np.tile(np.array([[12], [13], [14], [15]]), 15))
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_components)
dataset_to_concatenate = dataset_ops.Dataset.from_tensor_slices(
to_concatenate_components)
with self.assertRaisesRegexp(TypeError, "have different types"):
input_dataset.concatenate(dataset_to_concatenate)
if __name__ == "__main__":
test.main()
| apache-2.0 |
2947721120/thumbor | vows/file_storage_vows.py | 7 | 7952 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from os.path import exists, dirname, join
import random
import shutil
from pyvows import Vows, expect
import thumbor.storages.file_storage as Storage
from thumbor.storages.file_storage import Storage as FileStorage
from thumbor.context import Context
from thumbor.config import Config
from fixtures.storage_fixture import IMAGE_URL, SAME_IMAGE_URL, IMAGE_BYTES, get_server
@Vows.batch
class FileStorageVows(Vows.Context):
class CreatesRootPathIfNoneFound(Vows.Context):
def topic(self):
config = Config(FILE_STORAGE_ROOT_PATH="/tmp/thumbor/file_storage/%s" % random.randint(1, 10000000))
storage = FileStorage(Context(config=config, server=get_server('ACME-SEC')))
storage.ensure_dir(config.FILE_STORAGE_ROOT_PATH)
return exists(config.FILE_STORAGE_ROOT_PATH)
def should_exist(self, topic):
expect(topic).to_be_true()
class CanStoreImage(Vows.Context):
def topic(self):
config = Config(FILE_STORAGE_ROOT_PATH="/tmp/thumbor/file_storage/")
storage = FileStorage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % 1, IMAGE_BYTES)
return storage.get(IMAGE_URL % 1)
def should_be_in_catalog(self, topic):
expect(topic.result()).not_to_be_null()
expect(topic.exception()).not_to_be_an_error()
class CanStoreImagesInSameFolder(Vows.Context):
def topic(self):
config = Config(FILE_STORAGE_ROOT_PATH="/tmp/thumbor/file_storage/")
root_path = join(config.FILE_STORAGE_ROOT_PATH, dirname(SAME_IMAGE_URL % 999))
if exists(root_path):
shutil.rmtree(root_path)
old_exists = Storage.storages.exists
Storage.storages.exists = lambda path: False
try:
storage = Storage.Storage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(SAME_IMAGE_URL % 998, IMAGE_BYTES)
storage.put(SAME_IMAGE_URL % 999, IMAGE_BYTES)
finally:
Storage.storages.exists = old_exists
return storage.get(SAME_IMAGE_URL % 999)
def should_be_in_catalog(self, topic):
expect(topic.result()).not_to_be_null()
expect(topic.exception()).not_to_be_an_error()
class CanGetImage(Vows.Context):
def topic(self):
config = Config(FILE_STORAGE_ROOT_PATH="/tmp/thumbor/file_storage/")
storage = FileStorage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % 2, IMAGE_BYTES)
return storage.get(IMAGE_URL % 2)
def should_not_be_null(self, topic):
expect(topic.result()).not_to_be_null()
expect(topic.exception()).not_to_be_an_error()
def should_have_proper_bytes(self, topic):
expect(topic.result()).to_equal(IMAGE_BYTES)
class CannotGetExpiredImage(Vows.Context):
def topic(self):
config = Config(FILE_STORAGE_ROOT_PATH="/tmp/thumbor/file_storage/", STORAGE_EXPIRATION_SECONDS=-1)
storage = FileStorage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % 2, IMAGE_BYTES)
return storage.get(IMAGE_URL % 2)
def should_be_null(self, topic):
expect(topic.result()).to_be_null()
expect(topic.exception()).not_to_be_an_error()
class CanGetIfExpireSetToNone(Vows.Context):
def topic(self):
config = Config(FILE_STORAGE_ROOT_PATH="/tmp/thumbor/file_storage/", STORAGE_EXPIRATION_SECONDS=None)
storage = FileStorage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % 2, IMAGE_BYTES)
return storage.get(IMAGE_URL % 2)
        def should_not_be_null(self, topic):
expect(topic.result()).not_to_be_null()
expect(topic.exception()).not_to_be_an_error()
class CryptoVows(Vows.Context):
class RaisesIfInvalidConfig(Vows.Context):
@Vows.capture_error
def topic(self):
config = Config(FILE_STORAGE_ROOT_PATH="/tmp/thumbor/file_storage/", STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True)
storage = FileStorage(Context(config=config, server=get_server('')))
storage.put(IMAGE_URL % 3, IMAGE_BYTES)
storage.put_crypto(IMAGE_URL % 3)
def should_be_an_error(self, topic):
expect(topic).to_be_an_error_like(RuntimeError)
expect(topic).to_have_an_error_message_of(
"STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be True if no SECURITY_KEY specified"
)
class GettingCryptoForANewImageReturnsNone(Vows.Context):
def topic(self):
config = Config(FILE_STORAGE_ROOT_PATH="/tmp/thumbor/file_storage/", STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True)
storage = FileStorage(Context(config=config, server=get_server('ACME-SEC')))
return storage.get_crypto(IMAGE_URL % 9999)
def should_be_null(self, topic):
expect(topic.result()).to_be_null()
class DoesNotStoreIfConfigSaysNotTo(Vows.Context):
def topic(self):
config = Config(FILE_STORAGE_ROOT_PATH="/tmp/thumbor/file_storage/")
storage = FileStorage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % 5, IMAGE_BYTES)
storage.put_crypto(IMAGE_URL % 5)
return storage.get_crypto(IMAGE_URL % 5)
def should_be_null(self, topic):
expect(topic.result()).to_be_null()
class CanStoreCrypto(Vows.Context):
def topic(self):
config = Config(FILE_STORAGE_ROOT_PATH="/tmp/thumbor/file_storage/", STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True)
storage = FileStorage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % 6, IMAGE_BYTES)
storage.put_crypto(IMAGE_URL % 6)
return storage.get_crypto(IMAGE_URL % 6)
def should_not_be_null(self, topic):
expect(topic.result()).not_to_be_null()
expect(topic.exception()).not_to_be_an_error()
def should_have_proper_key(self, topic):
expect(topic.result()).to_equal('ACME-SEC')
class DetectorVows(Vows.Context):
class CanStoreDetectorData(Vows.Context):
def topic(self):
config = Config(FILE_STORAGE_ROOT_PATH="/tmp/thumbor/file_storage/")
storage = FileStorage(Context(config=config, server=get_server('ACME-SEC')))
storage.put(IMAGE_URL % 7, IMAGE_BYTES)
storage.put_detector_data(IMAGE_URL % 7, 'some-data')
return storage.get_detector_data(IMAGE_URL % 7)
def should_not_be_null(self, topic):
expect(topic.result()).not_to_be_null()
expect(topic.exception()).not_to_be_an_error()
def should_equal_some_data(self, topic):
expect(topic.result()).to_equal('some-data')
class ReturnsNoneIfNoDetectorData(Vows.Context):
def topic(self):
config = Config(FILE_STORAGE_ROOT_PATH="/tmp/thumbor/file_storage/")
storage = FileStorage(Context(config=config, server=get_server('ACME-SEC')))
return storage.get_detector_data(IMAGE_URL % 10000)
def should_be_null(self, topic):
expect(topic.result()).to_be_null()
| mit |
littlstar/chromium.src | tools/PRESUBMIT.py | 43 | 1548 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for bisect/perf trybot.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
import imp
import os
def _ExamineConfigFiles(input_api):
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith('run-bisect-perf-regression.cfg') and
not f.LocalPath().endswith('run-perf-test.cfg')):
continue
try:
cfg_file = imp.load_source('config', os.path.basename(f.LocalPath()))
for k, v in cfg_file.config.iteritems():
if v:
return f.LocalPath()
except (IOError, AttributeError, TypeError):
return f.LocalPath()
return None
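# For illustration only (the field names below are assumptions, not an
# exhaustive list): an acceptable run-bisect-perf-regression.cfg or
# run-perf-test.cfg contains a config dict whose values are all empty, e.g.
#
#   config = {
#     'command': '',
#     'good_revision': '',
#     'bad_revision': '',
#     'metric': '',
#   }
#
# Any truthy value makes _ExamineConfigFiles report the file.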
def _CheckNoChangesToBisectConfigFile(input_api, output_api):
results = _ExamineConfigFiles(input_api)
if results:
return [output_api.PresubmitError(
'The bisection config file should only contain a config dict with '
'empty fields. Changes to this file should never be submitted.',
items=[results])]
return []
def CommonChecks(input_api, output_api):
results = []
results.extend(_CheckNoChangesToBisectConfigFile(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api)
| bsd-3-clause |
pietern/caffe2 | caffe2/python/crf.py | 4 | 15115 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package crf
# Module caffe2.python.crf
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, recurrent, model_helper, brew
import numpy as np
'''
Due to a limitation in RecurrentNetworkOp, this layer only supports batch_size=1.
In order to support batch_size > 1, we will have to implement the CRFUnit
and its gradient in C++ and handle the different batches there.
'''
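# Illustrative usage sketch (not part of the original API docs; blob names,
# shapes and num_classes below are assumptions):
#
# model = model_helper.ModelHelper(name='tagger')
# # `logits` is a T x num_classes blob produced upstream; `labels` has length T.
# # Batch size must be 1, as noted above.
# crf = CRFWithLoss(model, num_classes=5)
# loss = crf.crf_loss(logits, labels)
# model.AddGradientOperators([loss])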
class CRFWithLoss(object):
def __init__(self, model, num_classes, transitions_blob=None):
self.model = model
self.num_classes = num_classes
self.num_classes_padded = num_classes + 2 # After adding BOS and EOS
if not transitions_blob:
transitions_blob = self.model.param_init_net.UniformFill(
[],
[core.ScopedBlobReference('crf_transitions')],
shape=[self.num_classes_padded, self.num_classes_padded],
min=-1.0,
max=1.0
)
self.transitions = transitions_blob
self.model.params.append(self.transitions)
def crf_loss(self, predictions, labels, seq_lengths=None):
# Since the transitions matrix is a shared parameter, need to
# take a snapshot of it at the beginning since it can be updated
# in between the operators that use it when doing parallel updates
transitions_snapshot = self.model.net.Copy(
self.transitions, core.ScopedBlobReference('transitions_snapshot')
)
# Compute best path unary score from the logits
path_unary_score = self._gather_entries_sum(
predictions, labels, self.num_classes
)
# Append BOS and EOS entries to the predictions and labels
predictions = self._pad_predictions(predictions)
labels = self._pad_labels(labels)
# Compute best path binary scores from the transitions matrix
path_binary_score = self._path_binary_scores(
labels, transitions_snapshot, seq_lengths
)
path_total_score = self.model.net.Add(
[path_binary_score, path_unary_score],
core.ScopedBlobReference('path_total')
)
# Compute all paths score
zero_index = self.model.param_init_net.ConstantFill(
[], shape=[1], value=0
)
initial_state = self.model.net.Gather(
[predictions, zero_index],
core.ScopedBlobReference('rnn_initial'),
dense_gradient=True
)
input_data, _ = self.model.net.RemovePadding(
[predictions],
padding_width=1,
end_padding_width=0,
outputs=2,
)
input_data = self.model.net.ExpandDims(
[input_data],
core.ScopedBlobReference('rnn_input_data'),
dims=[1]
)
# Due to a bug in RecurrentNetworkGradientOp, we need to copy the
# transitions blob before sending it to the recurrent network
transitions_copy = self.model.net.Copy(
transitions_snapshot, core.ScopedBlobReference('transitions_copy')
)
all_paths_scores = self._crf_forward(
input_data, initial_state, transitions_copy
)
loss = self.model.net.Sub(
[all_paths_scores, path_total_score],
core.ScopedBlobReference('crf_loss')
)
return loss
def _pad_predictions(self, predictions):
# This function introduces two labels for the beginning and the end of
# the sequence, and makes the necessary updates to the predictions blob
low_score = -1000.0 # An arbitrary very low number
b_scores = np.array(
[[low_score] * self.num_classes + [0, low_score]]
).astype(np.float32)
e_scores = np.array(
[[low_score] * self.num_classes + [low_score, 0]]
).astype(np.float32)
b_scores = self.model.param_init_net.GivenTensorFill(
[], "b_scores", shape=[1, self.num_classes_padded], values=b_scores
)
e_scores = self.model.param_init_net.GivenTensorFill(
[], "e_scores", shape=[1, self.num_classes_padded], values=e_scores
)
zero_index = self.model.net.ConstantFill(
[], shape=[1, ], value=0
)
length = self.model.net.Gather(
[self.model.net.Shape([predictions]), zero_index],
)
length = self.model.net.Cast(length, to='int32')
t_range = self.model.net.LengthsRangeFill(length)
padding = self.model.net.ConstantFill([t_range], value=low_score)
padding = self.model.net.ExpandDims(padding, dims=[1])
padded_predictions, _ = self.model.net.Concat(
[predictions, padding, padding],
outputs=2,
axis=1
)
padded_predictions_concat, _ = self.model.net.Concat(
[b_scores, padded_predictions, e_scores],
outputs=2,
axis=0
)
return padded_predictions_concat
def _pad_labels(self, labels):
bos_i = self.num_classes
eos_i = self.num_classes + 1
bos_i_b = self.model.param_init_net.ConstantFill(
[], shape=[1], value=bos_i
)
eos_i_b = self.model.param_init_net.ConstantFill(
[], shape=[1], value=eos_i
)
labels = self.model.net.Cast([labels], to='int64')
padded_labels, _ = self.model.net.Concat(
[bos_i_b, labels, eos_i_b],
axis=0,
outputs=2
)
return padded_labels
def _path_binary_scores(self, labels, transitions, seq_lengths=None):
column_ids, _ = self.model.net.RemovePadding(
[labels],
outputs=2,
padding_width=1,
end_padding_width=0
)
row_ids, _ = self.model.net.RemovePadding(
[labels],
outputs=2,
padding_width=0,
end_padding_width=1
)
# Since there is no multi-dimensional gather, I flatten the matrix to
# a 1-d vector and transform the ids to (row_ids * num_columns +
# column_ids) and do gather in 1-d
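# Worked example with hypothetical numbers: if num_classes_padded == 4, the
# transitions matrix flattens to a length-16 vector and the entry at
# (row_id=2, column_id=3) sits at flattened index 2 * 4 + 3 == 11.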
num_columns_blob = self.model.net.ConstantFill(
[row_ids],
value=self.num_classes_padded,
)
flattened_ids = self.model.net.Mul([row_ids, num_columns_blob])
flattened_ids = self.model.net.Add([flattened_ids, column_ids])
flattened_transitions = self.model.net.FlattenToVec([transitions])
entries = self.model.net.Gather(
[flattened_transitions, flattened_ids],
dense_gradient=True
)
return self.model.ReduceFrontSum(entries)
def _gather_entries_sum(self, in_data, indices, index_size):
indices = self.model.net.Cast([indices], to='int64')
index_size_blob = self.model.param_init_net.ConstantFill(
[],
shape=[1],
value=index_size,
)
query_one_hot = self.model.net.OneHot(
[indices, index_size_blob]
)
flattend_query = self.model.net.FlattenToVec(query_one_hot)
flattend_data = self.model.net.FlattenToVec(in_data)
query_scores = self.model.net.DotProduct(
[flattend_query, flattend_data]
)
final_sum = self.model.net.ReduceFrontSum([query_scores])
return final_sum
def _crf_forward(
self,
input_blob,
initial_state,
transitions_copy,
seq_lengths=None
):
# Build the RNN net and get the last timestep output
out_last = self.build_crf_net(
input_blob, initial_state, transitions_copy
)
out_last, _ = self.model.net.Reshape(
[out_last],
outputs=2,
shape=(self.num_classes_padded,)
)
zero_segment_id = self.model.param_init_net.ConstantFill(
[],
value=0,
shape=[self.num_classes_padded],
dtype=core.DataType.INT32,
)
# Compute the accumulated total score of all the paths
accum_score = self.model.net.SortedSegmentRangeLogSumExp(
[out_last, zero_segment_id]
)
accum_score, _ = self.model.net.Reshape(
accum_score,
outputs=2,
shape=()
)
return accum_score
def build_crf_net(self, input_blob, initial_state, transitions):
'''
Adds the crf_net recurrent operator to the model.
model: model_helper.ModelHelper object that new operators will be added to
input_blob: the input sequence in a format T x N x D
where T is sequence size, N - batch size and D - input dimension
##Only supports batch-size 1##
seq_lengths: blob containing sequence lengths (unused)
'''
scope = 'crf_net'
def s(name):
''
# We have to manually scope due to our internal/external blob
# relationships.
return "{}/{}".format(str(scope), str(name))
step_model = model_helper.ModelHelper(name='crf_step',
param_model=self.model)
input_t, cell_t_prev, _ = (
step_model.net.AddExternalInputs(
core.ScopedBlobReference('input_t'),
core.ScopedBlobReference('cell_t_prev'),
transitions
)
)
zero_segment_id = step_model.param_init_net.ConstantFill(
[],
[s('zero_segment_id')],
value=0,
shape=[self.num_classes_padded],
dtype=core.DataType.INT32,
)
# A hack to bypass model cloning for test
step_model.param_init_net.AddExternalOutput(zero_segment_id)
""" the CRF step """
# Do tile
prev_transpose = brew.transpose(
step_model,
cell_t_prev,
[s('prev_transpose')],
axes=(0, 2, 1),
)
prev_tiled = step_model.net.Tile(
prev_transpose,
[s('prev_tiled')],
tiles=self.num_classes_padded,
axis=2,
)
input_t_tiled = step_model.net.Tile(
input_t,
[s('input_t_tiled')],
tiles=self.num_classes_padded,
axis=1,
)
input_with_prev = step_model.net.Add(
[prev_tiled, input_t_tiled],
[s('input_with_prev')]
)
all_with_transitions = step_model.net.Add(
[input_with_prev, transitions],
[s('prev_with_transitions')],
broadcast=1,
use_grad_hack=1,
)
all_with_transitions_reshaped, _ = step_model.net.Reshape(
all_with_transitions,
[s('all_with_transitions_reshaped'), s('all_with_transitions_orig')],
shape=(self.num_classes_padded, self.num_classes_padded)
)
cell_t = step_model.net.SortedSegmentRangeLogSumExp(
[all_with_transitions_reshaped, zero_segment_id],
[s('cell_t')],
)
step_model.net.AddExternalOutputs(cell_t)
""" recurrent network """
cell_input_blob = initial_state
out_all, out_last = recurrent.recurrent_net(
net=self.model.net,
cell_net=step_model.net,
inputs=[(input_t, input_blob)],
initial_cell_inputs=[
(cell_t_prev, cell_input_blob),
],
links={
cell_t_prev: cell_t,
},
scope=scope,
outputs_with_grads=(1,)
)
return out_last
def update_predictions(self, classes):
def crf_update_predictions_op(inputs, outputs):
# This operator computes the best path of classes by performing
# Viterbi decoding, then updates the predictions so that the tag
# on the best path has the highest score among the others
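# Tiny worked example (hypothetical numbers, 2 steps x 2 tags, zero
# transitions): predictions [[1, 0], [0, 2]] give trellis [[1, 0], [1, 3]],
# so the decoded Viterbi path is [tag 0, tag 1].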
predictions = inputs[0].data
transitions = inputs[1].data
predictions_shape = inputs[0].shape
outputs[0].reshape(predictions_shape)
trellis = np.zeros(predictions_shape)
backpointers = np.zeros(predictions_shape, dtype=np.int32)
trellis[0] = predictions[0]
for t in range(1, predictions_shape[0]):
v = np.expand_dims(trellis[t - 1], 1) + transitions
trellis[t] = predictions[t] + np.max(v, 0)
backpointers[t] = np.argmax(v, 0)
viterbi = [np.argmax(trellis[-1])]
for bp in reversed(backpointers[1:]):
viterbi.append(bp[viterbi[-1]])
viterbi.reverse()
new_predictions = np.zeros(predictions_shape)
old_bests = []
for i, w_predictions in enumerate(predictions):
# Get the current tag with the maximum score
new_predictions[i] = predictions[i]
old_best = np.argmax(w_predictions)
old_bests.append(old_best)
# Swap the scores of the current best tag and the tag on the
# Viterbi path
w_predictions[viterbi[i]], w_predictions[old_best] = \
w_predictions[old_best], w_predictions[viterbi[i]]
new_predictions[i] = w_predictions
# Remove the BOS and EOS entries from the predictions matrix
orig_predictions = new_predictions[1:-1, 0:-2]
outputs[0].reshape(orig_predictions.shape)
outputs[0].data[...] = orig_predictions
padded_classes = self._pad_predictions(classes)
new_classes = self.model.net.Python(crf_update_predictions_op)(
[padded_classes, self.transitions],
core.ScopedBlobReference('post_crf_classes')
)
return new_classes
| apache-2.0 |
nikitos/npui | netprofile_postfix/netprofile_postfix/models.py | 1 | 18458 | #!/usr/bin/env python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-
#
# NetProfile: Postfix module - Models
# © Copyright 2013 Alex 'Unik' Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
__all__ = [
"PostfixAdmin",
"PostfixDomain",
"PostfixDomainAdmins",
"PostfixLog",
"PostfixVacation",
"PostfixAlias",
"PostfixMailbox",
]
import datetime
from sqlalchemy import (
Column,
Date,
DateTime,
ForeignKey,
Index,
Sequence,
TIMESTAMP,
Unicode,
UnicodeText,
text,
Text
)
from sqlalchemy.orm import (
backref,
relationship
)
from sqlalchemy.ext.associationproxy import association_proxy
from netprofile.db.connection import Base
from netprofile.db.fields import (
ASCIIString,
ASCIIText,
ASCIITinyText,
DeclEnum,
NPBoolean,
UInt8,
UInt16,
UInt32,
npbool
)
from netprofile.db.ddl import Comment
from netprofile.tpl import TemplateObject
from netprofile.ext.columns import MarkupColumn
from netprofile.ext.wizards import (
SimpleWizard,
Step,
Wizard
)
from pyramid.i18n import (
TranslationStringFactory,
get_localizer
)
_ = TranslationStringFactory('netprofile_postfix')
class PostfixAdmin(Base):
"""
Admin Account class
"""
__tablename__ = 'postfix_admin'
__table_args__ = (
Comment('PostfixAdmin Admin Accounts'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_DOMAINS',
#'cap_read' : 'DOMAINS_LIST',
#'cap_create' : 'DOMAINS_CREATE',
#'cap_edit' : 'DOMAINS_EDIT',
#'cap_delete' : 'DOMAINS_DELETE',
'show_in_menu' : 'admin',
'menu_name' : _('Postfix Admins'),
'menu_order' : 50,
'default_sort' : ({ 'property': 'username' ,'direction': 'ASC' },),
'grid_view' : ('username', 'password', 'active'
),
'form_view' : ('username', 'password', 'created', 'modified', 'active'
),
'easy_search' : ('username',),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'create_wizard' : SimpleWizard(title=_('Add new Postfix admin account'))
}
}
)
id = Column(
'id',
UInt32(),
Sequence('postfix_admin_seq'),
Comment("ID"),
primary_key=True,
nullable=False,
default=0,
info={
'header_string' : _('ID')
}
)
#Foreign Key To PDNS Users?
username = Column(
'username',
Unicode(255),
Comment("Admin Username"),
nullable=False,
default='',
info={
'header_string' : _('Username')
}
)
password = Column(
'password',
Unicode(255),
Comment("Admin Password"),
nullable=False,
default = '',
info={
'header_string' : _('Password')
}
)
created = Column(
'created',
DateTime(),
Comment("User Creation Timestamp"),
nullable=False,
default=datetime.datetime.utcnow,
info={
'header_string' : _('Created')
}
)
modified = Column(
'modified',
DateTime(),
Comment("User Modification Timestamp"),
nullable=False,
default=datetime.datetime.utcnow,
info={
'header_string' : _('Modified')
}
)
active = Column(
'active',
UInt8(),
Comment("Is User Active"),
nullable=False,
default=1,
info={
'header_string' : _('Is Active?')
}
)
def __str__(self):
return(self.username)
class PostfixDomain(Base):
"""
Postfix Domain class
"""
__tablename__ = 'postfix_domain'
__table_args__ = (
Comment('Postfix Domains'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_DOMAINS',
#'cap_read' : 'DOMAINS_LIST',
#'cap_create' : 'DOMAINS_CREATE',
#'cap_edit' : 'DOMAINS_EDIT',
#'cap_delete' : 'DOMAINS_DELETE',
'show_in_menu' : 'admin',
'menu_name' : _('Postfix Domains'),
'menu_order' : 50,
'default_sort' : ({ 'property': 'domain' ,'direction': 'ASC' },),
'grid_view' : ('domain', 'created', 'modified', 'active',
),
'form_view' : ('domain', 'aliases', 'mailboxes',
'maxquota', 'transport', 'backupmx',
'created', 'modified', 'active',
'description',
),
'easy_search' : ('domain', ),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
#We should not add domains here but in the pdns module
'create_wizard' : SimpleWizard(title=_('Add new domain'))
}
}
)
id = Column(
'id',
UInt32(),
Sequence('postfix_domain_seq'),
Comment("ID"),
primary_key=True,
nullable=False,
default=0,
info={
'header_string' : _('ID')
}
)
domain = Column(
'domain',
Unicode(255),
Comment("Postfix Domain"),
nullable=False,
default='',
info={
'header_string' : _('Domain')
}
)
description = Column(
'description',
Unicode(255),
Comment("Domain Description"),
nullable=False,
default='',
info={
'header_string' : _('Description')
}
)
#Foreign key from aliases table
aliases = Column(
'aliases',
UInt32(),
Comment("Mailbox Aliases"),
nullable=False,
default=0,
info={
'header_string' : _('Aliases')
}
)
#Foreign key from mailboxes table
mailboxes = Column(
'mailboxes',
UInt32(),
Comment("Mailboxes"),
nullable=False,
default=0,
info={
'header_string' : _('Mailboxes')
}
)
maxquota = Column(
'maxquota',
UInt32(),
Comment("Max Quota"),
nullable=False,
default=0,
info={
'header_string' : _('Max Quota')
}
)
transport = Column(
'transport',
Unicode(255),
Comment("Transport"),
nullable=True,
info={
'header_string' : _('Transport')
}
)
backupmx = Column(
'backupmx',
UInt8(),
Comment("Backup MX Server"),
nullable=False,
default=1,
info={
'header_string' : _('Backup MX Server')
}
)
created = Column(
'created',
DateTime(),
Comment("Domain Creation Timestamp"),
nullable=False,
default=datetime.datetime.utcnow,
info={
'header_string' : _('Created')
}
)
modified = Column(
'modified',
DateTime(),
Comment("Domain Modification Timestamp"),
nullable=False,
default=datetime.datetime.utcnow,
info={
'header_string' : _('Modified')
}
)
active = Column(
'active',
UInt8(),
Comment("Is Domain Active"),
nullable=False,
default=1,
info={
'header_string' : _('Is Active?')
}
)
def __str__(self):
return(self.domain)
class PostfixDomainAdmins(Base):
"""
Postfix Domain-Admin relation class
"""
__tablename__ = 'postfix_domain_admins'
__table_args__ = (
Comment('Postfix Domain Admins table'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_DOMAINS',
#'cap_read' : 'DOMAINS_LIST',
#'cap_create' : 'DOMAINS_CREATE',
#'cap_edit' : 'DOMAINS_EDIT',
#'cap_delete' : 'DOMAINS_DELETE',
'show_in_menu' : 'admin',
'menu_name' : _('User Domains'),
'menu_order' : 50,
'default_sort' : ({ 'property': 'username' ,'direction': 'ASC' },),
'grid_view' : ('username', 'domain' ),
'form_view' : ('username', 'domain', 'created', 'active'),
'easy_search' : ('username', 'domain',),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'create_wizard' : SimpleWizard(title=_('Add new user-domain relation'))
}
}
)
id = Column(
'id',
UInt32(),
Sequence('postfix_domain_admins_seq'),
Comment("ID"),
primary_key=True,
nullable=False,
default=0,
info={
'header_string' : _('ID')
}
)
#Foreign key to postfix admins
username = Column(
'username',
Unicode(255),
Comment("Admin Username"),
nullable=False,
default='',
info={
'header_string' : _('Username')
}
)
#Foreign key to postfix domains
domain = Column(
'domain',
Unicode(255),
Comment("Postfix Domain"),
nullable=False,
default='',
info={
'header_string' : _('Domain')
}
)
created = Column(
'created',
DateTime(),
Comment("Domain Admin Creation Timestamp"),
nullable=False,
default=datetime.datetime.utcnow,
info={
'header_string' : _('Created')
}
)
active = Column(
'active',
UInt8(),
Comment("Is Domain Admin Active"),
nullable=False,
default=1,
info={
'header_string' : _('Is Active?')
}
)
def __str__(self):
return(self.username)
class PostfixLog(Base):
"""
Postfix Log class
"""
__tablename__ = 'postfix_log'
__table_args__ = (
Comment('Postfix Log'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_DOMAINS',
#'cap_read' : 'DOMAINS_LIST',
#'cap_create' : 'DOMAINS_CREATE',
#'cap_edit' : 'DOMAINS_EDIT',
#'cap_delete' : 'DOMAINS_DELETE',
'show_in_menu' : 'admin',
'menu_name' : _('Logs'),
'menu_order' : 50,
'default_sort' : ({ 'property': 'timestamp' ,'direction': 'DESC' },),
'grid_view' : ('username', 'timestamp', 'domain', 'action',
),
'form_view' : ('username', 'timestamp', 'domain', 'action', 'data',
),
'easy_search' : ('domain','username'),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'create_wizard' : SimpleWizard(title=_('Add new user-domain relation'))
}
}
)
id = Column(
'id',
UInt32(),
Sequence('postfix_log_seq'),
Comment("ID"),
primary_key=True,
nullable=False,
default=0,
info={
'header_string' : _('ID')
}
)
#Maybe ID as a primary key here and to database?
timestamp = Column(
'timestamp',
DateTime(),
Comment("Log Timestamp"),
nullable=False,
default=datetime.datetime.utcnow,
info={
'header_string' : _('Timestamp')
}
)
#Foreign key to postfix admins
username = Column(
'username',
Unicode(255),
Comment("Admin Username"),
nullable=False,
default='',
info={
'header_string' : _('Username')
}
)
#Foreign key to postfix domains
domain = Column(
'domain',
Unicode(255),
Comment("Postfix Domain"),
nullable=False,
default='',
info={
'header_string' : _('Domain')
}
)
action = Column(
'action',
Unicode(255),
Comment("Log Action"),
nullable=False,
default='',
info={
'header_string' : _('Action')
}
)
data = Column(
'data',
Unicode(255),
Comment("Log Data"),
nullable=False,
default='',
info={
'header_string' : _('Data')
}
)
def __str__(self):
return("{0}:{1}".format(self.username, self.timestamp))
class PostfixVacation(Base):
"""
Postfix Vacation Autoresponder class
"""
__tablename__ = 'postfix_vacation'
__table_args__ = (
Comment('Vacation Autoresponder'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_DOMAINS',
#'cap_read' : 'DOMAINS_LIST',
#'cap_create' : 'DOMAINS_CREATE',
#'cap_edit' : 'DOMAINS_EDIT',
#'cap_delete' : 'DOMAINS_DELETE',
'show_in_menu' : 'modules',
'menu_name' : _('Vacation Autoresponder'),
'menu_order' : 50,
'default_sort' : ({ 'property': 'created' ,'direction': 'DESC' },),
'grid_view' : ('domain', 'email', 'subject', 'created', 'active'),
'form_view' : ('domain', 'email', 'subject', 'body', 'cache', 'created', 'active'),
'easy_search' : ('domain','email'),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'create_wizard' : SimpleWizard(title=_('Add new vacation message'))
}
}
)
id = Column(
'id',
UInt32(),
Sequence('postfix_vacation_seq'),
Comment("ID"),
primary_key=True,
nullable=False,
default=0,
info={
'header_string' : _('ID')
}
)
#Maybe ID as a primary key here and to database?
email = Column(
'email',
Unicode(255),
Comment("Email"),
nullable=False,
default='',
info={
'header_string' : _('Email')
}
)
subject = Column(
'subject',
Unicode(255),
Comment("Subject"),
nullable=False,
default='',
info={
'header_string' : _('Subject')
}
)
body = Column(
'body',
UnicodeText(),
Comment("Body"),
nullable=False,
default='',
info={
'header_string' : _('Body')
}
)
cache = Column(
'cache',
UnicodeText(),
Comment("Cache"),
nullable=False,
default='',
info={
'header_string' : _('Cache')
}
)
domain = Column(
'domain',
Unicode(255),
Comment("Postfix Domain"),
nullable=False,
default='',
info={
'header_string' : _('Domain')
}
)
created = Column(
'created',
DateTime(),
Comment("Vacation Message Creation Timestamp"),
nullable=False,
default=datetime.datetime.utcnow,
info={
'header_string' : _('Created')
}
)
active = Column(
'active',
UInt8(),
Comment("Is Vacation Autoresponder Admin Active"),
nullable=False,
default=1,
info={
'header_string' : _('Is Active?')
}
)
def __str__(self):
return("{0}:{1}".format(self.email, self.subject))
class PostfixAlias(Base):
"""
Postfix Alias class
"""
__tablename__ = 'postfix_alias'
__table_args__ = (
Comment('Postfix Aliases'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_DOMAINS',
#'cap_read' : 'DOMAINS_LIST',
#'cap_create' : 'DOMAINS_CREATE',
#'cap_edit' : 'DOMAINS_EDIT',
#'cap_delete' : 'DOMAINS_DELETE',
'show_in_menu' : 'modules',
'menu_name' : _('Aliases'),
'menu_order' : 50,
'default_sort' : ({ 'property': 'created' ,'direction': 'DESC' },),
'grid_view' : ('address', 'goto', 'domain', 'active',),
'form_view' : ('address', 'goto', 'domain', 'created', 'modified', 'active',),
'easy_search' : ('address', 'domain'),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'create_wizard' : SimpleWizard(title=_('Add new alias'))
}
}
)
id = Column(
'id',
UInt32(),
Sequence('postfix_alias_seq'),
Comment("ID"),
primary_key=True,
nullable=False,
default=0,
info={
'header_string' : _('ID')
}
)
#Maybe ID as a primary key here and to database?
address = Column(
'address',
Unicode(255),
Comment("Address"),
nullable=False,
default='',
info={
'header_string' : _('Address')
}
)
goto = Column(
'goto',
UnicodeText(),
Comment("Destination"),
nullable=False,
default='',
info={
'header_string' : _('Destination')
}
)
#Foreign key?
domain = Column(
'domain',
Unicode(255),
Comment("Domain"),
nullable=False,
default='',
info={
'header_string' : _('Domain')
}
)
created = Column(
'created',
DateTime(),
Comment("Alias Creation Timestamp"),
nullable=False,
default=datetime.datetime.utcnow,
info={
'header_string' : _('Created')
}
)
modified = Column(
'modified',
DateTime(),
Comment("Alias Modification Timestamp"),
nullable=False,
default=datetime.datetime.utcnow,
info={
'header_string' : _('Modified')
}
)
active = Column(
'active',
UInt8(),
Comment("Is Alias Active"),
nullable=False,
default=1,
info={
'header_string' : _('Is Active?')
}
)
def __str__(self):
return("{0}:{1}".format(self.address, self.goto))
class PostfixMailbox(Base):
"""
Postfix Mailbox class
"""
__tablename__ = 'postfix_mailbox'
__table_args__ = (
Comment('Postfix Mailboxes'),
{
'mysql_engine' : 'InnoDB',
'mysql_charset' : 'utf8',
'info' : {
'cap_menu' : 'BASE_DOMAINS',
#'cap_read' : 'DOMAINS_LIST',
#'cap_create' : 'DOMAINS_CREATE',
#'cap_edit' : 'DOMAINS_EDIT',
#'cap_delete' : 'DOMAINS_DELETE',
'show_in_menu' : 'modules',
'menu_name' : _('Mailboxes'),
'menu_order' : 50,
'default_sort' : ({ 'property': 'created' ,'direction': 'DESC' },),
'grid_view' : ('name', 'domain', 'active', ),
'form_view' : ('name', 'domain', 'username',
'password', 'maildir', 'quota',
'created', 'modified', 'active', ),
'easy_search' : ('name', 'domain', 'username'),
'detail_pane' : ('netprofile_core.views', 'dpane_simple'),
'create_wizard' : SimpleWizard(title=_('Add new mailbox'))
}
}
)
id = Column(
'id',
UInt32(),
Sequence('postfix_mailbox_seq'),
Comment("ID"),
primary_key=True,
nullable=False,
default=0,
info={
'header_string' : _('ID')
}
)
#primary key here?
username = Column(
'username',
Unicode(255),
Comment("Username"),
nullable=False,
default='',
info={
'header_string' : _('Username')
}
)
password = Column(
'password',
Unicode(255),
Comment("Password"),
nullable=False,
default='',
info={
'header_string' : _('Password')
}
)
name = Column(
'name',
Unicode(255),
Comment("Mailbox Name"),
nullable=False,
default='',
info={
'header_string' : _('Mailbox Name')
}
)
maildir = Column(
'maildir',
Unicode(255),
Comment("Mail Directory"),
nullable=False,
default='',
info={
'header_string' : _('Mail Directory')
}
)
quota = Column(
'quota',
UInt32(),
Comment("Quota"),
nullable=False,
default=0,
info={
'header_string' : _('Quota')
}
)
#Foreign key here?
domain = Column(
'domain',
Unicode(255),
Comment("Domain"),
nullable=False,
default='',
info={
'header_string' : _('Domain')
}
)
created = Column(
'created',
DateTime(),
Comment("Mailbox Creation Timestamp"),
nullable=False,
default=datetime.datetime.utcnow,
info={
'header_string' : _('Created')
}
)
modified = Column(
'modified',
DateTime(),
Comment("Mailbox Modification Timestamp"),
nullable=False,
default=datetime.datetime.utcnow,
info={
'header_string' : _('Modified')
}
)
active = Column(
'active',
UInt8(),
Comment("Is Mailbox Active"),
nullable=False,
default=1,
info={
'header_string' : _('Is Active?')
}
)
def __str__(self):
return(self.name)
| agpl-3.0 |
mark-burnett/filament-dynamics | actin_dynamics/primitives/objectives/tau.py | 1 | 5939 | # Copyright (C) 2011 Mark Burnett
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bisect
from . import base_classes
from actin_dynamics.numerical import interpolation
from actin_dynamics.numerical import measurements
from actin_dynamics.numerical import regression
from actin_dynamics import logger
log = logger.getLogger(__file__)
class HalfTime(base_classes.Objective):
def __init__(self, analysis_name=None, base_value=None,
subtract_fraction=0, second_subtract_fraction=0, *args, **kwargs):
self.analysis_name = analysis_name
self.half_value = float(base_value) * (
1 - float(subtract_fraction)
- float(second_subtract_fraction)) / 2
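# For example (hypothetical numbers): base_value=2.0, subtract_fraction=0.1
# and second_subtract_fraction=0.1 give half_value = 2.0 * 0.8 / 2 = 0.8.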
base_classes.Objective.__init__(self, *args, **kwargs)
def perform(self, run, target):
times, values, errors = run.analyses[self.analysis_name]
target.value = _calc_halftime(times, values, self.half_value)
class HalfTimeError(base_classes.Objective):
def __init__(self, analysis_name=None, base_value=None,
subtract_fraction=0, second_subtract_fraction=0, *args, **kwargs):
self.analysis_name = analysis_name
self.half_value = float(base_value) * (
1 - float(subtract_fraction)
- float(second_subtract_fraction)) / 2
base_classes.Objective.__init__(self, *args, **kwargs)
def perform(self, run, target):
times, values, errors = run.analyses[self.analysis_name]
# log.warn('times: %s', times)
# log.warn('values: %s', values)
# log.warn('errors: %s', errors)
halftime = _calc_halftime(times, values, self.half_value)
# log.warn('half_value = %s, calculated halftime = %s',
# self.half_value, halftime)
left_index = bisect.bisect_left(times, halftime)
left_index = min(left_index, len(times) - 2)
# log.warn('left_index = %s, times: %s', left_index,
# times[left_index:left_index + 2])
left_time, right_time = times[left_index:left_index + 2]
left_value, right_value = values[left_index:left_index + 2]
left_error, right_error = errors[left_index:left_index + 2]
half_value_error = interpolation.linear_project(left_time, left_error,
right_time, right_error, halftime)
slope = (right_value - left_value) / (right_time - left_time)
target.value = half_value_error / slope
# XXX This obviously breaks if the halftime isn't reached.
def _calc_halftime(times, values, half_value):
for i, v in enumerate(values):
if v > half_value:
break
left_time = times[i-1]
left_value = values[i-1]
right_time = times[i]
right_value = values[i]
if (left_value == right_value):
log.warn('Matching values: left = %s, right = %s', left_value, right_value)
if (left_time == right_time):
log.warn('Matching times: left = %s, right = %s', left_time, right_time)
y = interpolation.linear_project(left_value, left_time,
right_value, right_time, half_value)
if y != y: # NaN check: NaN never compares equal to itself
log.error('Halftime is not a number: i = %s, lt = %s, rt = %s, lv = %s, rv = %s',
i, left_time, right_time, left_value, right_value)
return y
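# Worked example (hypothetical numbers, assuming linear_project performs
# straight-line interpolation): times=[0, 10], values=[0.0, 1.0] and
# half_value=0.25 give a halftime of 2.5.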
class PeakTime(base_classes.Objective):
def __init__(self, analysis_name=None, *args, **kwargs):
self.analysis_name = analysis_name
base_classes.Objective.__init__(self, *args, **kwargs)
def perform(self, run, target):
times, values, errors = run.analyses[self.analysis_name]
max_value = max(values)
max_index = values.index(max_value)
target.value = times[max_index]
class PeakValue(base_classes.Objective):
def __init__(self, analysis_name=None, *args, **kwargs):
self.analysis_name = analysis_name
base_classes.Objective.__init__(self, *args, **kwargs)
def perform(self, run, target):
times, values, errors = run.analyses[self.analysis_name]
max_value = max(values)
target.value = max_value
class FitTau(base_classes.Objective):
def __init__(self, analysis_name=None, start_time=None, *args, **kwargs):
self.analysis_name = analysis_name
self.start_time = float(start_time)
base_classes.Objective.__init__(self, *args, **kwargs)
def perform(self, run, target):
times, values, errors = run.analyses[self.analysis_name]
sliced_times, sliced_values = measurements.time_slice((times, values),
start_time=self.start_time)
tau, scale = regression.fit_exponential(sliced_times, sliced_values)
target.value = tau
class FitMagnitude(base_classes.Objective):
def __init__(self, analysis_name=None, start_time=None, *args, **kwargs):
self.analysis_name = analysis_name
self.start_time = float(start_time)
base_classes.Objective.__init__(self, *args, **kwargs)
def perform(self, run, target):
times, values, errors = run.analyses[self.analysis_name]
sliced_times, sliced_values = measurements.time_slice((times, values),
start_time=self.start_time)
tau, scale = regression.fit_exponential(sliced_times, sliced_values)
target.value = scale
| gpl-3.0 |
beezee/GAE-Django-site | django/contrib/auth/tests/remote_user.py | 150 | 6422 | from datetime import datetime
from django.conf import settings
from django.contrib.auth.backends import RemoteUserBackend
from django.contrib.auth.models import User
from django.test import TestCase
class RemoteUserTest(TestCase):
urls = 'django.contrib.auth.tests.urls'
middleware = 'django.contrib.auth.middleware.RemoteUserMiddleware'
backend = 'django.contrib.auth.backends.RemoteUserBackend'
# Usernames to be passed in REMOTE_USER for the test_known_user test case.
known_user = 'knownuser'
known_user2 = 'knownuser2'
def setUp(self):
self.curr_middleware = settings.MIDDLEWARE_CLASSES
self.curr_auth = settings.AUTHENTICATION_BACKENDS
settings.MIDDLEWARE_CLASSES += (self.middleware,)
settings.AUTHENTICATION_BACKENDS = (self.backend,)
def test_no_remote_user(self):
"""
Tests requests where no remote user is specified and ensures that no
users get created.
"""
num_users = User.objects.count()
response = self.client.get('/remote_user/')
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', REMOTE_USER=None)
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
response = self.client.get('/remote_user/', REMOTE_USER='')
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
def test_unknown_user(self):
"""
Tests the case where the username passed in the header does not exist
as a User.
"""
num_users = User.objects.count()
response = self.client.get('/remote_user/', REMOTE_USER='newuser')
self.assertEqual(response.context['user'].username, 'newuser')
self.assertEqual(User.objects.count(), num_users + 1)
User.objects.get(username='newuser')
# Another request with same user should not create any new users.
response = self.client.get('/remote_user/', REMOTE_USER='newuser')
self.assertEqual(User.objects.count(), num_users + 1)
def test_known_user(self):
"""
Tests the case where the username passed in the header is a valid User.
"""
User.objects.create(username='knownuser')
User.objects.create(username='knownuser2')
num_users = User.objects.count()
response = self.client.get('/remote_user/', REMOTE_USER=self.known_user)
self.assertEqual(response.context['user'].username, 'knownuser')
self.assertEqual(User.objects.count(), num_users)
# Test that a different user passed in the headers causes the new user
# to be logged in.
response = self.client.get('/remote_user/', REMOTE_USER=self.known_user2)
self.assertEqual(response.context['user'].username, 'knownuser2')
self.assertEqual(User.objects.count(), num_users)
def test_last_login(self):
"""
Tests that a user's last_login is set the first time they make a
request but not updated in subsequent requests with the same session.
"""
user = User.objects.create(username='knownuser')
# Set last_login to something so we can determine if it changes.
default_login = datetime(2000, 1, 1)
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/', REMOTE_USER=self.known_user)
self.assertNotEqual(default_login, response.context['user'].last_login)
user = User.objects.get(username='knownuser')
user.last_login = default_login
user.save()
response = self.client.get('/remote_user/', REMOTE_USER=self.known_user)
self.assertEqual(default_login, response.context['user'].last_login)
def tearDown(self):
"""Restores settings to avoid breaking other tests."""
settings.MIDDLEWARE_CLASSES = self.curr_middleware
settings.AUTHENTICATION_BACKENDS = self.curr_auth
class RemoteUserNoCreateBackend(RemoteUserBackend):
"""Backend that doesn't create unknown users."""
create_unknown_user = False
class RemoteUserNoCreateTest(RemoteUserTest):
"""
Contains the same tests as RemoteUserTest, but using a custom auth backend
class that doesn't create unknown users.
"""
backend =\
'django.contrib.auth.tests.remote_user.RemoteUserNoCreateBackend'
def test_unknown_user(self):
num_users = User.objects.count()
response = self.client.get('/remote_user/', REMOTE_USER='newuser')
self.assertTrue(response.context['user'].is_anonymous())
self.assertEqual(User.objects.count(), num_users)
class CustomRemoteUserBackend(RemoteUserBackend):
"""
Backend that overrides RemoteUserBackend methods.
"""
def clean_username(self, username):
"""
Grabs username before the @ character.
"""
return username.split('@')[0]
def configure_user(self, user):
"""
Sets user's email address.
"""
user.email = '[email protected]'
user.save()
return user
class RemoteUserCustomTest(RemoteUserTest):
"""
Tests a custom RemoteUserBackend subclass that overrides the clean_username
and configure_user methods.
"""
backend =\
'django.contrib.auth.tests.remote_user.CustomRemoteUserBackend'
# REMOTE_USER strings with e-mail addresses for the custom backend to
# clean.
known_user = '[email protected]'
known_user2 = '[email protected]'
def test_known_user(self):
"""
The strings passed in REMOTE_USER should be cleaned and the known users
should not have been configured with an email address.
"""
super(RemoteUserCustomTest, self).test_known_user()
self.assertEqual(User.objects.get(username='knownuser').email, '')
self.assertEqual(User.objects.get(username='knownuser2').email, '')
def test_unknown_user(self):
"""
The unknown user created should be configured with an email address.
"""
super(RemoteUserCustomTest, self).test_unknown_user()
newuser = User.objects.get(username='newuser')
self.assertEqual(newuser.email, '[email protected]')
| bsd-3-clause |
thumbimigwe/echorizr | lib/python2.7/site-packages/pip/_vendor/distlib/locators.py | 129 | 50493 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
return client.list_packages()
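# Illustrative usage (requires network access to the index; output is indicative):
#
# names = get_all_distribution_names()
# print(len(names), 'projects known to the index')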
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None:
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None:
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
Give a URL a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
return (t.scheme == 'https', 'pypi.python.org' in t.netloc,
is_wheel, compatible, basename)
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours https:// URLs over http://, archives
from PyPI over those from other locations, wheel compatibility (if a
wheel) and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
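# Illustrative comparison (hypothetical URLs): per the docstring above, an
# https archive hosted on PyPI such as
# 'https://pypi.python.org/packages/source/f/foo/foo-1.0.tar.gz' is preferred
# over a plain-http mirror URL for the same foo-1.0 archive.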
def split_filename(self, filename, project_name):
"""
Attempt to split a filename into project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
name1, name2 = name1.lower(), name2.lower()
if name1 == name2:
result = True
else:
# distribute replaces '-' by '_' in project names, so it
# can tell where the version starts in a filename.
result = name1.replace('_', '-') == name2.replace('_', '-')
return result
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='):
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/':
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if is_compatible(wheel, self.wheel_tags):
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e:
logger.warning('invalid path for wheel: %s', path)
elif path.endswith(self.downloadable_extensions):
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t:
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver:
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
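# Illustrative result (hypothetical URL): calling
# convert_url_to_download_info('https://example.com/dists/foo-1.0.tar.gz', 'foo')
# returns a dict resembling
# {'name': 'foo', 'version': '1.0', 'filename': 'foo-1.0.tar.gz',
#  'url': 'https://example.com/dists/foo-1.0.tar.gz'}.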
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at keys of the form
'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a
dictionary for a specific version, which typically holds information
gleaned from a filename or URL for an archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info['url']
result['digests'][url] = digest
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, url)
result['urls'].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None:
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
else:
logger.debug('skipping pre-release '
'version %s of %s', k, matcher.name)
except Exception: # pragma: no cover
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd:
d[url] = sd[url]
result.digests = d
self.matcher = None
return result
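# Illustrative usage with a concrete locator (the index URL and attribute
# access below are assumptions):
#
# locator = SimpleScrapingLocator('https://pypi.python.org/simple/')
# dist = locator.locate('requests (>= 2.0)')
# if dist is not None:
#     print(dist.name, dist.version, dist.source_url)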
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info['url']
digest = self._get_digest(info)
result['urls'].setdefault(v, set()).add(url)
result['digests'][url] = digest
return result
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
dist.locator = self
urls = d['urls']
result[md.version] = dist
for info in d['urls']:
url = info['url']
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result['urls'].setdefault(md.version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# Now get other releases
for version, infos in d['releases'].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info['url']
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result['urls'].setdefault(version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile("""
(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)?
href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*))
(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads used for I/O.
This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.setDaemon(True)
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
r'win(32|-amd64)|macosx-?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
Does an URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
See if an URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
Get the HTML for an URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError: # pragma: no cover
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
                         is searched.
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path): # pragma: no cover
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'], info['version'],
summary=data.get('summary',
'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
result['urls'].setdefault(dist.version, set()).add(info['url'])
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {'urls': {}, 'digests': {}}
else:
result = {
dist.version: dist,
'urls': {dist.version: set([dist.source_url])},
'digests': {dist.version: set([None])}
}
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get('urls', {})
digests = result.get('digests', {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get('urls')
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get('digests')
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.python.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
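# A minimal usage sketch (the requirement string is illustrative and assumes
# the configured index is reachable; it is not part of the module's API beyond
# the names defined above):
#
#     dist = locate('requests (>= 2.0)')
#     if dist is not None:
#         print(dist.name_and_version)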
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError: # pragma: no cover
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
        :return: A set of distributions which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`required` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
#import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
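# A minimal sketch of driving the finder (the requirement string is
# illustrative; actual results depend on the locator in use and on network
# access):
#
#     finder = DependencyFinder()                  # falls back to default_locator
#     dists, problems = finder.find('flask (>= 0.10)')
#     for dist in dists:
#         print(dist.name_and_version, dist.build_time_dependency)
#     for problem in problems:
#         print(problem)                           # e.g. ('unsatisfied', requirement)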
| mit |
newerthcom/savagerebirth | libs/python-2.72/Lib/lib2to3/fixes/fix_renames.py | 326 | 2218 | """Fix incompatible renames
Fixes:
* sys.maxint -> sys.maxsize
"""
# Author: Christian Heimes
# based on Collin Winter's fix_import
# Local imports
from .. import fixer_base
from ..fixer_util import Name, attr_chain
MAPPING = {"sys": {"maxint" : "maxsize"},
}
LOOKUP = {}
def alternates(members):
return "(" + "|".join(map(repr, members)) + ")"
def build_pattern():
#bare = set()
for module, replace in MAPPING.items():
for old_attr, new_attr in replace.items():
LOOKUP[(module, old_attr)] = new_attr
#bare.add(module)
#bare.add(old_attr)
#yield """
# import_name< 'import' (module=%r
# | dotted_as_names< any* module=%r any* >) >
# """ % (module, module)
yield """
import_from< 'from' module_name=%r 'import'
( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
""" % (module, old_attr, old_attr)
yield """
power< module_name=%r trailer< '.' attr_name=%r > any* >
""" % (module, old_attr)
#yield """bare_name=%s""" % alternates(bare)
class FixRenames(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "|".join(build_pattern())
order = "pre" # Pre-order tree traversal
# Don't match the node if it's within another match
def match(self, node):
match = super(FixRenames, self).match
results = match(node)
if results:
if any(match(obj) for obj in attr_chain(node, "parent")):
return False
return results
return False
#def start_tree(self, tree, filename):
# super(FixRenames, self).start_tree(tree, filename)
# self.replace = {}
def transform(self, node, results):
mod_name = results.get("module_name")
attr_name = results.get("attr_name")
#bare_name = results.get("bare_name")
#import_mod = results.get("module")
if mod_name and attr_name:
new_attr = unicode(LOOKUP[(mod_name.value, attr_name.value)])
attr_name.replace(Name(new_attr, prefix=attr_name.prefix))
| gpl-2.0 |
cloudnull/ansible-modules-core | cloud/openstack/quantum_floating_ip_associate.py | 9 | 8149 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import time
try:
from novaclient.v1_1 import client as nova_client
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_floating_ip_associate
version_added: "1.2"
short_description: Associate or disassociate a particular floating IP with an instance
description:
- Associates or disassociates a specific floating IP with a particular instance
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- the tenant name of the login user
required: true
default: true
auth_url:
description:
- the keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- name of the region
required: false
default: None
state:
description:
- indicates the desired state of the resource
choices: ['present', 'absent']
default: present
instance_name:
description:
- name of the instance to which the public IP should be assigned
required: true
default: None
ip_address:
description:
- floating ip that should be assigned to the instance
required: true
default: None
requirements:
- "python >= 2.6"
- "python-novaclient"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
# Associate a specific floating IP with an Instance
- quantum_floating_ip_associate:
state=present
login_username=admin
login_password=admin
login_tenant_name=admin
ip_address=1.1.1.1
instance_name=vm1
'''
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception, e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception, e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception, e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _get_server_state(module, nova):
server_info = None
server = None
try:
for server in nova.servers.list():
if server:
info = server._info
if info['name'] == module.params['instance_name']:
if info['status'] != 'ACTIVE' and module.params['state'] == 'present':
module.fail_json(msg="The VM is available but not Active. state:" + info['status'])
server_info = info
break
except Exception, e:
module.fail_json(msg = "Error in getting the server list: %s" % e.message)
return server_info, server
def _get_port_id(neutron, module, instance_id):
kwargs = dict(device_id = instance_id)
try:
ports = neutron.list_ports(**kwargs)
except Exception, e:
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None
return ports['ports'][0]['id']
def _get_floating_ip_id(module, neutron):
kwargs = {
'floating_ip_address': module.params['ip_address']
}
try:
ips = neutron.list_floatingips(**kwargs)
except Exception, e:
module.fail_json(msg = "error in fetching the floatingips's %s" % e.message)
if not ips['floatingips']:
module.fail_json(msg = "Could find the ip specified in parameter, Please check")
ip = ips['floatingips'][0]['id']
if not ips['floatingips'][0]['port_id']:
state = "detached"
else:
state = "attached"
return state, ip
def _update_floating_ip(neutron, module, port_id, floating_ip_id):
kwargs = {
'port_id': port_id
}
try:
result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs})
except Exception, e:
module.fail_json(msg = "There was an error in updating the floating ip address: %s" % e.message)
module.exit_json(changed = True, result = result, public_ip=module.params['ip_address'])
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=True),
instance_name = dict(required=True),
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-novaclient, python-keystoneclient, and either python-neutronclient or python-quantumclient are required')
try:
nova = nova_client.Client(module.params['login_username'], module.params['login_password'],
module.params['login_tenant_name'], module.params['auth_url'], service_type='compute')
except Exception, e:
module.fail_json( msg = " Error in authenticating to nova: %s" % e.message)
neutron = _get_neutron_client(module, module.params)
state, floating_ip_id = _get_floating_ip_id(module, neutron)
if module.params['state'] == 'present':
if state == 'attached':
module.exit_json(changed = False, result = 'attached', public_ip=module.params['ip_address'])
server_info, server_obj = _get_server_state(module, nova)
if not server_info:
module.fail_json(msg = " The instance name provided cannot be found")
port_id = _get_port_id(neutron, module, server_info['id'])
if not port_id:
module.fail_json(msg = "Cannot find a port for this instance, maybe fixed ip is not assigned")
_update_floating_ip(neutron, module, port_id, floating_ip_id)
if module.params['state'] == 'absent':
if state == 'detached':
module.exit_json(changed = False, result = 'detached')
if state == 'attached':
_update_floating_ip(neutron, module, None, floating_ip_id)
module.exit_json(changed = True, result = "detached")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
philippp/civic | ingestion/record/clean_records.py | 1 | 3145 | #!/usr/bin/python
import datetime
import logging
import pprint
import json
import glob
import re
"""
Given a source file (or wildcard) for JSON-formatted records, clean the
records and write the cleaned JSON to the output directory."""
def clean(json_record_file, output_dir):
source_files = glob.glob(json_record_file)
if len(source_files) == 0:
logging.error("Found no files in %s.", json_record_file)
return False
for source_file in source_files:
source_filename = source_file.split("/")[-1]
f = open(source_file, 'r')
records = json.loads(f.read())
cleaned_records = []
for record in records:
new_record = clean_record(record)
if new_record:
cleaned_records.append(new_record)
else:
pprint.pprint(record)
f2 = open(output_dir + "/" + source_filename, "w")
f2.write(json.dumps(cleaned_records))
f2.close()
f.close()
if len(records) != len(cleaned_records):
logging.info("Cleaned %s, %d vs %d records",
source_file, len(records), len(cleaned_records))
return True
def find_formatted_value(raw_value, regexp):
result = re.search(regexp, raw_value)
if not result:
return ""
return result.group(1)
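# For example, with the date pattern used by clean_record below, this helper
# would be expected to behave as follows (input values are illustrative):
#
#     find_formatted_value("Recorded on 05/12/2010", r"(\d{2}\/\d{2}\/\d{4})")
#     # -> "05/12/2010"
#     find_formatted_value("no date present", r"(\d{2}\/\d{2}\/\d{4})")
#     # -> ""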
def clean_record(record):
new_record = dict(record)
keys_regexps_required = (
("apn", "([\d\w]+\-\d+)", False),
("date", "(\d{2}\/\d{2}\/\d{4})", True),
("id", "(\w{1,2}\d+\-\d{2})", True),
("reel_image", "([A-Z]\d{2,4},\d{4})", False),
("doctype", "([A-Z_\-\s]+)", True))
for key, regex, required in keys_regexps_required:
if getattr(new_record[key], "__iter__", None):
clean_list = list()
for i in range(len(new_record[key])):
val = new_record[key][i]
clean_val = find_formatted_value(val, regex)
if clean_val:
clean_list.append(clean_val)
else:
continue
if required and len(clean_list) == 0:
logging.error(
"Record %s contained no valid values for %s",
record["id"], key)
return None
new_record[key] = clean_list
else:
clean_val = find_formatted_value(new_record[key], regex)
if clean_val:
new_record[key] = clean_val
else:
if required:
logging.error("Record %s had an invalid value for %s",
record["id"], key)
return None
if getattr(new_record['date'], "__iter__", None):
new_record['date'] = new_record['date'][0]
if getattr(new_record['reel_image'], "__iter__", None):
if len(new_record['reel_image']) > 0:
new_record['reel_image'] = new_record['reel_image'][0]
else:
new_record['reel_image'] = ""
return new_record
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
clean("data/record/deed/*.json", "deeds_clean")
| mpl-2.0 |
cortext/crawtextV2 | ~/venvs/crawler/lib/python2.7/site-packages/pymongo/read_preferences.py | 17 | 6471 | # Copyright 2012-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for choosing which member of a replica set to read from."""
import random
from pymongo.errors import ConfigurationError
class ReadPreference:
"""An enum that defines the read preference modes supported by PyMongo.
Used in three cases:
:class:`~pymongo.mongo_client.MongoClient` connected to a single host:
* `PRIMARY`: Queries are allowed if the host is standalone or the replica
set primary.
* All other modes allow queries to standalone servers, to the primary, or
to secondaries.
:class:`~pymongo.mongo_client.MongoClient` connected to a mongos, with a
sharded cluster of replica sets:
* `PRIMARY`: Queries are sent to the primary of a shard.
* `PRIMARY_PREFERRED`: Queries are sent to the primary if available,
otherwise a secondary.
* `SECONDARY`: Queries are distributed among shard secondaries. An error
is raised if no secondaries are available.
* `SECONDARY_PREFERRED`: Queries are distributed among shard secondaries,
or the primary if no secondary is available.
* `NEAREST`: Queries are distributed among all members of a shard.
:class:`~pymongo.mongo_replica_set_client.MongoReplicaSetClient`:
* `PRIMARY`: Queries are sent to the primary of the replica set.
* `PRIMARY_PREFERRED`: Queries are sent to the primary if available,
otherwise a secondary.
* `SECONDARY`: Queries are distributed among secondaries. An error
is raised if no secondaries are available.
* `SECONDARY_PREFERRED`: Queries are distributed among secondaries,
or the primary if no secondary is available.
* `NEAREST`: Queries are distributed among all members.
"""
PRIMARY = 0
PRIMARY_PREFERRED = 1
SECONDARY = 2
SECONDARY_ONLY = 2
SECONDARY_PREFERRED = 3
NEAREST = 4
# For formatting error messages
modes = {
ReadPreference.PRIMARY: 'PRIMARY',
ReadPreference.PRIMARY_PREFERRED: 'PRIMARY_PREFERRED',
ReadPreference.SECONDARY: 'SECONDARY',
ReadPreference.SECONDARY_PREFERRED: 'SECONDARY_PREFERRED',
ReadPreference.NEAREST: 'NEAREST',
}
_mongos_modes = [
'primary',
'primaryPreferred',
'secondary',
'secondaryPreferred',
'nearest',
]
def mongos_mode(mode):
return _mongos_modes[mode]
def mongos_enum(enum):
return _mongos_modes.index(enum)
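# For example, given the tables above:
#
#     mongos_mode(ReadPreference.SECONDARY_PREFERRED)  # -> 'secondaryPreferred'
#     mongos_enum('nearest')                           # -> 4 (ReadPreference.NEAREST)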
def select_primary(members):
for member in members:
if member.is_primary:
return member
return None
def select_member_with_tags(members, tags, secondary_only, latency):
candidates = []
for candidate in members:
if secondary_only and candidate.is_primary:
continue
if not (candidate.is_primary or candidate.is_secondary):
# In RECOVERING or similar state
continue
if candidate.matches_tags(tags):
candidates.append(candidate)
if not candidates:
return None
# ping_time is in seconds
fastest = min([candidate.get_avg_ping_time() for candidate in candidates])
near_candidates = [
candidate for candidate in candidates
if candidate.get_avg_ping_time() - fastest < latency / 1000.]
return random.choice(near_candidates)
def select_member(
members,
mode=ReadPreference.PRIMARY,
tag_sets=None,
latency=15
):
"""Return a Member or None.
"""
if tag_sets is None:
tag_sets = [{}]
# For brevity
PRIMARY = ReadPreference.PRIMARY
PRIMARY_PREFERRED = ReadPreference.PRIMARY_PREFERRED
SECONDARY = ReadPreference.SECONDARY
SECONDARY_PREFERRED = ReadPreference.SECONDARY_PREFERRED
NEAREST = ReadPreference.NEAREST
if mode == PRIMARY:
if tag_sets != [{}]:
raise ConfigurationError("PRIMARY cannot be combined with tags")
return select_primary(members)
elif mode == PRIMARY_PREFERRED:
# Recurse.
candidate_primary = select_member(members, PRIMARY, [{}], latency)
if candidate_primary:
return candidate_primary
else:
return select_member(members, SECONDARY, tag_sets, latency)
elif mode == SECONDARY:
for tags in tag_sets:
candidate = select_member_with_tags(members, tags, True, latency)
if candidate:
return candidate
return None
elif mode == SECONDARY_PREFERRED:
# Recurse.
candidate_secondary = select_member(
members, SECONDARY, tag_sets, latency)
if candidate_secondary:
return candidate_secondary
else:
return select_member(members, PRIMARY, [{}], latency)
elif mode == NEAREST:
for tags in tag_sets:
candidate = select_member_with_tags(members, tags, False, latency)
if candidate:
return candidate
# Ran out of tags.
return None
else:
raise ConfigurationError("Invalid mode %s" % repr(mode))
"""Commands that may be sent to replica-set secondaries, depending on
ReadPreference and tags. All other commands are always run on the primary.
"""
secondary_ok_commands = frozenset([
"group", "aggregate", "collstats", "dbstats", "count", "distinct",
"geonear", "geosearch", "geowalk", "mapreduce", "getnonce", "authenticate",
"text", "parallelcollectionscan"
])
class MovingAverage(object):
def __init__(self, samples):
"""Immutable structure to track a 5-sample moving average.
"""
self.samples = samples[-5:]
assert self.samples
self.average = sum(self.samples) / float(len(self.samples))
def clone_with(self, sample):
"""Get a copy of this instance plus a new sample"""
return MovingAverage(self.samples + [sample])
def get(self):
return self.average
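# A small illustration of the 5-sample window:
#
#     avg = MovingAverage([1.0])
#     avg = avg.clone_with(2.0)   # samples are now [1.0, 2.0]
#     avg.get()                   # -> 1.5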
| mit |
mahak/keystone | keystone/tests/protection/v3/test_roles.py | 2 | 13262 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import http.client
from keystone.common import provider_api
import keystone.conf
from keystone.tests.common import auth as common_auth
from keystone.tests import unit
from keystone.tests.unit import base_classes
from keystone.tests.unit import ksfixtures
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
class _SystemUserRoleTests(object):
"""Common default functionality for all system users."""
def test_user_can_list_roles(self):
PROVIDERS.role_api.create_role(uuid.uuid4().hex, unit.new_role_ref())
with self.test_client() as c:
r = c.get('/v3/roles', headers=self.headers)
# With bootstrap setup and the role we just created, there should
# be four roles present in the deployment. Bootstrap creates
# ``admin``, ``member``, and ``reader``.
self.assertEqual(4, len(r.json['roles']))
def test_user_can_get_a_role(self):
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref()
)
with self.test_client() as c:
r = c.get('/v3/roles/%s' % role['id'], headers=self.headers)
self.assertEqual(role['id'], r.json['role']['id'])
class _SystemReaderAndMemberRoleTests(object):
"""Common default functionality for system readers and system members."""
def test_user_cannot_create_roles(self):
create = {'role': unit.new_role_ref()}
with self.test_client() as c:
c.post(
'/v3/roles', json=create, headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_update_roles(self):
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref()
)
update = {'role': {'description': uuid.uuid4().hex}}
with self.test_client() as c:
c.patch(
'/v3/roles/%s' % role['id'], json=update, headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_delete_roles(self):
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref()
)
with self.test_client() as c:
c.delete(
'/v3/roles/%s' % role['id'], headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
class _DomainAndProjectUserRoleTests(object):
"""Common functionality for all domain and project users."""
def test_user_cannot_list_roles(self):
PROVIDERS.role_api.create_role(uuid.uuid4().hex, unit.new_role_ref())
with self.test_client() as c:
c.get(
'/v3/roles', headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_get_a_role(self):
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref()
)
with self.test_client() as c:
c.get(
'/v3/roles/%s' % role['id'], headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_create_roles(self):
create = {'role': unit.new_role_ref()}
with self.test_client() as c:
c.post(
'/v3/roles', json=create, headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_update_roles(self):
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref()
)
update = {'role': {'description': uuid.uuid4().hex}}
with self.test_client() as c:
c.patch(
'/v3/roles/%s' % role['id'], json=update, headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
def test_user_cannot_delete_roles(self):
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref()
)
with self.test_client() as c:
c.delete(
'/v3/roles/%s' % role['id'], headers=self.headers,
expected_status_code=http.client.FORBIDDEN
)
class SystemReaderTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_SystemUserRoleTests,
_SystemReaderAndMemberRoleTests):
def setUp(self):
super(SystemReaderTests, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
system_reader = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id
)
self.user_id = PROVIDERS.identity_api.create_user(
system_reader
)['id']
PROVIDERS.assignment_api.create_system_grant_for_user(
self.user_id, self.bootstrapper.reader_role_id
)
auth = self.build_authentication_request(
user_id=self.user_id, password=system_reader['password'],
system=True
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
class SystemMemberTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_SystemUserRoleTests,
_SystemReaderAndMemberRoleTests):
def setUp(self):
super(SystemMemberTests, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
system_member = unit.new_user_ref(
domain_id=CONF.identity.default_domain_id
)
self.user_id = PROVIDERS.identity_api.create_user(
system_member
)['id']
PROVIDERS.assignment_api.create_system_grant_for_user(
self.user_id, self.bootstrapper.member_role_id
)
auth = self.build_authentication_request(
user_id=self.user_id, password=system_member['password'],
system=True
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
class SystemAdminTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_SystemUserRoleTests):
def setUp(self):
super(SystemAdminTests, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
# Reuse the system administrator account created during
# ``keystone-manage bootstrap``
self.user_id = self.bootstrapper.admin_user_id
auth = self.build_authentication_request(
user_id=self.user_id,
password=self.bootstrapper.admin_password,
system=True
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
def test_user_can_create_roles(self):
create = {'role': unit.new_role_ref()}
with self.test_client() as c:
c.post('/v3/roles', json=create, headers=self.headers)
def test_user_can_update_roles(self):
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref()
)
update = {'role': {'description': uuid.uuid4().hex}}
with self.test_client() as c:
c.patch(
'/v3/roles/%s' % role['id'], json=update, headers=self.headers,
)
def test_user_can_delete_roles(self):
role = PROVIDERS.role_api.create_role(
uuid.uuid4().hex, unit.new_role_ref()
)
with self.test_client() as c:
c.delete('/v3/roles/%s' % role['id'], headers=self.headers)
class DomainUserTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_DomainAndProjectUserRoleTests):
def setUp(self):
super(DomainUserTests, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
domain = PROVIDERS.resource_api.create_domain(
uuid.uuid4().hex, unit.new_domain_ref()
)
self.domain_id = domain['id']
domain_admin = unit.new_user_ref(domain_id=self.domain_id)
self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id']
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.admin_role_id, user_id=self.user_id,
domain_id=self.domain_id
)
auth = self.build_authentication_request(
user_id=self.user_id,
password=domain_admin['password'],
domain_id=self.domain_id
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
class ProjectUserTests(base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_DomainAndProjectUserRoleTests):
def setUp(self):
super(ProjectUserTests, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
self.config_fixture.config(group='oslo_policy', enforce_scope=True)
self.user_id = self.bootstrapper.admin_user_id
auth = self.build_authentication_request(
user_id=self.user_id,
password=self.bootstrapper.admin_password,
project_id=self.bootstrapper.project_id
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
class ProjectUserTestsWithoutEnforceScope(
base_classes.TestCaseWithBootstrap,
common_auth.AuthTestMixin,
_DomainAndProjectUserRoleTests):
def setUp(self):
super(ProjectUserTestsWithoutEnforceScope, self).setUp()
self.loadapp()
self.useFixture(ksfixtures.Policy(self.config_fixture))
        # Explicitly set enforce_scope to False to make sure we maintain
# backwards compatibility with project users.
self.config_fixture.config(group='oslo_policy', enforce_scope=False)
domain = PROVIDERS.resource_api.create_domain(
uuid.uuid4().hex, unit.new_domain_ref()
)
user = unit.new_user_ref(domain_id=domain['id'])
self.user_id = PROVIDERS.identity_api.create_user(user)['id']
self.project_id = PROVIDERS.resource_api.create_project(
uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id'])
)['id']
PROVIDERS.assignment_api.create_grant(
self.bootstrapper.member_role_id, user_id=self.user_id,
project_id=self.project_id
)
auth = self.build_authentication_request(
user_id=self.user_id,
password=user['password'],
project_id=self.project_id
)
# Grab a token using the persona we're testing and prepare headers
# for requests we'll be making in the tests.
with self.test_client() as c:
r = c.post('/v3/auth/tokens', json=auth)
self.token_id = r.headers['X-Subject-Token']
self.headers = {'X-Auth-Token': self.token_id}
| apache-2.0 |
mmalecki/node-gyp | gyp/test/subdirectory/gyptest-top-all.py | 240 | 1384 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target and a subsidiary dependent target from a
.gyp file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.
There is a difference here in the default behavior of the underlying
build tools. Specifically, when building the entire "solution", Xcode
puts the output of each project relative to the .xcodeproj directory,
while Visual Studio (and our implementations of SCons and Make) put it
in a build directory relative to the "solution"--that is, the entry-point
from which you built the entire tree.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('prog1.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('prog1.gyp', test.ALL, chdir='relocate/src')
test.run_built_executable('prog1',
stdout="Hello from prog1.c\n",
chdir='relocate/src')
if test.format == 'xcode':
chdir = 'relocate/src/subdir'
else:
chdir = 'relocate/src'
test.run_built_executable('prog2',
chdir=chdir,
stdout="Hello from prog2.c\n")
test.pass_test()
| mit |
paolodedios/tensorflow | tensorflow/python/data/experimental/kernel_tests/prefetch_with_slack_test.py | 6 | 4188 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `experimental_slack` option."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import multi_device_iterator_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class PrefetchWithSlackTest(test_base.DatasetTestBase, parameterized.TestCase):
def setUp(self):
super(PrefetchWithSlackTest, self).setUp()
self._devices = self.configureDevicesForMultiDeviceTest(3)
@combinations.generate(test_base.default_test_combinations())
def testPrefetchWithSlackOption(self):
"""Determines slack_period based on num devices attached to iterator."""
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.prefetch(1)
options = dataset_ops.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset, [self._devices[1], self._devices[2]])
self.evaluate(multi_device_iterator.initializer)
for i in range(0, 10, 2):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.assertEqual(i, self.evaluate(elem_on_1))
self.assertEqual(i + 1, self.evaluate(elem_on_2))
with self.assertRaises(errors.OutOfRangeError):
elem_on_1, elem_on_2 = multi_device_iterator.get_next()
self.evaluate(elem_on_1)
self.evaluate(elem_on_2)
@combinations.generate(test_base.default_test_combinations())
def testPrefetchWithSlackOptionWithoutIterator(self):
"""Defaults to slack period of 1 without iterator."""
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.prefetch(1)
options = dataset_ops.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, range(10))
@combinations.generate(test_base.default_test_combinations())
def testWithPassthroughDataset(self):
"""Should still work with a passthrough dataset after prefetch()."""
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.prefetch(1)
dataset = dataset.map(lambda x: x + 1)
options = dataset_ops.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, range(1, 11))
@combinations.generate(test_base.default_test_combinations())
def testNoErrorWithoutPrefetch(self):
"""The rewrite should not fail if there is no prefetch() in the pipeline."""
dataset = dataset_ops.Dataset.range(10)
options = dataset_ops.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, range(10))
@combinations.generate(test_base.default_test_combinations())
def testNoErrorWithInvalidDataset(self):
"""With a nested dataset op after prefetch, the rewrite should fail."""
dataset = dataset_ops.Dataset.range(10)
dataset = dataset.prefetch(1)
dataset = dataset.flat_map(dataset_ops.Dataset.from_tensors)
options = dataset_ops.Options()
options.experimental_slack = True
dataset = dataset.with_options(options)
self.assertDatasetProduces(dataset, range(10))
if __name__ == "__main__":
test.main()
| apache-2.0 |
t11e/werkzeug | bench/wzbench.py | 1 | 11587 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
wzbench
~~~~~~~
    A werkzeug internal benchmark module. It's used in combination with
    hg bisect to find out how the performance of some internal core parts
    of Werkzeug changes over time.
:copyright: 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import division
import os
import gc
import sys
import subprocess
from cStringIO import StringIO
from timeit import default_timer as timer
from types import FunctionType
# create a new module were we later store all the werkzeug attributes.
wz = type(sys)('werkzeug_nonlazy')
sys.path.insert(0, '<DUMMY>')
null_out = file(os.devnull, 'w')
# ±4% are ignored
TOLERANCE = 0.04
MIN_RESOLUTION = 0.002
# we run each test 5 times
TEST_RUNS = 5
def find_hg_tag(path):
"""Returns the current node or tag for the given path."""
tags = {}
try:
client = subprocess.Popen(['hg', 'cat', '-r', 'tip', '.hgtags'],
stdout=subprocess.PIPE, cwd=path)
for line in client.communicate()[0].splitlines():
line = line.strip()
if not line:
continue
hash, tag = line.split()
tags[hash] = tag
except OSError:
return
client = subprocess.Popen(['hg', 'parent', '--template', '#node#'], cwd=path,
stdout=subprocess.PIPE)
tip = client.communicate()[0].strip()
tag = tags.get(tip)
if tag is not None:
return tag
return tip
def load_werkzeug(path):
"""Load werkzeug."""
sys.path[0] = path
# get rid of already imported stuff
wz.__dict__.clear()
for key in sys.modules.keys():
if key.startswith('werkzeug.') or key == 'werkzeug':
sys.modules.pop(key, None)
# import werkzeug again.
import werkzeug
for key in werkzeug.__all__:
setattr(wz, key, getattr(werkzeug, key))
# get the hg tag
hg_tag = find_hg_tag(path)
# get the real version from the setup file
try:
f = file(os.path.join(path, 'setup.py'))
except IOError:
pass
else:
try:
for line in f:
line = line.strip()
if line.startswith('version='):
return line[8:].strip(' \t,')[1:-1], hg_tag
finally:
f.close()
print >> sys.stderr, 'Unknown werkzeug version loaded'
sys.exit(2)
def median(seq):
seq = sorted(seq)
if not seq:
return 0.0
return seq[len(seq) // 2]
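# For example, median([0.4, 0.2, 0.9]) returns 0.4 -- the middle of the sorted
# timings -- which is what bench() below reports for each benchmark.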
def format_func(func):
if type(func) is FunctionType:
name = func.__name__
else:
name = func
if name.startswith('time_'):
name = name[5:]
return name.replace('_', ' ').title()
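# For example, a benchmark function named ``time_url_decode`` is displayed as
# ``Url Decode``.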
def bench(func):
"""Times a single function."""
sys.stdout.write('%44s ' % format_func(func))
sys.stdout.flush()
# figure out how many times we have to run the function to
# get reliable timings.
for i in xrange(1, 10):
rounds = 10 * i
t = timer()
for x in xrange(rounds):
func()
if timer() - t >= 0.2:
break
# now run the tests without gc TEST_RUNS times and use the median
# value of these runs.
def _run():
gc.collect()
gc.disable()
try:
t = timer()
for x in xrange(rounds):
func()
return (timer() - t) / rounds * 1000
finally:
gc.enable()
delta = median(_run() for x in xrange(TEST_RUNS))
sys.stdout.write('%.4f\n' % delta)
sys.stdout.flush()
return delta
def main():
"""The main entrypoint."""
from optparse import OptionParser
parser = OptionParser(usage='%prog [options]')
parser.add_option('--werkzeug-path', '-p', dest='path', default='..',
help='the path to the werkzeug package. defaults to cwd')
parser.add_option('--compare', '-c', dest='compare', nargs=2,
default=False, help='compare two hg nodes of Werkzeug')
parser.add_option('--init-compare', dest='init_compare', action='store_true',
default=False, help='Initializes the comparison feature')
options, args = parser.parse_args()
if args:
parser.error('Script takes no arguments')
if options.compare:
compare(*options.compare)
elif options.init_compare:
init_compare()
else:
run(options.path)
def init_compare():
"""Initializes the comparision feature."""
print 'Initializing comparision feature'
subprocess.Popen(['hg', 'clone', '..', 'a']).wait()
subprocess.Popen(['hg', 'clone', '..', 'b']).wait()
def compare(node1, node2):
"""Compares two Werkzeug hg versions."""
if not os.path.isdir('a'):
        print >> sys.stderr, 'error: comparison feature not initialized'
sys.exit(4)
print '=' * 80
print 'WERKZEUG INTERNAL BENCHMARK -- COMPARE MODE'.center(80)
print '-' * 80
delim = '-' * 20
def _error(msg):
print >> sys.stderr, 'error:', msg
sys.exit(1)
def _hg_update(repo, node):
hg = lambda *x: subprocess.call(['hg'] + list(x), cwd=repo,
stdout=null_out, stderr=null_out)
hg('revert', '-a', '--no-backup')
hg('pull', '../..')
hg('update', node)
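        # When benchmarking "tip", also apply any uncommitted changes from the
        # parent repository so the working copy is what actually gets measured.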
if node == 'tip':
diff = subprocess.Popen(['hg', 'diff'], cwd='..',
stdout=subprocess.PIPE).communicate()[0]
if diff:
client = subprocess.Popen(['hg', 'import', '--no-commit', '-'],
cwd=repo, stdout=null_out,
stdin=subprocess.PIPE)
client.communicate(diff)
_hg_update('a', node1)
_hg_update('b', node2)
d1 = run('a', no_header=True)
d2 = run('b', no_header=True)
print 'DIRECT COMPARISON'.center(80)
print '-' * 80
for key in sorted(d1):
delta = d1[key] - d2[key]
if abs(1 - d1[key] / d2[key]) < TOLERANCE or \
abs(delta) < MIN_RESOLUTION:
delta = ''
else:
delta = '%+.4f (%+d%%)' % (delta, round(d2[key] / d1[key] * 100 - 100))
print '%36s %.4f %.4f %s' % (format_func(key), d1[key],
d2[key], delta)
print '-' * 80
def run(path, no_header=False):
path = os.path.abspath(path)
wz_version, hg_tag = load_werkzeug(path)
result = {}
if not no_header:
print '=' * 80
print 'WERKZEUG INTERNAL BENCHMARK'.center(80)
print '-' * 80
print 'Path: %s' % path
print 'Version: %s' % wz_version
if hg_tag is not None:
print 'HG Tag: %s' % hg_tag
print '-' * 80
for key, value in sorted(globals().items()):
if key.startswith('time_'):
before = globals().get('before_' + key[5:])
if before:
before()
result[key] = bench(value)
after = globals().get('after_' + key[5:])
if after:
after()
print '-' * 80
return result
URL_DECODED_DATA = dict((str(x), str(x)) for x in xrange(100))
URL_ENCODED_DATA = '&'.join('%s=%s' % x for x in URL_DECODED_DATA.items())
MULTIPART_ENCODED_DATA = '\n'.join((
'--foo',
'Content-Disposition: form-data; name=foo',
'',
'this is just bar',
'--foo',
'Content-Disposition: form-data; name=bar',
'',
'blafasel',
'--foo',
'Content-Disposition: form-data; name=foo; filename=wzbench.py',
'Content-Type: text/plain',
'',
file(__file__.rstrip('c')).read(),
'--foo--'
))
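# The multipart body above embeds this script's own source (via __file__) as an
# uploaded file, so the form-data parsing benchmarks get a non-trivial payload.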
MULTIDICT = None
REQUEST = None
TEST_ENV = None
LOCAL = None
LOCAL_MANAGER = None
def time_url_decode():
wz.url_decode(URL_ENCODED_DATA)
def time_url_encode():
wz.url_encode(URL_DECODED_DATA)
def time_parse_form_data_multipart():
# use a hand written env creator so that we don't bench
# from_values which is known to be slowish in 0.5.1 and higher.
# we don't want to bench two things at once.
environ = {
'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': 'multipart/form-data; boundary=foo',
'wsgi.input': StringIO(MULTIPART_ENCODED_DATA),
'CONTENT_LENGTH': str(len(MULTIPART_ENCODED_DATA))
}
request = wz.Request(environ)
request.form
def before_multidict_lookup_hit():
global MULTIDICT
MULTIDICT = wz.MultiDict({'foo': 'bar'})
def time_multidict_lookup_hit():
MULTIDICT['foo']
def after_multidict_lookup_hit():
global MULTIDICT
MULTIDICT = None
def before_multidict_lookup_miss():
global MULTIDICT
MULTIDICT = wz.MultiDict()
def time_multidict_lookup_miss():
try:
MULTIDICT['foo']
except KeyError:
pass
def after_multidict_lookup_miss():
global MULTIDICT
MULTIDICT = None
def time_cached_property():
class Foo(object):
@wz.cached_property
def x(self):
return 42
f = Foo()
for x in xrange(60):
f.x
def before_request_form_access():
global REQUEST
data = 'foo=bar&blah=blub'
REQUEST = wz.Request({
'CONTENT_LENGTH': str(len(data)),
'wsgi.input': StringIO(data),
'REQUEST_METHOD': 'POST',
'wsgi.version': (1, 0),
'QUERY_STRING': data,
'CONTENT_TYPE': 'application/x-www-form-urlencoded',
'PATH_INFO': '/',
'SCRIPT_NAME': ''
})
def time_request_form_access():
for x in xrange(30):
REQUEST.path
REQUEST.script_root
REQUEST.args['foo']
REQUEST.form['foo']
def after_request_form_access():
global REQUEST
REQUEST = None
def time_request_from_values():
wz.Request.from_values(base_url='http://www.google.com/',
query_string='foo=bar&blah=blaz',
input_stream=StringIO(MULTIPART_ENCODED_DATA),
content_length=len(MULTIPART_ENCODED_DATA),
content_type='multipart/form-data; '
'boundary=foo', method='POST')
def before_request_shallow_init():
global TEST_ENV
TEST_ENV = wz.create_environ()
def time_request_shallow_init():
wz.Request(TEST_ENV, shallow=True)
def after_request_shallow_init():
global TEST_ENV
TEST_ENV = None
def time_response_iter_performance():
resp = wz.Response(u'Hällo Wörld ' * 1000,
mimetype='text/html')
for item in resp({'REQUEST_METHOD': 'GET'}, lambda *s: None):
pass
def time_response_iter_head_performance():
resp = wz.Response(u'Hällo Wörld ' * 1000,
mimetype='text/html')
for item in resp({'REQUEST_METHOD': 'HEAD'}, lambda *s: None):
pass
def before_local_manager_dispatch():
global LOCAL_MANAGER, LOCAL
LOCAL = wz.Local()
LOCAL_MANAGER = wz.LocalManager([LOCAL])
def time_local_manager_dispatch():
for x in xrange(10):
LOCAL.x = 42
for x in xrange(10):
LOCAL.x
def after_local_manager_dispatch():
global LOCAL_MANAGER, LOCAL
LOCAL = LOCAL_MANAGER = None
if __name__ == '__main__':
os.chdir(os.path.dirname(__file__) or os.path.curdir)
try:
main()
except KeyboardInterrupt:
print >> sys.stderr, 'interrupted!'
| bsd-3-clause |
braintreeps/moto | tests/test_ec2/test_internet_gateways.py | 2 | 9124 | from __future__ import unicode_literals
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises
from nose.tools import assert_raises
import re
import boto
from boto.exception import EC2ResponseError, JSONResponseError
import sure # noqa
from moto import mock_ec2
VPC_CIDR="10.0.0.0/16"
BAD_VPC="vpc-deadbeef"
BAD_IGW="igw-deadbeef"
@mock_ec2
def test_igw_create():
""" internet gateway create """
conn = boto.connect_vpc('the_key', 'the_secret')
conn.get_all_internet_gateways().should.have.length_of(0)
with assert_raises(JSONResponseError) as ex:
igw = conn.create_internet_gateway(dry_run=True)
ex.exception.reason.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the CreateInternetGateway operation: Request would have succeeded, but DryRun flag is set')
igw = conn.create_internet_gateway()
conn.get_all_internet_gateways().should.have.length_of(1)
igw.id.should.match(r'igw-[0-9a-f]+')
igw = conn.get_all_internet_gateways()[0]
igw.attachments.should.have.length_of(0)
@mock_ec2
def test_igw_attach():
""" internet gateway attach """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
with assert_raises(JSONResponseError) as ex:
conn.attach_internet_gateway(igw.id, vpc.id, dry_run=True)
ex.exception.reason.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set')
conn.attach_internet_gateway(igw.id, vpc.id)
igw = conn.get_all_internet_gateways()[0]
igw.attachments[0].vpc_id.should.be.equal(vpc.id)
@mock_ec2
def test_igw_attach_bad_vpc():
""" internet gateway fail to attach w/ bad vpc """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
with assert_raises(EC2ResponseError) as cm:
conn.attach_internet_gateway(igw.id, BAD_VPC)
cm.exception.code.should.equal('InvalidVpcID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_attach_twice():
""" internet gateway fail to attach twice """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc1 = conn.create_vpc(VPC_CIDR)
vpc2 = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw.id, vpc1.id)
with assert_raises(EC2ResponseError) as cm:
conn.attach_internet_gateway(igw.id, vpc2.id)
cm.exception.code.should.equal('Resource.AlreadyAssociated')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_detach():
""" internet gateway detach"""
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw.id, vpc.id)
with assert_raises(JSONResponseError) as ex:
conn.detach_internet_gateway(igw.id, vpc.id, dry_run=True)
ex.exception.reason.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DetachInternetGateway operation: Request would have succeeded, but DryRun flag is set')
conn.detach_internet_gateway(igw.id, vpc.id)
igw = conn.get_all_internet_gateways()[0]
igw.attachments.should.have.length_of(0)
@mock_ec2
def test_igw_detach_wrong_vpc():
""" internet gateway fail to detach w/ wrong vpc """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc1 = conn.create_vpc(VPC_CIDR)
vpc2 = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw.id, vpc1.id)
with assert_raises(EC2ResponseError) as cm:
conn.detach_internet_gateway(igw.id, vpc2.id)
cm.exception.code.should.equal('Gateway.NotAttached')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_detach_invalid_vpc():
""" internet gateway fail to detach w/ invalid vpc """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw.id, vpc.id)
with assert_raises(EC2ResponseError) as cm:
conn.detach_internet_gateway(igw.id, BAD_VPC)
cm.exception.code.should.equal('Gateway.NotAttached')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_detach_unattached():
""" internet gateway fail to detach unattached """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
with assert_raises(EC2ResponseError) as cm:
conn.detach_internet_gateway(igw.id, vpc.id)
cm.exception.code.should.equal('Gateway.NotAttached')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_delete():
""" internet gateway delete"""
conn = boto.connect_vpc('the_key', 'the_secret')
vpc = conn.create_vpc(VPC_CIDR)
conn.get_all_internet_gateways().should.have.length_of(0)
igw = conn.create_internet_gateway()
conn.get_all_internet_gateways().should.have.length_of(1)
with assert_raises(JSONResponseError) as ex:
conn.delete_internet_gateway(igw.id, dry_run=True)
ex.exception.reason.should.equal('DryRunOperation')
ex.exception.status.should.equal(400)
ex.exception.message.should.equal('An error occurred (DryRunOperation) when calling the DeleteInternetGateway operation: Request would have succeeded, but DryRun flag is set')
conn.delete_internet_gateway(igw.id)
conn.get_all_internet_gateways().should.have.length_of(0)
@mock_ec2
def test_igw_delete_attached():
""" internet gateway fail to delete attached """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw.id, vpc.id)
with assert_raises(EC2ResponseError) as cm:
conn.delete_internet_gateway(igw.id)
cm.exception.code.should.equal('DependencyViolation')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_describe():
""" internet gateway fetch by id """
conn = boto.connect_vpc('the_key', 'the_secret')
igw = conn.create_internet_gateway()
igw_by_search = conn.get_all_internet_gateways([igw.id])[0]
igw.id.should.equal(igw_by_search.id)
@mock_ec2
def test_igw_describe_bad_id():
""" internet gateway fail to fetch by bad id """
conn = boto.connect_vpc('the_key', 'the_secret')
with assert_raises(EC2ResponseError) as cm:
conn.get_all_internet_gateways([BAD_IGW])
cm.exception.code.should.equal('InvalidInternetGatewayID.NotFound')
cm.exception.status.should.equal(400)
cm.exception.request_id.should_not.be.none
@mock_ec2
def test_igw_filter_by_vpc_id():
""" internet gateway filter by vpc id """
conn = boto.connect_vpc('the_key', 'the_secret')
igw1 = conn.create_internet_gateway()
igw2 = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw1.id, vpc.id)
result = conn.get_all_internet_gateways(filters={"attachment.vpc-id": vpc.id})
result.should.have.length_of(1)
result[0].id.should.equal(igw1.id)
@mock_ec2
def test_igw_filter_by_tags():
""" internet gateway filter by vpc id """
conn = boto.connect_vpc('the_key', 'the_secret')
igw1 = conn.create_internet_gateway()
igw2 = conn.create_internet_gateway()
igw1.add_tag("tests", "yes")
result = conn.get_all_internet_gateways(filters={"tag:tests": "yes"})
result.should.have.length_of(1)
result[0].id.should.equal(igw1.id)
@mock_ec2
def test_igw_filter_by_internet_gateway_id():
""" internet gateway filter by internet gateway id """
conn = boto.connect_vpc('the_key', 'the_secret')
igw1 = conn.create_internet_gateway()
igw2 = conn.create_internet_gateway()
result = conn.get_all_internet_gateways(filters={"internet-gateway-id": igw1.id})
result.should.have.length_of(1)
result[0].id.should.equal(igw1.id)
@mock_ec2
def test_igw_filter_by_attachment_state():
""" internet gateway filter by attachment state """
conn = boto.connect_vpc('the_key', 'the_secret')
igw1 = conn.create_internet_gateway()
igw2 = conn.create_internet_gateway()
vpc = conn.create_vpc(VPC_CIDR)
conn.attach_internet_gateway(igw1.id, vpc.id)
result = conn.get_all_internet_gateways(filters={"attachment.state": "available"})
result.should.have.length_of(1)
result[0].id.should.equal(igw1.id)
| apache-2.0 |
learningequality/kolibri | kolibri/plugins/hooks.py | 3 | 12517 | """
Kolibri Hooks API
-----------------
What are hooks
~~~~~~~~~~~~~~
Hooks are classes that define *something* that happens at one or more places
where the hook is looked for and applied. It means that you can
"hook into a component" in Kolibri and have it do a predefined and
parameterized *thing*. For instance, Kolibri could ask all its plugins who
wants to add something to the user settings panel, and its then up to the
plugins to inherit from that specific hook and feed back the parameters that
the hook definition expects.
The consequences of a hook being applied can happen anywhere in Kolibri. Each
hook is defined through a class inheriting from ``KolibriHook``. But how the
inheritor of that class deals with plugins using it, is entirely up to each
specific implementation and can be applied in templates, views, middleware -
basically everywhere!
That's why you should consult the class definition and documentation of the
hook you are adding plugin functionality with.
We have two different types of hooks:
Abstract hooks
Are definitions of hooks that are implemented by *implementing hooks*.
These hooks are Python abstract base classes, and can use the @abstractproperty
and @abstractmethod decorators from the abc module in order to define which
properties and methods their descendant registered hooks should implement.
Registered hooks
Are concrete hooks that inherit from abstract hooks, thus embodying the
definitions of the abstract hook into a specific case. If the abstract parent hook
has any abstract properties or methods, the hook being registered as a descendant
must implement those properties and methods, or an error will occur.
So what's "a hook"?
Simply referring to "a hook" is okay, it can be ambiguous on purpose. For
instance, in the example, we talk about "a navigation hook". So we both
mean the abstract definition of the navigation hook and everything that
is registered for the navigation.
Where can I find hooks?
~~~~~~~~~~~~~~~~~~~~~~~
All Kolibri core applications and plugins alike should *by convention* define
their abstract hooks inside ``<myapp>/hooks.py``. Thus, to see which hooks
a Kolibri component exposes, you can refer to its ``hooks`` module.
.. note::
Defining abstract hooks in ``<myapp>/hooks.py`` isn't mandatory, but
*loading* a concrete hook in ``<myapp>/kolibri_plugin.py`` is.
.. warning::
Do not define abstract and registered hooks in the same module. Or to put it
in other words: Always put registered hooks in ``<myapp>/kolibri_plugin.py``. The
registered hooks will only be initialized for use by the Kolibri plugin registry
if they are registered inside the kolibri_plugin.py module for the plugin.
In which order are hooks used/applied?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is entirely up to the registering class. By default, hooks are applied in
the same order that the registered hook gets registered! While it could be the
case that plugins could be enabled in a certain order to get a specific ordering
of hooks - it is best not to depend on this behaviour as it could result in brittleness.
An example of a plugin using a hook
-----------------------------------
.. note::
The example shows a NavigationHook which is simplified for the sake of
readability. The actual implementation in Kolibri will differ.
Example implementation
----------------------
Here is an example of how to use a hook in ``myplugin.kolibri_plugin.py``:
.. code-block:: python
from kolibri.core.hooks import NavigationHook
from kolibri.plugins.hooks import register_hook
@register_hook
class MyPluginNavItem(NavigationHook):
bundle_id = "side_nav"
The decorator ``@register_hook`` signals that the wrapped class is intended to be registered
against any abstract KolibriHook descendants that it inherits from. In this case, the hook
being registered inherits from NavigationHook, so any hook registered will be available on
the ``NavigationHook.registered_hooks`` property.
Here is the definition of the abstract NavigationHook in kolibri.core.hooks:
.. code-block:: python
from kolibri.core.webpack.hooks import WebpackBundleHook
from kolibri.plugins.hooks import define_hook
@define_hook
class NavigationHook(WebpackBundleHook):
# Set this to True so that the resulting frontend code will be rendered inline.
inline = True
As can be seen from above, to define an abstract hook, instead of using the ``@register_hook``
decorator, the ``@define_hook`` decorator is used instead, to signal that this instance of
inheritance is not intended to register anything against the parent ``WebpackBundleHook``.
However, because of the inheritance relationship, any hook registered against ``NavigationHook``
(like our example registered hook above), will also be registered against the ``WebpackBundleHook``,
so we should expect to see our plugin's nav item listed in the ``WebpackBundleHook.registered_hooks``
property as well as in the ``NavigationHook.registered_hooks`` property.
Usage of the hook
-----------------
The hook can then be used to collect all the information from the hooks, as per this usage
of the ``NavigationHook`` in ``kolibri/core/kolibri_plugin.py``:
.. code-block:: python
from kolibri.core.hooks import NavigationHook
...
def navigation_tags(self):
return [
hook.render_to_page_load_sync_html()
for hook in NavigationHook.registered_hooks
]
Each registered hook is iterated over and its appropriate HTML for rendering into
the frontend is returned. When iterating over ``registered_hooks`` the returned
objects are each instances of the hook classes that were registered.
.. warning::
Do not load registered hook classes outside of a plugin's
``kolibri_plugin``. Either define them there directly or import the modules
that define them. Hook classes should all be seen at load time, and
placing that logic in ``kolibri_plugin`` guarantees that things are
registered correctly.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
from abc import abstractproperty
from functools import partial
from inspect import isabstract
import six
from kolibri.plugins import SingletonMeta
logger = logging.getLogger(__name__)
class HookSingleInstanceError(Exception):
"""
This exception is raised when a hook that is only allowed one active registered hook
has more than one defined
"""
def _make_singleton(subclass):
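    """Patch ``subclass.__new__`` so that instantiating the class always returns one shared instance."""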
original_new = subclass.__new__
def new(cls, *args, **kwds):
if cls._instance is None:
cls._instance = original_new(cls, *args, **kwds)
return cls._instance
subclass._instance = None
subclass.__new__ = new
def define_hook(subclass=None, only_one_registered=False):
"""
This method must be used as a decorator to define a new hook inheriting from
the hook class that this is called from, this will return an abstract base
class, which distinguishes is from the classes returned by register_hook
which can be instantiated. Only abstract base classes track registered hooks.
"""
# Allow optional arguments to be passed to define_hook when used as a decorator
if subclass is None:
return partial(define_hook, only_one_registered=only_one_registered)
subclass = six.add_metaclass(KolibriHookMeta)(subclass)
subclass._setup_base_class(only_one_registered=only_one_registered)
return subclass
def register_hook(subclass):
"""
This method must be used as a decorator to register a hook against this hook
class and all parent abstract classes - can only be called on an abstract
base class.
"""
if not any(
hasattr(base, "_registered_hooks")
and base.abstract
and issubclass(base, KolibriHook)
for base in subclass.__bases__
):
raise TypeError(
"register_hook decorator used on a class that does not inherit from any abstract KolibriHook subclasses"
)
if not subclass.__module__.endswith("kolibri_plugin"):
raise RuntimeError(
"register_hook decorator invoked outside of a kolibri_plugin.py module - this hook will not be initialized"
)
attrs = dict(subclass.__dict__)
attrs.update({"_not_abstract": True})
subclass = type(subclass.__name__, subclass.__bases__, attrs)
subclass._registered = True
return subclass
class KolibriHookMeta(SingletonMeta):
"""
We use a metaclass to define class level properties and methods in a simple way.
We could define the classmethods on the KolibriHook object below, but this keeps
the logic contained into one definition.
"""
    #: Sets a flag so that we can check that the hook class has properly gone through
    #: the register_hook function above.
_registered = False
@property
def abstract(cls):
"""
Check if the class object is an abstract base class or not.
"""
return isabstract(cls)
@property
def registered_hooks(cls):
"""
A generator of all registered hooks.
"""
if not cls.abstract:
raise TypeError("registered_hooks property accessed on a non-abstract hook")
for hook in cls._registered_hooks.values():
yield hook
def _setup_base_class(cls, only_one_registered=False):
"""
Do any setup required specifically if this class is being setup as a hook definition
abstract base class.
"""
cls._registered_hooks = {}
cls._only_one_registered = only_one_registered
def add_hook_to_registries(cls):
"""
Add a concrete hook class to all relevant abstract hook registries.
"""
if not cls.abstract and cls._registered:
hook = cls()
for parent in cls.__mro__:
if (
isabstract(parent)
and issubclass(parent, KolibriHook)
and parent is not KolibriHook
and hasattr(parent, "_registered_hooks")
):
parent.add_hook_to_class_registry(hook)
def add_hook_to_class_registry(cls, hook):
"""
Add a concrete hook instance to the hook registry on this abstract hook
"""
if not cls.abstract:
raise TypeError("add_hook_to_registry method used on a non-abstract hook")
if (
cls._only_one_registered
and cls._registered_hooks
and hook not in cls.registered_hooks
):
for parent in cls.__mro__:
if (
isabstract(parent)
and issubclass(parent, KolibriHook)
and parent is not KolibriHook
and cls._only_one_registered
):
# Find the first ancestor class that enforces the only one registered
# constraint.
raise HookSingleInstanceError(
"Attempted to register more than one instance of {}".format(
parent
)
)
cls._registered_hooks[hook.unique_id] = hook
def get_hook(cls, unique_id):
"""
Fetch a registered hook instance by its unique_id
"""
if not cls.abstract:
raise TypeError("get_hook method used on a non-abstract hook")
return cls._registered_hooks.get(unique_id, None)
class KolibriHook(six.with_metaclass(KolibriHookMeta)):
@abstractproperty
def _not_abstract(self):
"""
A dummy property that we set on classes that are not intended to be abstract in the register_hook function above.
"""
pass
@property
def unique_id(self):
"""
Returns a globally unique id for the frontend module bundle.
This is created by appending the locally unique bundle_id to the
Python module path. This should give a globally unique id for the module
and prevent accidental or malicious collisions.
"""
return "{}.{}".format(self._module_path, self.__class__.__name__)
@property
def _module_path(self):
return ".".join(self.__module__.split(".")[:-1])
| mit |
SnakeJenny/TensorFlow | tensorflow/python/estimator/run_config.py | 7 | 1897 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Environment configuration object for Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class TaskType(object):
MASTER = 'master'
PS = 'ps'
WORKER = 'worker'
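# Note: every property on RunConfig below currently returns a fixed default value.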
class RunConfig(object):
"""This class specifies the configurations for an `Estimator` run."""
@property
def cluster_spec(self):
return None
@property
def evaluation_master(self):
return ''
@property
def is_chief(self):
return True
@property
def master(self):
return ''
@property
def num_ps_replicas(self):
return 0
@property
def num_worker_replicas(self):
return 1
@property
def task_id(self):
return 0
@property
def task_type(self):
return TaskType.WORKER
@property
def tf_random_seed(self):
return 1
@property
def save_summary_steps(self):
return 100
@property
def save_checkpoints_secs(self):
return 600
@property
def session_config(self):
return None
@property
def save_checkpoints_steps(self):
return None
@property
def keep_checkpoint_max(self):
return 5
@property
def keep_checkpoint_every_n_hours(self):
return 10000
| apache-2.0 |
mgedmin/ansible | lib/ansible/module_utils/local.py | 1 | 3353 | #
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import ansible.module_utils.basic
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import remove_values
from ansible.errors import AnsibleModuleExit
_ANSIBLE_CONNECTION = None
def _modify_module(task_args, connection):
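    """Serialize the task arguments into AnsibleModule's expected argument buffer and record the connection for LocalAnsibleModule."""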
params = {'ANSIBLE_MODULE_ARGS': task_args}
ansible.module_utils.basic._ANSIBLE_ARGS = json.dumps(params)
global _ANSIBLE_CONNECTION
_ANSIBLE_CONNECTION = connection
class LocalAnsibleModule(AnsibleModule):
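    """AnsibleModule variant that runs commands through the shared connection and reports results by raising AnsibleModuleExit instead of printing JSON and exiting."""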
@property
def connection(self):
return _ANSIBLE_CONNECTION
def exec_command(self, args, check_rc=False):
'''
Execute a command, returns rc, stdout, and stderr.
'''
rc, out, err = self.connection.exec_command(args)
if check_rc and rc != 0:
self.fail_json(msg='command %s failed' % args, rc=rc, stderr=err, stdout=out)
return rc, out, err
def exit_json(self, **kwargs):
''' return from the module, without error '''
        if 'changed' not in kwargs:
kwargs['changed'] = False
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
kwargs = remove_values(kwargs, self.no_log_values)
raise AnsibleModuleExit(kwargs)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
kwargs['failed'] = True
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
kwargs = remove_values(kwargs, self.no_log_values)
raise AnsibleModuleExit(kwargs)
| gpl-3.0 |
hynnet/openwrt-mt7620 | staging_dir/host/lib/python2.7/bsddb/test/test_basics.py | 39 | 37246 | """
Basic TestCases for BTree and hash DBs, with and without a DBEnv, with
various DB flags, etc.
"""
import os
import errno
import string
from pprint import pprint
import unittest
import time
from test_all import db, test_support, verbose, get_new_environment_path, \
get_new_database_path
DASH = '-'
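# Separator used by BasicTestCase.makeData() when building dummy record values.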
#----------------------------------------------------------------------
class VersionTestCase(unittest.TestCase):
def test00_version(self):
info = db.version()
if verbose:
print '\n', '-=' * 20
print 'bsddb.db.version(): %s' % (info, )
print db.DB_VERSION_STRING
print '-=' * 20
self.assertEqual(info, (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR,
db.DB_VERSION_PATCH))
#----------------------------------------------------------------------
class BasicTestCase(unittest.TestCase):
dbtype = db.DB_UNKNOWN # must be set in derived class
cachesize = (0, 1024*1024, 1)
dbopenflags = 0
dbsetflags = 0
dbmode = 0660
dbname = None
useEnv = 0
envflags = 0
envsetflags = 0
_numKeys = 1002 # PRIVATE. NOTE: must be an even value
import sys
if sys.version_info < (2, 4):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def assertFalse(self, expr, msg=None):
self.failIf(expr,msg=msg)
def setUp(self):
if self.useEnv:
self.homeDir=get_new_environment_path()
try:
self.env = db.DBEnv()
self.env.set_lg_max(1024*1024)
self.env.set_tx_max(30)
self._t = int(time.time())
self.env.set_tx_timestamp(self._t)
self.env.set_flags(self.envsetflags, 1)
self.env.open(self.homeDir, self.envflags | db.DB_CREATE)
self.filename = "test"
# Yes, a bare except is intended, since we're re-raising the exc.
except:
test_support.rmtree(self.homeDir)
raise
else:
self.env = None
self.filename = get_new_database_path()
# create and open the DB
self.d = db.DB(self.env)
if not self.useEnv :
if db.version() >= (4, 2) :
self.d.set_cachesize(*self.cachesize)
cachesize = self.d.get_cachesize()
self.assertEqual(cachesize[0], self.cachesize[0])
self.assertEqual(cachesize[2], self.cachesize[2])
# Berkeley DB expands the cache 25% accounting overhead,
# if the cache is small.
self.assertEqual(125, int(100.0*cachesize[1]/self.cachesize[1]))
self.d.set_flags(self.dbsetflags)
if self.dbname:
self.d.open(self.filename, self.dbname, self.dbtype,
self.dbopenflags|db.DB_CREATE, self.dbmode)
else:
self.d.open(self.filename, # try out keyword args
mode = self.dbmode,
dbtype = self.dbtype,
flags = self.dbopenflags|db.DB_CREATE)
if not self.useEnv:
self.assertRaises(db.DBInvalidArgError,
self.d.set_cachesize, *self.cachesize)
self.populateDB()
def tearDown(self):
self.d.close()
if self.env is not None:
self.env.close()
test_support.rmtree(self.homeDir)
else:
os.remove(self.filename)
def populateDB(self, _txn=None):
d = self.d
for x in range(self._numKeys//2):
key = '%04d' % (self._numKeys - x) # insert keys in reverse order
data = self.makeData(key)
d.put(key, data, _txn)
d.put('empty value', '', _txn)
for x in range(self._numKeys//2-1):
key = '%04d' % x # and now some in forward order
data = self.makeData(key)
d.put(key, data, _txn)
if _txn:
_txn.commit()
num = len(d)
if verbose:
print "created %d records" % num
def makeData(self, key):
return DASH.join([key] * 5)
#----------------------------------------
def test01_GetsAndPuts(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test01_GetsAndPuts..." % self.__class__.__name__
for key in ['0001', '0100', '0400', '0700', '0999']:
data = d.get(key)
if verbose:
print data
self.assertEqual(d.get('0321'), '0321-0321-0321-0321-0321')
# By default non-existent keys return None...
self.assertEqual(d.get('abcd'), None)
# ...but they raise exceptions in other situations. Call
# set_get_returns_none() to change it.
try:
d.delete('abcd')
except db.DBNotFoundError, val:
import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print val
else:
self.fail("expected exception")
d.put('abcd', 'a new record')
self.assertEqual(d.get('abcd'), 'a new record')
d.put('abcd', 'same key')
if self.dbsetflags & db.DB_DUP:
self.assertEqual(d.get('abcd'), 'a new record')
else:
self.assertEqual(d.get('abcd'), 'same key')
try:
d.put('abcd', 'this should fail', flags=db.DB_NOOVERWRITE)
except db.DBKeyExistError, val:
import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_KEYEXIST)
else :
self.assertEqual(val.args[0], db.DB_KEYEXIST)
if verbose: print val
else:
self.fail("expected exception")
if self.dbsetflags & db.DB_DUP:
self.assertEqual(d.get('abcd'), 'a new record')
else:
self.assertEqual(d.get('abcd'), 'same key')
d.sync()
d.close()
del d
self.d = db.DB(self.env)
if self.dbname:
self.d.open(self.filename, self.dbname)
else:
self.d.open(self.filename)
d = self.d
self.assertEqual(d.get('0321'), '0321-0321-0321-0321-0321')
if self.dbsetflags & db.DB_DUP:
self.assertEqual(d.get('abcd'), 'a new record')
else:
self.assertEqual(d.get('abcd'), 'same key')
rec = d.get_both('0555', '0555-0555-0555-0555-0555')
if verbose:
print rec
self.assertEqual(d.get_both('0555', 'bad data'), None)
# test default value
data = d.get('bad key', 'bad data')
self.assertEqual(data, 'bad data')
# any object can pass through
data = d.get('bad key', self)
self.assertEqual(data, self)
s = d.stat()
self.assertEqual(type(s), type({}))
if verbose:
print 'd.stat() returned this dictionary:'
pprint(s)
#----------------------------------------
def test02_DictionaryMethods(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test02_DictionaryMethods..." % \
self.__class__.__name__
for key in ['0002', '0101', '0401', '0701', '0998']:
data = d[key]
self.assertEqual(data, self.makeData(key))
if verbose:
print data
self.assertEqual(len(d), self._numKeys)
keys = d.keys()
self.assertEqual(len(keys), self._numKeys)
self.assertEqual(type(keys), type([]))
d['new record'] = 'a new record'
self.assertEqual(len(d), self._numKeys+1)
keys = d.keys()
self.assertEqual(len(keys), self._numKeys+1)
d['new record'] = 'a replacement record'
self.assertEqual(len(d), self._numKeys+1)
keys = d.keys()
self.assertEqual(len(keys), self._numKeys+1)
if verbose:
print "the first 10 keys are:"
pprint(keys[:10])
self.assertEqual(d['new record'], 'a replacement record')
# We check also the positional parameter
self.assertEqual(d.has_key('0001', None), 1)
# We check also the keyword parameter
self.assertEqual(d.has_key('spam', txn=None), 0)
items = d.items()
self.assertEqual(len(items), self._numKeys+1)
self.assertEqual(type(items), type([]))
self.assertEqual(type(items[0]), type(()))
self.assertEqual(len(items[0]), 2)
if verbose:
print "the first 10 items are:"
pprint(items[:10])
values = d.values()
self.assertEqual(len(values), self._numKeys+1)
self.assertEqual(type(values), type([]))
if verbose:
print "the first 10 values are:"
pprint(values[:10])
#----------------------------------------
def test02b_SequenceMethods(self):
d = self.d
for key in ['0002', '0101', '0401', '0701', '0998']:
data = d[key]
self.assertEqual(data, self.makeData(key))
if verbose:
print data
self.assertTrue(hasattr(d, "__contains__"))
self.assertTrue("0401" in d)
self.assertFalse("1234" in d)
#----------------------------------------
def test03_SimpleCursorStuff(self, get_raises_error=0, set_raises_error=0):
if verbose:
print '\n', '-=' * 30
print "Running %s.test03_SimpleCursorStuff (get_error %s, set_error %s)..." % \
(self.__class__.__name__, get_raises_error, set_raises_error)
if self.env and self.dbopenflags & db.DB_AUTO_COMMIT:
txn = self.env.txn_begin()
else:
txn = None
c = self.d.cursor(txn=txn)
rec = c.first()
count = 0
while rec is not None:
count = count + 1
if verbose and count % 100 == 0:
print rec
try:
rec = c.next()
except db.DBNotFoundError, val:
if get_raises_error:
import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print val
rec = None
else:
self.fail("unexpected DBNotFoundError")
self.assertEqual(c.get_current_size(), len(c.current()[1]),
"%s != len(%r)" % (c.get_current_size(), c.current()[1]))
self.assertEqual(count, self._numKeys)
rec = c.last()
count = 0
while rec is not None:
count = count + 1
if verbose and count % 100 == 0:
print rec
try:
rec = c.prev()
except db.DBNotFoundError, val:
if get_raises_error:
import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print val
rec = None
else:
self.fail("unexpected DBNotFoundError")
self.assertEqual(count, self._numKeys)
rec = c.set('0505')
rec2 = c.current()
self.assertEqual(rec, rec2)
self.assertEqual(rec[0], '0505')
self.assertEqual(rec[1], self.makeData('0505'))
self.assertEqual(c.get_current_size(), len(rec[1]))
# make sure we get empty values properly
rec = c.set('empty value')
self.assertEqual(rec[1], '')
self.assertEqual(c.get_current_size(), 0)
try:
n = c.set('bad key')
except db.DBNotFoundError, val:
import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print val
else:
if set_raises_error:
self.fail("expected exception")
if n is not None:
self.fail("expected None: %r" % (n,))
rec = c.get_both('0404', self.makeData('0404'))
self.assertEqual(rec, ('0404', self.makeData('0404')))
try:
n = c.get_both('0404', 'bad data')
except db.DBNotFoundError, val:
import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_NOTFOUND)
else :
self.assertEqual(val.args[0], db.DB_NOTFOUND)
if verbose: print val
else:
if get_raises_error:
self.fail("expected exception")
if n is not None:
self.fail("expected None: %r" % (n,))
if self.d.get_type() == db.DB_BTREE:
rec = c.set_range('011')
if verbose:
print "searched for '011', found: ", rec
rec = c.set_range('011',dlen=0,doff=0)
if verbose:
print "searched (partial) for '011', found: ", rec
if rec[1] != '': self.fail('expected empty data portion')
ev = c.set_range('empty value')
if verbose:
print "search for 'empty value' returned", ev
if ev[1] != '': self.fail('empty value lookup failed')
c.set('0499')
c.delete()
try:
rec = c.current()
except db.DBKeyEmptyError, val:
if get_raises_error:
import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], db.DB_KEYEMPTY)
else :
self.assertEqual(val.args[0], db.DB_KEYEMPTY)
if verbose: print val
else:
self.fail("unexpected DBKeyEmptyError")
else:
if get_raises_error:
self.fail('DBKeyEmptyError exception expected')
c.next()
c2 = c.dup(db.DB_POSITION)
self.assertEqual(c.current(), c2.current())
c2.put('', 'a new value', db.DB_CURRENT)
self.assertEqual(c.current(), c2.current())
self.assertEqual(c.current()[1], 'a new value')
c2.put('', 'er', db.DB_CURRENT, dlen=0, doff=5)
self.assertEqual(c2.current()[1], 'a newer value')
c.close()
c2.close()
if txn:
txn.commit()
# time to abuse the closed cursors and hope we don't crash
methods_to_test = {
'current': (),
'delete': (),
'dup': (db.DB_POSITION,),
'first': (),
'get': (0,),
'next': (),
'prev': (),
'last': (),
'put':('', 'spam', db.DB_CURRENT),
'set': ("0505",),
}
for method, args in methods_to_test.items():
try:
if verbose:
print "attempting to use a closed cursor's %s method" % \
method
# a bug may cause a NULL pointer dereference...
getattr(c, method)(*args)
except db.DBError, val:
import sys
if sys.version_info < (2, 6) :
self.assertEqual(val[0], 0)
else :
self.assertEqual(val.args[0], 0)
if verbose: print val
else:
self.fail("no exception raised when using a buggy cursor's"
"%s method" % method)
#
# free cursor referencing a closed database, it should not barf:
#
oldcursor = self.d.cursor(txn=txn)
self.d.close()
# this would originally cause a segfault when the cursor for a
# closed database was cleaned up. it should not anymore.
# SF pybsddb bug id 667343
del oldcursor
def test03b_SimpleCursorWithoutGetReturnsNone0(self):
# same test but raise exceptions instead of returning None
if verbose:
print '\n', '-=' * 30
print "Running %s.test03b_SimpleCursorStuffWithoutGetReturnsNone..." % \
self.__class__.__name__
old = self.d.set_get_returns_none(0)
self.assertEqual(old, 2)
self.test03_SimpleCursorStuff(get_raises_error=1, set_raises_error=1)
def test03b_SimpleCursorWithGetReturnsNone1(self):
# same test but raise exceptions instead of returning None
if verbose:
print '\n', '-=' * 30
print "Running %s.test03b_SimpleCursorStuffWithoutGetReturnsNone..." % \
self.__class__.__name__
old = self.d.set_get_returns_none(1)
self.test03_SimpleCursorStuff(get_raises_error=0, set_raises_error=1)
def test03c_SimpleCursorGetReturnsNone2(self):
# same test but raise exceptions instead of returning None
if verbose:
print '\n', '-=' * 30
print "Running %s.test03c_SimpleCursorStuffWithoutSetReturnsNone..." % \
self.__class__.__name__
old = self.d.set_get_returns_none(1)
self.assertEqual(old, 2)
old = self.d.set_get_returns_none(2)
self.assertEqual(old, 1)
self.test03_SimpleCursorStuff(get_raises_error=0, set_raises_error=0)
if db.version() >= (4, 6):
def test03d_SimpleCursorPriority(self) :
c = self.d.cursor()
c.set_priority(db.DB_PRIORITY_VERY_LOW) # Positional
self.assertEqual(db.DB_PRIORITY_VERY_LOW, c.get_priority())
c.set_priority(priority=db.DB_PRIORITY_HIGH) # Keyword
self.assertEqual(db.DB_PRIORITY_HIGH, c.get_priority())
c.close()
#----------------------------------------
def test04_PartialGetAndPut(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test04_PartialGetAndPut..." % \
self.__class__.__name__
key = "partialTest"
data = "1" * 1000 + "2" * 1000
d.put(key, data)
self.assertEqual(d.get(key), data)
self.assertEqual(d.get(key, dlen=20, doff=990),
("1" * 10) + ("2" * 10))
d.put("partialtest2", ("1" * 30000) + "robin" )
self.assertEqual(d.get("partialtest2", dlen=5, doff=30000), "robin")
# There seems to be a bug in DB here... Commented out the test for
# now.
##self.assertEqual(d.get("partialtest2", dlen=5, doff=30010), "")
if self.dbsetflags != db.DB_DUP:
# Partial put with duplicate records requires a cursor
d.put(key, "0000", dlen=2000, doff=0)
self.assertEqual(d.get(key), "0000")
d.put(key, "1111", dlen=1, doff=2)
self.assertEqual(d.get(key), "0011110")
#----------------------------------------
def test05_GetSize(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test05_GetSize..." % self.__class__.__name__
for i in range(1, 50000, 500):
key = "size%s" % i
#print "before ", i,
d.put(key, "1" * i)
#print "after",
self.assertEqual(d.get_size(key), i)
#print "done"
#----------------------------------------
def test06_Truncate(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test06_Truncate..." % self.__class__.__name__
d.put("abcde", "ABCDE");
num = d.truncate()
self.assertTrue(num >= 1, "truncate returned <= 0 on non-empty database")
num = d.truncate()
self.assertEqual(num, 0,
"truncate on empty DB returned nonzero (%r)" % (num,))
#----------------------------------------
def test07_verify(self):
# Verify bug solved in 4.7.3pre8
self.d.close()
d = db.DB(self.env)
d.verify(self.filename)
#----------------------------------------
if db.version() >= (4, 6):
def test08_exists(self) :
self.d.put("abcde", "ABCDE")
self.assertTrue(self.d.exists("abcde") == True,
"DB->exists() returns wrong value")
self.assertTrue(self.d.exists("x") == False,
"DB->exists() returns wrong value")
#----------------------------------------
if db.version() >= (4, 7):
def test_compact(self) :
d = self.d
self.assertEqual(0, d.compact(flags=db.DB_FREELIST_ONLY))
self.assertEqual(0, d.compact(flags=db.DB_FREELIST_ONLY))
d.put("abcde", "ABCDE");
d.put("bcde", "BCDE");
d.put("abc", "ABC");
d.put("monty", "python");
d.delete("abc")
d.delete("bcde")
d.compact(start='abcde', stop='monty', txn=None,
compact_fillpercent=42, compact_pages=1,
compact_timeout=50000000,
flags=db.DB_FREELIST_ONLY|db.DB_FREE_SPACE)
#----------------------------------------
#----------------------------------------------------------------------
class BasicBTreeTestCase(BasicTestCase):
dbtype = db.DB_BTREE
class BasicHashTestCase(BasicTestCase):
dbtype = db.DB_HASH
class BasicBTreeWithThreadFlagTestCase(BasicTestCase):
dbtype = db.DB_BTREE
dbopenflags = db.DB_THREAD
class BasicHashWithThreadFlagTestCase(BasicTestCase):
dbtype = db.DB_HASH
dbopenflags = db.DB_THREAD
class BasicWithEnvTestCase(BasicTestCase):
dbopenflags = db.DB_THREAD
useEnv = 1
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
#----------------------------------------
def test09_EnvRemoveAndRename(self):
if not self.env:
return
if verbose:
print '\n', '-=' * 30
print "Running %s.test09_EnvRemoveAndRename..." % self.__class__.__name__
# can't rename or remove an open DB
self.d.close()
newname = self.filename + '.renamed'
self.env.dbrename(self.filename, None, newname)
self.env.dbremove(newname)
#----------------------------------------
class BasicBTreeWithEnvTestCase(BasicWithEnvTestCase):
dbtype = db.DB_BTREE
class BasicHashWithEnvTestCase(BasicWithEnvTestCase):
dbtype = db.DB_HASH
#----------------------------------------------------------------------
class BasicTransactionTestCase(BasicTestCase):
import sys
if sys.version_info < (2, 4):
def assertTrue(self, expr, msg=None):
return self.failUnless(expr,msg=msg)
if (sys.version_info < (2, 7)) or ((sys.version_info >= (3, 0)) and
(sys.version_info < (3, 2))) :
def assertIn(self, a, b, msg=None) :
return self.assertTrue(a in b, msg=msg)
dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
useEnv = 1
envflags = (db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_TXN)
envsetflags = db.DB_AUTO_COMMIT
def tearDown(self):
self.txn.commit()
BasicTestCase.tearDown(self)
def populateDB(self):
txn = self.env.txn_begin()
BasicTestCase.populateDB(self, _txn=txn)
self.txn = self.env.txn_begin()
def test06_Transactions(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test06_Transactions..." % self.__class__.__name__
self.assertEqual(d.get('new rec', txn=self.txn), None)
d.put('new rec', 'this is a new record', self.txn)
self.assertEqual(d.get('new rec', txn=self.txn),
'this is a new record')
self.txn.abort()
self.assertEqual(d.get('new rec'), None)
self.txn = self.env.txn_begin()
self.assertEqual(d.get('new rec', txn=self.txn), None)
d.put('new rec', 'this is a new record', self.txn)
self.assertEqual(d.get('new rec', txn=self.txn),
'this is a new record')
self.txn.commit()
self.assertEqual(d.get('new rec'), 'this is a new record')
self.txn = self.env.txn_begin()
c = d.cursor(self.txn)
rec = c.first()
count = 0
while rec is not None:
count = count + 1
if verbose and count % 100 == 0:
print rec
rec = c.next()
self.assertEqual(count, self._numKeys+1)
c.close() # Cursors *MUST* be closed before commit!
self.txn.commit()
# flush pending updates
self.env.txn_checkpoint (0, 0, 0)
statDict = self.env.log_stat(0);
self.assertIn('magic', statDict)
self.assertIn('version', statDict)
self.assertIn('cur_file', statDict)
self.assertIn('region_nowait', statDict)
# must have at least one log file present:
logs = self.env.log_archive(db.DB_ARCH_ABS | db.DB_ARCH_LOG)
self.assertNotEqual(logs, None)
for log in logs:
if verbose:
print 'log file: ' + log
if db.version() >= (4,2):
logs = self.env.log_archive(db.DB_ARCH_REMOVE)
self.assertTrue(not logs)
self.txn = self.env.txn_begin()
#----------------------------------------
if db.version() >= (4, 6):
def test08_exists(self) :
txn = self.env.txn_begin()
self.d.put("abcde", "ABCDE", txn=txn)
txn.commit()
txn = self.env.txn_begin()
self.assertTrue(self.d.exists("abcde", txn=txn) == True,
"DB->exists() returns wrong value")
self.assertTrue(self.d.exists("x", txn=txn) == False,
"DB->exists() returns wrong value")
txn.abort()
#----------------------------------------
def test09_TxnTruncate(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test09_TxnTruncate..." % self.__class__.__name__
d.put("abcde", "ABCDE");
txn = self.env.txn_begin()
num = d.truncate(txn)
self.assertTrue(num >= 1, "truncate returned <= 0 on non-empty database")
num = d.truncate(txn)
self.assertEqual(num, 0,
"truncate on empty DB returned nonzero (%r)" % (num,))
txn.commit()
#----------------------------------------
def test10_TxnLateUse(self):
txn = self.env.txn_begin()
txn.abort()
try:
txn.abort()
except db.DBError, e:
pass
else:
raise RuntimeError, "DBTxn.abort() called after DB_TXN no longer valid w/o an exception"
txn = self.env.txn_begin()
txn.commit()
try:
txn.commit()
except db.DBError, e:
pass
else:
raise RuntimeError, "DBTxn.commit() called after DB_TXN no longer valid w/o an exception"
#----------------------------------------
if db.version() >= (4, 4):
def test_txn_name(self) :
txn=self.env.txn_begin()
self.assertEqual(txn.get_name(), "")
txn.set_name("XXYY")
self.assertEqual(txn.get_name(), "XXYY")
txn.set_name("")
self.assertEqual(txn.get_name(), "")
txn.abort()
#----------------------------------------
def test_txn_set_timeout(self) :
txn=self.env.txn_begin()
txn.set_timeout(1234567, db.DB_SET_LOCK_TIMEOUT)
txn.set_timeout(2345678, flags=db.DB_SET_TXN_TIMEOUT)
txn.abort()
#----------------------------------------
if db.version() >= (4, 2) :
def test_get_tx_max(self) :
self.assertEqual(self.env.get_tx_max(), 30)
def test_get_tx_timestamp(self) :
self.assertEqual(self.env.get_tx_timestamp(), self._t)
class BTreeTransactionTestCase(BasicTransactionTestCase):
dbtype = db.DB_BTREE
class HashTransactionTestCase(BasicTransactionTestCase):
dbtype = db.DB_HASH
#----------------------------------------------------------------------
class BTreeRecnoTestCase(BasicTestCase):
dbtype = db.DB_BTREE
dbsetflags = db.DB_RECNUM
def test09_RecnoInBTree(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test09_RecnoInBTree..." % self.__class__.__name__
rec = d.get(200)
self.assertEqual(type(rec), type(()))
self.assertEqual(len(rec), 2)
if verbose:
print "Record #200 is ", rec
c = d.cursor()
c.set('0200')
num = c.get_recno()
self.assertEqual(type(num), type(1))
if verbose:
print "recno of d['0200'] is ", num
rec = c.current()
self.assertEqual(c.set_recno(num), rec)
c.close()
class BTreeRecnoWithThreadFlagTestCase(BTreeRecnoTestCase):
dbopenflags = db.DB_THREAD
#----------------------------------------------------------------------
class BasicDUPTestCase(BasicTestCase):
dbsetflags = db.DB_DUP
def test10_DuplicateKeys(self):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test10_DuplicateKeys..." % \
self.__class__.__name__
d.put("dup0", "before")
for x in "The quick brown fox jumped over the lazy dog.".split():
d.put("dup1", x)
d.put("dup2", "after")
data = d.get("dup1")
self.assertEqual(data, "The")
if verbose:
print data
c = d.cursor()
rec = c.set("dup1")
self.assertEqual(rec, ('dup1', 'The'))
next_reg = c.next()
self.assertEqual(next_reg, ('dup1', 'quick'))
rec = c.set("dup1")
count = c.count()
self.assertEqual(count, 9)
next_dup = c.next_dup()
self.assertEqual(next_dup, ('dup1', 'quick'))
rec = c.set('dup1')
while rec is not None:
if verbose:
print rec
rec = c.next_dup()
c.set('dup1')
rec = c.next_nodup()
self.assertNotEqual(rec[0], 'dup1')
if verbose:
print rec
c.close()
class BTreeDUPTestCase(BasicDUPTestCase):
dbtype = db.DB_BTREE
class HashDUPTestCase(BasicDUPTestCase):
dbtype = db.DB_HASH
class BTreeDUPWithThreadTestCase(BasicDUPTestCase):
dbtype = db.DB_BTREE
dbopenflags = db.DB_THREAD
class HashDUPWithThreadTestCase(BasicDUPTestCase):
dbtype = db.DB_HASH
dbopenflags = db.DB_THREAD
#----------------------------------------------------------------------
class BasicMultiDBTestCase(BasicTestCase):
dbname = 'first'
def otherType(self):
if self.dbtype == db.DB_BTREE:
return db.DB_HASH
else:
return db.DB_BTREE
def test11_MultiDB(self):
d1 = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test11_MultiDB..." % self.__class__.__name__
d2 = db.DB(self.env)
d2.open(self.filename, "second", self.dbtype,
self.dbopenflags|db.DB_CREATE)
d3 = db.DB(self.env)
d3.open(self.filename, "third", self.otherType(),
self.dbopenflags|db.DB_CREATE)
for x in "The quick brown fox jumped over the lazy dog".split():
d2.put(x, self.makeData(x))
for x in string.letters:
d3.put(x, x*70)
d1.sync()
d2.sync()
d3.sync()
d1.close()
d2.close()
d3.close()
self.d = d1 = d2 = d3 = None
self.d = d1 = db.DB(self.env)
d1.open(self.filename, self.dbname, flags = self.dbopenflags)
d2 = db.DB(self.env)
d2.open(self.filename, "second", flags = self.dbopenflags)
d3 = db.DB(self.env)
d3.open(self.filename, "third", flags = self.dbopenflags)
c1 = d1.cursor()
c2 = d2.cursor()
c3 = d3.cursor()
count = 0
rec = c1.first()
while rec is not None:
count = count + 1
if verbose and (count % 50) == 0:
print rec
rec = c1.next()
self.assertEqual(count, self._numKeys)
count = 0
rec = c2.first()
while rec is not None:
count = count + 1
if verbose:
print rec
rec = c2.next()
self.assertEqual(count, 9)
count = 0
rec = c3.first()
while rec is not None:
count = count + 1
if verbose:
print rec
rec = c3.next()
self.assertEqual(count, len(string.letters))
c1.close()
c2.close()
c3.close()
d2.close()
d3.close()
# Strange things happen if you try to use Multiple DBs per file without a
# DBEnv with MPOOL and LOCKing...
class BTreeMultiDBTestCase(BasicMultiDBTestCase):
dbtype = db.DB_BTREE
dbopenflags = db.DB_THREAD
useEnv = 1
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
class HashMultiDBTestCase(BasicMultiDBTestCase):
dbtype = db.DB_HASH
dbopenflags = db.DB_THREAD
useEnv = 1
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK
class PrivateObject(unittest.TestCase) :
import sys
if sys.version_info < (2, 4):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
def tearDown(self) :
del self.obj
def test01_DefaultIsNone(self) :
self.assertEqual(self.obj.get_private(), None)
def test02_assignment(self) :
a = "example of private object"
self.obj.set_private(a)
b = self.obj.get_private()
self.assertTrue(a is b) # Object identity
def test03_leak_assignment(self) :
import sys
a = "example of private object"
refcount = sys.getrefcount(a)
self.obj.set_private(a)
self.assertEqual(refcount+1, sys.getrefcount(a))
self.obj.set_private(None)
self.assertEqual(refcount, sys.getrefcount(a))
def test04_leak_GC(self) :
import sys
a = "example of private object"
refcount = sys.getrefcount(a)
self.obj.set_private(a)
self.obj = None
self.assertEqual(refcount, sys.getrefcount(a))
class DBEnvPrivateObject(PrivateObject) :
def setUp(self) :
self.obj = db.DBEnv()
class DBPrivateObject(PrivateObject) :
def setUp(self) :
self.obj = db.DB()
class CrashAndBurn(unittest.TestCase) :
import sys
if sys.version_info < (2, 4):
def assertTrue(self, expr, msg=None):
self.failUnless(expr,msg=msg)
#def test01_OpenCrash(self) :
# # See http://bugs.python.org/issue3307
# self.assertRaises(db.DBInvalidArgError, db.DB, None, 65535)
if db.version() < (4, 8) :
def test02_DBEnv_dealloc(self):
# http://bugs.python.org/issue3885
import gc
self.assertRaises(db.DBInvalidArgError, db.DBEnv, ~db.DB_RPCCLIENT)
gc.collect()
#----------------------------------------------------------------------
#----------------------------------------------------------------------
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(VersionTestCase))
suite.addTest(unittest.makeSuite(BasicBTreeTestCase))
suite.addTest(unittest.makeSuite(BasicHashTestCase))
suite.addTest(unittest.makeSuite(BasicBTreeWithThreadFlagTestCase))
suite.addTest(unittest.makeSuite(BasicHashWithThreadFlagTestCase))
suite.addTest(unittest.makeSuite(BasicBTreeWithEnvTestCase))
suite.addTest(unittest.makeSuite(BasicHashWithEnvTestCase))
suite.addTest(unittest.makeSuite(BTreeTransactionTestCase))
suite.addTest(unittest.makeSuite(HashTransactionTestCase))
suite.addTest(unittest.makeSuite(BTreeRecnoTestCase))
suite.addTest(unittest.makeSuite(BTreeRecnoWithThreadFlagTestCase))
suite.addTest(unittest.makeSuite(BTreeDUPTestCase))
suite.addTest(unittest.makeSuite(HashDUPTestCase))
suite.addTest(unittest.makeSuite(BTreeDUPWithThreadTestCase))
suite.addTest(unittest.makeSuite(HashDUPWithThreadTestCase))
suite.addTest(unittest.makeSuite(BTreeMultiDBTestCase))
suite.addTest(unittest.makeSuite(HashMultiDBTestCase))
suite.addTest(unittest.makeSuite(DBEnvPrivateObject))
suite.addTest(unittest.makeSuite(DBPrivateObject))
suite.addTest(unittest.makeSuite(CrashAndBurn))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| gpl-2.0 |
ibc/MediaSoup | worker/deps/gyp/test/variables/commands/gyptest-commands-repeated.py | 12 | 1353 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<!()' syntax commands where they are evaluated
more than once.
"""
from __future__ import print_function
import TestGyp
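# For context: a .gyp file is parsed as a Python-style dict literal, and a
# value of the form '<!(command)' is replaced by that command's stdout during
# variable expansion.  The dict below is a hypothetical illustration only; it
# is not read by this test.
_example_gyp_contents = {
    'variables': {'greeting': '<!(echo hello)'},  # expands to 'hello'
    'targets': [{'target_name': 'demo', 'type': 'none'}],
}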
test = TestGyp.TestGyp(format='gypd')
expect = test.read('commands-repeated.gyp.stdout').replace('\r\n', '\n')
test.run_gyp('commands-repeated.gyp',
'--debug', 'variables',
stdout=expect, ignore_line_numbers=True)
# Verify the commands-repeated.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file by setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('commands-repeated.gypd').replace('\r\n', '\n')
expect = test.read('commands-repeated.gypd.golden').replace('\r\n', '\n')
if not test.match(contents, expect):
print("Unexpected contents of `commands-repeated.gypd'")
test.diff(expect, contents, 'commands-repeated.gypd ')
test.fail_test()
test.pass_test()
| isc |
benvermaercke/pyqtgraph | pyqtgraph/frozenSupport.py | 52 | 1830 | ## Definitions helpful in frozen environments (eg py2exe)
import os, sys, zipfile
def listdir(path):
"""Replacement for os.listdir that works in frozen environments."""
if not hasattr(sys, 'frozen'):
return os.listdir(path)
(zipPath, archivePath) = splitZip(path)
if archivePath is None:
return os.listdir(path)
with zipfile.ZipFile(zipPath, "r") as zipobj:
contents = zipobj.namelist()
results = set()
for name in contents:
# components in zip archive paths are always separated by forward slash
if name.startswith(archivePath) and len(name) > len(archivePath):
name = name[len(archivePath):].split('/')[0]
results.add(name)
return list(results)
def isdir(path):
"""Replacement for os.path.isdir that works in frozen environments."""
if not hasattr(sys, 'frozen'):
return os.path.isdir(path)
(zipPath, archivePath) = splitZip(path)
if archivePath is None:
return os.path.isdir(path)
with zipfile.ZipFile(zipPath, "r") as zipobj:
contents = zipobj.namelist()
archivePath = archivePath.rstrip('/') + '/' ## make sure there's exactly one '/' at the end
for c in contents:
if c.startswith(archivePath):
return True
return False
def splitZip(path):
"""Splits a path containing a zip file into (zipfile, subpath).
If there is no zip file, returns (path, None)"""
components = os.path.normpath(path).split(os.sep)
for index, component in enumerate(components):
if component.endswith('.zip'):
zipPath = os.sep.join(components[0:index+1])
archivePath = ''.join([x+'/' for x in components[index+1:]])
return (zipPath, archivePath)
else:
return (path, None)
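if __name__ == '__main__':
    # Rough usage sketch; the POSIX-style paths below are illustrative only.
    # In a frozen (e.g. py2exe) layout, listdir() and isdir() read the
    # containing zip archive instead of the filesystem.
    print(splitZip('library.zip/pyqtgraph/widgets'))
    # -> ('library.zip', 'pyqtgraph/widgets/')
    print(splitZip('some/normal/path'))
    # -> ('some/normal/path', None)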
| mit |
Ashaba/rms | rmslocalenv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/etree.py | 322 | 4684 | from __future__ import absolute_import, division, unicode_literals
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
OrderedDict = dict
import re
from pip._vendor.six import string_types
from . import base
from .._utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class TreeWalker(base.NonRecursiveTreeWalker): # pylint:disable=unused-variable
"""Given the particular ElementTree representation, this implementation,
to avoid using recursion, returns "nodes" as tuples with the following
content:
1. The current element
2. The index of the element relative to its parent
3. A stack of ancestor elements
4. A flag "text", "tail" or None to indicate if the current node is a
text node; either the text or tail of the current element (1)
"""
def getNodeDetails(self, node):
if isinstance(node, tuple): # It might be the root Element
elt, _, _, flag = node
if flag in ("text", "tail"):
return base.TEXT, getattr(elt, flag)
else:
node = elt
if not(hasattr(node, "tag")):
node = node.getroot()
if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
return (base.DOCUMENT,)
elif node.tag == "<!DOCTYPE>":
return (base.DOCTYPE, node.text,
node.get("publicId"), node.get("systemId"))
elif node.tag == ElementTreeCommentType:
return base.COMMENT, node.text
else:
assert isinstance(node.tag, string_types), type(node.tag)
# This is assumed to be an ordinary element
match = tag_regexp.match(node.tag)
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = node.tag
attrs = OrderedDict()
for name, value in list(node.attrib.items()):
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (base.ELEMENT, namespace, tag,
attrs, len(node) or node.text)
def getFirstChild(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
element, key, parents, flag = node, None, [], None
if flag in ("text", "tail"):
return None
else:
if element.text:
return element, key, parents, "text"
elif len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
def getNextSibling(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
else:
if element.tail and flag != "tail":
return element, key, parents, "tail"
elif key < len(parents[-1]) - 1:
return parents[-1][key + 1], key + 1, parents, None
else:
return None
def getParentNode(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if not parents:
return element
else:
return element, key, parents, None
else:
parent = parents.pop()
if not parents:
return parent
else:
assert list(parents[-1]).count(parent) == 1
return parent, list(parents[-1]).index(parent), parents, None
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
| mit |
renweizhukov/LearningPythonTheHardWay | projects/ex48/ex48/parser.py | 1 | 1814 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class ParserError(Exception):
pass
class Sentence(object):
def __init__(self, subject, verb, obj):
# remember we take ('noun', 'princess') tuples and convert them
self.subject = subject[1]
self.verb = verb[1]
self.object = obj[1]
def peek(word_list):
if word_list:
word = word_list[0]
return word[0]
else:
return None
def match(word_list, expecting):
if word_list:
word = word_list.pop(0)
if word[0] == expecting:
return word
else:
return None
else:
return None
def skip(word_list, word_type):
while peek(word_list) == word_type:
match(word_list, word_type)
def parse_verb(word_list):
skip(word_list, 'stop')
if peek(word_list) == 'verb':
return match(word_list, 'verb')
else:
raise ParserError("Expected a verb next.")
def parse_object(word_list):
skip(word_list, 'stop')
next_word = peek(word_list)
if next_word == 'noun':
return match(word_list, 'noun')
elif next_word == 'direction':
return match(word_list, 'direction')
else:
raise ParserError("Expected a noun or direction next.")
def parse_subject(word_list):
skip(word_list, 'stop')
next_word = peek(word_list)
if next_word == 'noun':
return match(word_list, 'noun')
elif next_word == 'verb':
return ('noun', 'player')
else:
# The tutorial has a typo here: "raise ParserError("Expected a verb next.")"
raise ParserError("Expected a subject next.")
def parse_sentence(word_list):
subj = parse_subject(word_list)
verb = parse_verb(word_list)
obj = parse_object(word_list)
return Sentence(subj, verb, obj)
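if __name__ == '__main__':
    # Minimal usage sketch: the scanned word list below is hypothetical; in
    # the exercise it would come from the companion lexicon/scanner module.
    words = [('verb', 'open'), ('stop', 'the'), ('noun', 'door')]
    sentence = parse_sentence(words)
    print sentence.subject, sentence.verb, sentence.object  # player open door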
| lgpl-3.0 |