repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
dendisuhubdy/tensorflow | tensorflow/python/platform/resource_loader.py | 23 | 4143 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resource management library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os as _os
import sys as _sys
from tensorflow.python.util import tf_inspect as _inspect
from tensorflow.python.util.tf_export import tf_export
@tf_export('resource_loader.load_resource')
def load_resource(path):
"""Load the resource at given path, where path is relative to tensorflow/.
Args:
path: a string resource path relative to tensorflow/.
Returns:
The contents of that resource.
Raises:
IOError: If the path is not found, or the resource can't be opened.
"""
tensorflow_root = (_os.path.join(
_os.path.dirname(__file__), _os.pardir, _os.pardir))
path = _os.path.join(tensorflow_root, path)
path = _os.path.abspath(path)
with open(path, 'rb') as f:
return f.read()
# pylint: disable=protected-access
@tf_export('resource_loader.get_data_files_path')
def get_data_files_path():
"""Get a direct path to the data files colocated with the script.
Returns:
The directory where files specified in data attribute of py_test
and py_binary are stored.
"""
return _os.path.dirname(_inspect.getfile(_sys._getframe(1)))
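# Implementation note: _sys._getframe(1) refers to the caller's stack frame,
# so get_data_files_path() returns the directory of the script that calls it,
# not the directory of this module. get_path_to_datafile() below relies on the
# same trick.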
@tf_export('resource_loader.get_root_dir_with_all_resources')
def get_root_dir_with_all_resources():
"""Get a root directory containing all the data attributes in the build rule.
Returns:
The path to the specified file present in the data attribute of py_test
or py_binary. Falls back to returning the same as get_data_files_path if it
fails to detect a bazel runfiles directory.
"""
script_dir = get_data_files_path()
# Create a history of the paths, because the data files are located relative
# to the repository root directory, which is directly under runfiles
# directory.
directories = [script_dir]
data_files_dir = ''
while True:
candidate_dir = directories[-1]
current_directory = _os.path.basename(candidate_dir)
if '.runfiles' in current_directory:
# Our file should never be directly under runfiles.
# If the history has only one item, it means we are directly inside the
# runfiles directory, something is wrong, fall back to the default return
# value, script directory.
if len(directories) > 1:
data_files_dir = directories[-2]
break
else:
new_candidate_dir = _os.path.dirname(candidate_dir)
# If we are at the root directory these two will be the same.
if new_candidate_dir == candidate_dir:
break
else:
directories.append(new_candidate_dir)
return data_files_dir or script_dir
@tf_export('resource_loader.get_path_to_datafile')
def get_path_to_datafile(path):
"""Get the path to the specified file in the data dependencies.
The path is relative to tensorflow/
Args:
path: a string resource path relative to tensorflow/
Returns:
The path to the specified file present in the data attribute of py_test
or py_binary.
Raises:
IOError: If the path is not found, or the resource can't be opened.
"""
data_files_path = _os.path.dirname(_inspect.getfile(_sys._getframe(1)))
return _os.path.join(data_files_path, path)
@tf_export('resource_loader.readahead_file_path')
def readahead_file_path(path, readahead='128M'): # pylint: disable=unused-argument
"""Readahead files not implemented; simply returns given path."""
return path
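
# A minimal usage sketch, assuming the module is run directly; 'example.txt'
# is a hypothetical data dependency of the calling binary.
if __name__ == '__main__':
  print('data files path:', get_data_files_path())
  print('root with all resources:', get_root_dir_with_all_resources())
  print('path to example.txt:', get_path_to_datafile('example.txt'))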
| apache-2.0 |
shaufi10/odoo | addons/base_import/tests/test_cases.py | 189 | 15021 | # -*- encoding: utf-8 -*-
import unittest2
from openerp.tests.common import TransactionCase
from .. import models
ID_FIELD = {
'id': 'id',
'name': 'id',
'string': "External ID",
'required': False,
'fields': [],
}
def make_field(name='value', string='unknown', required=False, fields=[]):
return [
ID_FIELD,
{'id': name, 'name': name, 'string': string, 'required': required, 'fields': fields},
]
def sorted_fields(fields):
""" recursively sort field lists to ease comparison """
recursed = [dict(field, fields=sorted_fields(field['fields'])) for field in fields]
return sorted(recursed, key=lambda field: field['id'])
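# Example: make_field(name='value', string='unknown') expands to
#   [ID_FIELD,
#    {'id': 'value', 'name': 'value', 'string': 'unknown',
#     'required': False, 'fields': []}]
# and sorted_fields() recursively orders such lists by 'id', so two
# get_fields() results can be compared regardless of field ordering.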
class BaseImportCase(TransactionCase):
def assertEqualFields(self, fields1, fields2):
self.assertEqual(sorted_fields(fields1), sorted_fields(fields2))
class test_basic_fields(BaseImportCase):
def get_fields(self, field):
return self.registry('base_import.import')\
.get_fields(self.cr, self.uid, 'base_import.tests.models.' + field)
def test_base(self):
""" A basic field is not required """
self.assertEqualFields(self.get_fields('char'), make_field())
def test_required(self):
""" Required fields should be flagged (so they can be fill-required) """
self.assertEqualFields(self.get_fields('char.required'), make_field(required=True))
def test_readonly(self):
""" Readonly fields should be filtered out"""
self.assertEqualFields(self.get_fields('char.readonly'), [ID_FIELD])
def test_readonly_states(self):
""" Readonly fields with states should not be filtered out"""
self.assertEqualFields(self.get_fields('char.states'), make_field())
def test_readonly_states_noreadonly(self):
""" Readonly fields with states having nothing to do with
readonly should still be filtered out"""
self.assertEqualFields(self.get_fields('char.noreadonly'), [ID_FIELD])
def test_readonly_states_stillreadonly(self):
""" Readonly fields with readonly states leaving them readonly
always... filtered out"""
self.assertEqualFields(self.get_fields('char.stillreadonly'), [ID_FIELD])
def test_m2o(self):
""" M2O fields should allow import of themselves (name_get),
their id and their xid"""
self.assertEqualFields(self.get_fields('m2o'), make_field(fields=[
{'id': 'value', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
{'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []},
]))
def test_m2o_required(self):
""" If an m2o field is required, its three sub-fields are
required as well (the client has to handle that: requiredness
is id-based)
"""
self.assertEqualFields(self.get_fields('m2o.required'), make_field(required=True, fields=[
{'id': 'value', 'name': 'id', 'string': 'External ID', 'required': True, 'fields': []},
{'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': True, 'fields': []},
]))
class test_o2m(BaseImportCase):
def get_fields(self, field):
return self.registry('base_import.import')\
.get_fields(self.cr, self.uid, 'base_import.tests.models.' + field)
def test_shallow(self):
self.assertEqualFields(self.get_fields('o2m'), make_field(fields=[
ID_FIELD,
# FIXME: should reverse field be ignored?
{'id': 'parent_id', 'name': 'parent_id', 'string': 'unknown', 'required': False, 'fields': [
{'id': 'parent_id', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
{'id': 'parent_id', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []},
]},
{'id': 'value', 'name': 'value', 'string': 'unknown', 'required': False, 'fields': []},
]))
class test_match_headers_single(TransactionCase):
def test_match_by_name(self):
match = self.registry('base_import.import')._match_header(
'f0', [{'name': 'f0'}], {})
self.assertEqual(match, [{'name': 'f0'}])
def test_match_by_string(self):
match = self.registry('base_import.import')._match_header(
'some field', [{'name': 'bob', 'string': "Some Field"}], {})
self.assertEqual(match, [{'name': 'bob', 'string': "Some Field"}])
def test_nomatch(self):
match = self.registry('base_import.import')._match_header(
'should not be', [{'name': 'bob', 'string': "wheee"}], {})
self.assertEqual(match, [])
def test_recursive_match(self):
f = {
'name': 'f0',
'string': "My Field",
'fields': [
{'name': 'f0', 'string': "Sub field 0", 'fields': []},
{'name': 'f1', 'string': "Sub field 2", 'fields': []},
]
}
match = self.registry('base_import.import')._match_header(
'f0/f1', [f], {})
self.assertEqual(match, [f, f['fields'][1]])
def test_recursive_nomatch(self):
""" Match first level, fail to match second level
"""
f = {
'name': 'f0',
'string': "My Field",
'fields': [
{'name': 'f0', 'string': "Sub field 0", 'fields': []},
{'name': 'f1', 'string': "Sub field 2", 'fields': []},
]
}
match = self.registry('base_import.import')._match_header(
'f0/f2', [f], {})
self.assertEqual(match, [])
class test_match_headers_multiple(TransactionCase):
def test_noheaders(self):
self.assertEqual(
self.registry('base_import.import')._match_headers(
[], [], {}),
(None, None)
)
def test_nomatch(self):
self.assertEqual(
self.registry('base_import.import')._match_headers(
iter([
['foo', 'bar', 'baz', 'qux'],
['v1', 'v2', 'v3', 'v4'],
]),
[],
{'headers': True}),
(
['foo', 'bar', 'baz', 'qux'],
dict.fromkeys(range(4))
)
)
def test_mixed(self):
self.assertEqual(
self.registry('base_import.import')._match_headers(
iter(['foo bar baz qux/corge'.split()]),
[
{'name': 'bar', 'string': 'Bar'},
{'name': 'bob', 'string': 'Baz'},
{'name': 'qux', 'string': 'Qux', 'fields': [
{'name': 'corge', 'fields': []},
]}
],
{'headers': True}),
(['foo', 'bar', 'baz', 'qux/corge'], {
0: None,
1: ['bar'],
2: ['bob'],
3: ['qux', 'corge'],
})
)
class test_preview(TransactionCase):
def make_import(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'res.users',
'file': u"로그인,언어\nbob,1\n".encode('euc_kr'),
})
return Import, id
def test_encoding(self):
Import, id = self.make_import()
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': '"',
'separator': ',',
})
self.assertTrue('error' in result)
def test_csv_errors(self):
Import, id = self.make_import()
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': 'foo',
'separator': ',',
'encoding': 'euc_kr',
})
self.assertTrue('error' in result)
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': '"',
'separator': 'bob',
'encoding': 'euc_kr',
})
self.assertTrue('error' in result)
def test_success(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
'bar,3,4\n'
'qux,5,6\n'
})
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': '"',
'separator': ',',
'headers': True,
})
self.assertEqual(result['matches'], {0: ['name'], 1: ['somevalue'], 2: None})
self.assertEqual(result['headers'], ['name', 'Some Value', 'Counter'])
# Order depends on iteration order of fields_get
self.assertItemsEqual(result['fields'], [
ID_FIELD,
{'id': 'name', 'name': 'name', 'string': 'Name', 'required':False, 'fields': []},
{'id': 'somevalue', 'name': 'somevalue', 'string': 'Some Value', 'required':True, 'fields': []},
{'id': 'othervalue', 'name': 'othervalue', 'string': 'Other Variable', 'required':False, 'fields': []},
])
self.assertEqual(result['preview'], [
['foo', '1', '2'],
['bar', '3', '4'],
['qux', '5', '6'],
])
# Ensure we only have the response fields we expect
self.assertItemsEqual(result.keys(), ['matches', 'headers', 'fields', 'preview'])
class test_convert_import_data(TransactionCase):
""" Tests conversion of base_import.import input into data which
can be fed to Model.import_data
"""
def test_all(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
'bar,3,4\n'
'qux,5,6\n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', 'somevalue', 'othervalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'somevalue', 'othervalue'])
self.assertItemsEqual(data, [
('foo', '1', '2'),
('bar', '3', '4'),
('qux', '5', '6'),
])
def test_filtered(self):
""" If ``False`` is provided as field mapping for a column,
that column should be removed from importable data
"""
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
'bar,3,4\n'
'qux,5,6\n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', False, 'othervalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'othervalue'])
self.assertItemsEqual(data, [
('foo', '2'),
('bar', '4'),
('qux', '6'),
])
def test_norow(self):
""" If a row is composed only of empty values (due to having
filtered out non-empty values from it), it should be removed
"""
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
',3,\n'
',5,6\n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', False, 'othervalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'othervalue'])
self.assertItemsEqual(data, [
('foo', '2'),
('', '6'),
])
def test_empty_rows(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value\n'
'foo,1\n'
'\n'
'bar,2\n'
' \n'
'\t \n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', 'somevalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'somevalue'])
self.assertItemsEqual(data, [
('foo', '1'),
('bar', '2'),
])
def test_nofield(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
})
record = Import.browse(self.cr, self.uid, id)
self.assertRaises(
ValueError,
Import._convert_import_data,
record, [],
{'quoting': '"', 'separator': ',', 'headers': True,})
def test_falsefields(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
})
record = Import.browse(self.cr, self.uid, id)
self.assertRaises(
ValueError,
Import._convert_import_data,
record, [False, False, False],
{'quoting': '"', 'separator': ',', 'headers': True,})
class test_failures(TransactionCase):
def test_big_attachments(self):
"""
Ensure big fields (e.g. b64-encoded image data) can be imported and
we're not hitting limits of the default CSV parser config
"""
import csv, cStringIO
from PIL import Image
im = Image.new('RGB', (1920, 1080))
fout = cStringIO.StringIO()
writer = csv.writer(fout, dialect=None)
writer.writerows([
['name', 'db_datas'],
['foo', im.tobytes().encode('base64')]
])
Import = self.env['base_import.import']
imp = Import.create({
'res_model': 'ir.attachment',
'file': fout.getvalue()
})
[results] = imp.do(
['name', 'db_datas'],
{'headers': True, 'separator': ',', 'quoting': '"'})
self.assertFalse(
results, "results should be empty on successful import")
| agpl-3.0 |
johndpope/tensorflow | tensorflow/python/debug/cli/debugger_cli_common_test.py | 101 | 45452 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Building Blocks of the TensorFlow Debugger CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import stat
import tempfile
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
class CommandLineExitTest(test_util.TensorFlowTestCase):
def testConstructionWithoutToken(self):
exit_exc = debugger_cli_common.CommandLineExit()
self.assertTrue(isinstance(exit_exc, Exception))
def testConstructionWithToken(self):
exit_exc = debugger_cli_common.CommandLineExit(exit_token={"foo": "bar"})
self.assertTrue(isinstance(exit_exc, Exception))
self.assertEqual({"foo": "bar"}, exit_exc.exit_token)
class RichTextLinesTest(test_util.TensorFlowTestCase):
def testRichTextLinesConstructorComplete(self):
# Test RichTextLines constructor.
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
self.assertEqual(2, len(screen_output.lines))
self.assertEqual(2, len(screen_output.font_attr_segs))
self.assertEqual(1, len(screen_output.font_attr_segs[0]))
self.assertEqual(1, len(screen_output.font_attr_segs[1]))
self.assertEqual(2, len(screen_output.annotations))
self.assertEqual(2, screen_output.num_lines())
def testRichTextLinesConstructorWithInvalidType(self):
with self.assertRaisesRegexp(ValueError, "Unexpected type in lines"):
debugger_cli_common.RichTextLines(123)
def testRichTextLinesConstructorWithString(self):
# Test constructing a RichTextLines object with a string, instead of a list
# of strings.
screen_output = debugger_cli_common.RichTextLines(
"Roses are red",
font_attr_segs={0: [(0, 5, "red")]},
annotations={0: "longer wavelength"})
self.assertEqual(1, len(screen_output.lines))
self.assertEqual(1, len(screen_output.font_attr_segs))
self.assertEqual(1, len(screen_output.font_attr_segs[0]))
self.assertEqual(1, len(screen_output.annotations))
def testRichLinesAppendRichLine(self):
rtl = debugger_cli_common.RichTextLines(
"Roses are red",
font_attr_segs={0: [(0, 5, "red")]})
rtl.append_rich_line(debugger_cli_common.RichLine("Violets are ") +
debugger_cli_common.RichLine("blue", "blue"))
self.assertEqual(2, len(rtl.lines))
self.assertEqual(2, len(rtl.font_attr_segs))
self.assertEqual(1, len(rtl.font_attr_segs[0]))
self.assertEqual(1, len(rtl.font_attr_segs[1]))
def testRichLineLenMethodWorks(self):
self.assertEqual(0, len(debugger_cli_common.RichLine()))
self.assertEqual(0, len(debugger_cli_common.RichLine("")))
self.assertEqual(1, len(debugger_cli_common.RichLine("x")))
self.assertEqual(6, len(debugger_cli_common.RichLine("x y z ", "blue")))
def testRichTextLinesConstructorIncomplete(self):
# Test RichTextLines constructor, with incomplete keyword arguments.
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
self.assertEqual(2, len(screen_output.lines))
self.assertEqual(2, len(screen_output.font_attr_segs))
self.assertEqual(1, len(screen_output.font_attr_segs[0]))
self.assertEqual(1, len(screen_output.font_attr_segs[1]))
self.assertEqual({}, screen_output.annotations)
def testModifyRichTextLinesObject(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"])
self.assertEqual(2, len(screen_output.lines))
screen_output.lines.append("Sugar is sweet")
self.assertEqual(3, len(screen_output.lines))
def testMergeRichTextLines(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
screen_output_2 = debugger_cli_common.RichTextLines(
["Lilies are white", "Sunflowers are yellow"],
font_attr_segs={0: [(0, 6, "white")],
1: [(0, 7, "yellow")]},
annotations={
"metadata": "foo",
0: "full spectrum",
1: "medium wavelength"
})
screen_output_1.extend(screen_output_2)
self.assertEqual(4, screen_output_1.num_lines())
self.assertEqual([
"Roses are red", "Violets are blue", "Lilies are white",
"Sunflowers are yellow"
], screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
2: [(0, 6, "white")],
3: [(0, 7, "yellow")]
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
2: [(0, 6, "white")],
3: [(0, 7, "yellow")]
}, screen_output_1.font_attr_segs)
self.assertEqual({
"metadata": "foo",
0: "longer wavelength",
1: "shorter wavelength",
2: "full spectrum",
3: "medium wavelength"
}, screen_output_1.annotations)
def testMergeRichTextLinesEmptyOther(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
screen_output_2 = debugger_cli_common.RichTextLines([])
screen_output_1.extend(screen_output_2)
self.assertEqual(2, screen_output_1.num_lines())
self.assertEqual(["Roses are red", "Violets are blue"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: "longer wavelength",
1: "shorter wavelength",
}, screen_output_1.annotations)
def testMergeRichTextLinesEmptySelf(self):
screen_output_1 = debugger_cli_common.RichTextLines([])
screen_output_2 = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
screen_output_1.extend(screen_output_2)
self.assertEqual(2, screen_output_1.num_lines())
self.assertEqual(["Roses are red", "Violets are blue"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: "longer wavelength",
1: "shorter wavelength",
}, screen_output_1.annotations)
def testAppendALineWithAttributeSegmentsWorks(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red"],
font_attr_segs={0: [(0, 5, "red")]},
annotations={0: "longer wavelength"})
screen_output_1.append("Violets are blue", [(0, 7, "blue")])
self.assertEqual(["Roses are red", "Violets are blue"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
def testPrependALineWithAttributeSegmentsWorks(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red"],
font_attr_segs={0: [(0, 5, "red")]},
annotations={0: "longer wavelength"})
screen_output_1.prepend("Violets are blue", font_attr_segs=[(0, 7, "blue")])
self.assertEqual(["Violets are blue", "Roses are red"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 7, "blue")],
1: [(0, 5, "red")],
}, screen_output_1.font_attr_segs)
def testWriteToFileSucceeds(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
file_path = tempfile.mktemp()
screen_output.write_to_file(file_path)
with gfile.Open(file_path, "r") as f:
self.assertEqual("Roses are red\nViolets are blue\n", f.read())
# Clean up.
gfile.Remove(file_path)
def testAttemptToWriteToADirectoryFails(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
with self.assertRaises(Exception):
screen_output.write_to_file("/")
def testAttemptToWriteToFileInNonexistentDirectoryFails(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
file_path = os.path.join(tempfile.mkdtemp(), "foo", "bar.txt")
with self.assertRaises(Exception):
screen_output.write_to_file(file_path)
class CommandHandlerRegistryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._intentional_error_msg = "Intentionally raised exception"
def _noop_handler(self, argv, screen_info=None):
# A handler that does nothing other than returning "Done."
return debugger_cli_common.RichTextLines(["Done."])
def _handler_raising_exception(self, argv, screen_info=None):
# A handler that intentionally raises an exception.
raise RuntimeError(self._intentional_error_msg)
def _handler_returning_wrong_type(self, argv, screen_info=None):
# A handler that returns a wrong type, instead of the correct type
# (RichTextLines).
return "Hello"
def _echo_screen_cols(self, argv, screen_info=None):
# A handler that uses screen_info.
return debugger_cli_common.RichTextLines(
["cols = %d" % screen_info["cols"]])
def _exiting_handler(self, argv, screen_info=None):
"""A handler that exits with an exit token."""
if argv:
exit_token = argv[0]
else:
exit_token = None
raise debugger_cli_common.CommandLineExit(exit_token=exit_token)
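# The helpers above illustrate the handler contract exercised by the tests
# below: a handler takes (argv, screen_info=None) and either returns a
# RichTextLines instance (or None), or raises CommandLineExit to leave the
# CLI. A minimal hypothetical handler, for example:
#
#   def _greet_handler(self, argv, screen_info=None):
#     name = argv[0] if argv else "world"
#     return debugger_cli_common.RichTextLines(["Hello, %s." % name])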
def testRegisterEmptyCommandPrefix(self):
registry = debugger_cli_common.CommandHandlerRegistry()
# Attempt to register an empty-string as a command prefix should trigger
# an exception.
with self.assertRaisesRegexp(ValueError, "Empty command prefix"):
registry.register_command_handler("", self._noop_handler, "")
def testRegisterAndInvokeHandler(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("noop", self._noop_handler, "")
self.assertTrue(registry.is_registered("noop"))
self.assertFalse(registry.is_registered("beep"))
cmd_output = registry.dispatch_command("noop", [])
self.assertEqual(["Done."], cmd_output.lines)
# Attempt to invoke an unregistered command prefix should trigger an
# exception.
with self.assertRaisesRegexp(ValueError, "No handler is registered"):
registry.dispatch_command("beep", [])
# Empty command prefix should trigger an exception.
with self.assertRaisesRegexp(ValueError, "Prefix is empty"):
registry.dispatch_command("", [])
def testExitingHandler(self):
"""Test that exit exception is correctly raised."""
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("exit", self._exiting_handler, "")
self.assertTrue(registry.is_registered("exit"))
exit_token = None
try:
registry.dispatch_command("exit", ["foo"])
except debugger_cli_common.CommandLineExit as e:
exit_token = e.exit_token
self.assertEqual("foo", exit_token)
def testInvokeHandlerWithScreenInfo(self):
registry = debugger_cli_common.CommandHandlerRegistry()
# Register and invoke a command handler that uses screen_info.
registry.register_command_handler("cols", self._echo_screen_cols, "")
cmd_output = registry.dispatch_command(
"cols", [], screen_info={"cols": 100})
self.assertEqual(["cols = 100"], cmd_output.lines)
def testRegisterAndInvokeHandlerWithAliases(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop", self._noop_handler, "", prefix_aliases=["n", "NOOP"])
# is_registered() should work for full prefix and aliases.
self.assertTrue(registry.is_registered("noop"))
self.assertTrue(registry.is_registered("n"))
self.assertTrue(registry.is_registered("NOOP"))
cmd_output = registry.dispatch_command("n", [])
self.assertEqual(["Done."], cmd_output.lines)
cmd_output = registry.dispatch_command("NOOP", [])
self.assertEqual(["Done."], cmd_output.lines)
def testHandlerWithWrongReturnType(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("wrong_return",
self._handler_returning_wrong_type, "")
# If the command handler fails to return a RichTextLines instance, an error
# should be triggered.
with self.assertRaisesRegexp(
ValueError,
"Return value from command handler.*is not None or a RichTextLines "
"instance"):
registry.dispatch_command("wrong_return", [])
def testRegisterDuplicateHandlers(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("noop", self._noop_handler, "")
# Registering the same command prefix more than once should trigger an
# exception.
with self.assertRaisesRegexp(
ValueError, "A handler is already registered for command prefix"):
registry.register_command_handler("noop", self._noop_handler, "")
cmd_output = registry.dispatch_command("noop", [])
self.assertEqual(["Done."], cmd_output.lines)
def testRegisterDuplicateAliases(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop", self._noop_handler, "", prefix_aliases=["n"])
# Clash with existing alias.
with self.assertRaisesRegexp(ValueError,
"clashes with existing prefixes or aliases"):
registry.register_command_handler(
"cols", self._echo_screen_cols, "", prefix_aliases=["n"])
# The name clash should have prevented the handler from being registered.
self.assertFalse(registry.is_registered("cols"))
# Aliases can also clash with command prefixes.
with self.assertRaisesRegexp(ValueError,
"clashes with existing prefixes or aliases"):
registry.register_command_handler(
"cols", self._echo_screen_cols, "", prefix_aliases=["noop"])
self.assertFalse(registry.is_registered("cols"))
def testDispatchHandlerRaisingException(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("raise_exception",
self._handler_raising_exception, "")
# The registry should catch and wrap exceptions that occur during command
# handling.
cmd_output = registry.dispatch_command("raise_exception", [])
# The error output contains a stack trace.
# So the line count should be >= 2.
self.assertGreater(len(cmd_output.lines), 2)
self.assertTrue(cmd_output.lines[0].startswith(
"Error occurred during handling of command"))
self.assertTrue(cmd_output.lines[1].endswith(self._intentional_error_msg))
def testRegisterNonCallableHandler(self):
registry = debugger_cli_common.CommandHandlerRegistry()
# Attempt to register a non-callable handler should fail.
with self.assertRaisesRegexp(ValueError, "handler is not callable"):
registry.register_command_handler("non_callable", 1, "")
def testRegisterHandlerWithInvalidHelpInfoType(self):
registry = debugger_cli_common.CommandHandlerRegistry()
with self.assertRaisesRegexp(ValueError, "help_info is not a str"):
registry.register_command_handler("noop", self._noop_handler, ["foo"])
def testGetHelpFull(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
help_lines = registry.get_help().lines
# The help info should list commands in alphabetically sorted order,
# regardless of the order in which the commands are registered.
self.assertEqual("cols", help_lines[0])
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
self.assertFalse(help_lines[4])
self.assertFalse(help_lines[5])
# The default help command should appear in the help output.
self.assertEqual("help", help_lines[6])
self.assertEqual("noop", help_lines[12])
self.assertTrue(help_lines[13].endswith("Aliases: n, NOOP"))
self.assertFalse(help_lines[14])
self.assertTrue(help_lines[15].endswith("No operation."))
self.assertTrue(help_lines[16].endswith("I.e., do nothing."))
def testGetHelpSingleCommand(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
# Get help info for one of the two commands, using full prefix.
help_lines = registry.get_help("cols").lines
self.assertTrue(help_lines[0].endswith("cols"))
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
# Get help info for one of the two commands, using alias.
help_lines = registry.get_help("c").lines
self.assertTrue(help_lines[0].endswith("cols"))
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
# Get help info for a nonexistent command.
help_lines = registry.get_help("foo").lines
self.assertEqual("Invalid command prefix: \"foo\"", help_lines[0])
def testHelpCommandWithoutIntro(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
# Get help for all commands.
output = registry.dispatch_command("help", [])
self.assertEqual(["cols", " Aliases: c", "",
" Show screen width in number of columns.", "", "",
"help", " Aliases: h", "", " Print this help message.",
"", "", "noop", " Aliases: n, NOOP", "",
" No operation.", " I.e., do nothing.", "", ""],
output.lines)
# Get help for one specific command prefix.
output = registry.dispatch_command("help", ["noop"])
self.assertEqual(["noop", " Aliases: n, NOOP", "", " No operation.",
" I.e., do nothing."], output.lines)
# Get help for a nonexistent command prefix.
output = registry.dispatch_command("help", ["foo"])
self.assertEqual(["Invalid command prefix: \"foo\""], output.lines)
def testHelpCommandWithIntro(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
help_intro = debugger_cli_common.RichTextLines(
["Introductory comments.", ""])
registry.set_help_intro(help_intro)
output = registry.dispatch_command("help", [])
self.assertEqual(help_intro.lines + [
"help", " Aliases: h", "", " Print this help message.", "", "",
"noop", " Aliases: n, NOOP", "", " No operation.",
" I.e., do nothing.", "", ""
], output.lines)
class RegexFindTest(test_util.TensorFlowTestCase):
def setUp(self):
self._orig_screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"])
def testRegexFindWithoutExistingFontAttrSegs(self):
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"are", "yellow")
self.assertEqual(2, len(new_screen_output.font_attr_segs))
self.assertEqual([(6, 9, "yellow")], new_screen_output.font_attr_segs[0])
self.assertEqual([(8, 11, "yellow")], new_screen_output.font_attr_segs[1])
# Check field in annotations carrying a list of matching line indices.
self.assertEqual([0, 1], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testRegexFindWithExistingFontAttrSegs(self):
# Add a font attribute segment first.
self._orig_screen_output.font_attr_segs[0] = [(9, 12, "red")]
self.assertEqual(1, len(self._orig_screen_output.font_attr_segs))
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"are", "yellow")
self.assertEqual(2, len(new_screen_output.font_attr_segs))
self.assertEqual([(6, 9, "yellow"), (9, 12, "red")],
new_screen_output.font_attr_segs[0])
self.assertEqual([0, 1], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testRegexFindWithNoMatches(self):
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"infrared", "yellow")
self.assertEqual({}, new_screen_output.font_attr_segs)
self.assertEqual([], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testInvalidRegex(self):
with self.assertRaisesRegexp(ValueError, "Invalid regular expression"):
debugger_cli_common.regex_find(self._orig_screen_output, "[", "yellow")
def testRegexFindOnPrependedLinesWorks(self):
rich_lines = debugger_cli_common.RichTextLines(["Violets are blue"])
rich_lines.prepend(["Roses are red"])
searched_rich_lines = debugger_cli_common.regex_find(
rich_lines, "red", "bold")
self.assertEqual(
{0: [(10, 13, "bold")]}, searched_rich_lines.font_attr_segs)
rich_lines = debugger_cli_common.RichTextLines(["Violets are blue"])
rich_lines.prepend(["A poem"], font_attr_segs=[(0, 1, "underline")])
searched_rich_lines = debugger_cli_common.regex_find(
rich_lines, "poem", "italic")
self.assertEqual(
{0: [(0, 1, "underline"), (2, 6, "italic")]},
searched_rich_lines.font_attr_segs)
class WrapScreenOutputTest(test_util.TensorFlowTestCase):
def setUp(self):
self._orig_screen_output = debugger_cli_common.RichTextLines(
["Folk song:", "Roses are red", "Violets are blue"],
font_attr_segs={1: [(0, 5, "red"), (6, 9, "gray"), (10, 12, "red"),
(12, 13, "crimson")],
2: [(0, 7, "blue"), (8, 11, "gray"), (12, 14, "blue"),
(14, 16, "indigo")]},
annotations={1: "longer wavelength",
2: "shorter wavelength"})
def testNoActualWrapping(self):
# Large column limit should lead to no actual wrapping.
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
self._orig_screen_output, 100)
self.assertEqual(self._orig_screen_output.lines, out.lines)
self.assertEqual(self._orig_screen_output.font_attr_segs,
out.font_attr_segs)
self.assertEqual(self._orig_screen_output.annotations, out.annotations)
self.assertEqual(new_line_indices, [0, 1, 2])
def testWrappingWithAttrCutoff(self):
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
self._orig_screen_output, 11)
# Add non-row-index field to out.
out.annotations["metadata"] = "foo"
# Check wrapped text.
self.assertEqual(5, len(out.lines))
self.assertEqual("Folk song:", out.lines[0])
self.assertEqual("Roses are r", out.lines[1])
self.assertEqual("ed", out.lines[2])
self.assertEqual("Violets are", out.lines[3])
self.assertEqual(" blue", out.lines[4])
# Check wrapped font_attr_segs.
self.assertFalse(0 in out.font_attr_segs)
self.assertEqual([(0, 5, "red"), (6, 9, "gray"), (10, 11, "red")],
out.font_attr_segs[1])
self.assertEqual([(0, 1, "red"), (1, 2, "crimson")], out.font_attr_segs[2])
self.assertEqual([(0, 7, "blue"), (8, 11, "gray")], out.font_attr_segs[3])
self.assertEqual([(1, 3, "blue"), (3, 5, "indigo")], out.font_attr_segs[4])
# Check annotations.
self.assertFalse(0 in out.annotations)
self.assertEqual("longer wavelength", out.annotations[1])
self.assertFalse(2 in out.annotations)
self.assertEqual("shorter wavelength", out.annotations[3])
self.assertFalse(4 in out.annotations)
# Check that the non-row-index field is present in the output.
self.assertEqual("foo", out.annotations["metadata"])
self.assertEqual(new_line_indices, [0, 1, 3])
def testWrappingWithMultipleAttrCutoff(self):
self._orig_screen_output = debugger_cli_common.RichTextLines(
["Folk song:", "Roses are red", "Violets are blue"],
font_attr_segs={1: [(0, 12, "red")],
2: [(1, 16, "blue")]},
annotations={1: "longer wavelength",
2: "shorter wavelength"})
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
self._orig_screen_output, 5)
# Check wrapped text.
self.assertEqual(9, len(out.lines))
self.assertEqual("Folk ", out.lines[0])
self.assertEqual("song:", out.lines[1])
self.assertEqual("Roses", out.lines[2])
self.assertEqual(" are ", out.lines[3])
self.assertEqual("red", out.lines[4])
self.assertEqual("Viole", out.lines[5])
self.assertEqual("ts ar", out.lines[6])
self.assertEqual("e blu", out.lines[7])
self.assertEqual("e", out.lines[8])
# Check wrapped font_attr_segs.
self.assertFalse(0 in out.font_attr_segs)
self.assertFalse(1 in out.font_attr_segs)
self.assertEqual([(0, 5, "red")], out.font_attr_segs[2])
self.assertEqual([(0, 5, "red")], out.font_attr_segs[3])
self.assertEqual([(0, 2, "red")], out.font_attr_segs[4])
self.assertEqual([(1, 5, "blue")], out.font_attr_segs[5])
self.assertEqual([(0, 5, "blue")], out.font_attr_segs[6])
self.assertEqual([(0, 5, "blue")], out.font_attr_segs[7])
self.assertEqual([(0, 1, "blue")], out.font_attr_segs[8])
# Check annotations.
self.assertFalse(0 in out.annotations)
self.assertFalse(1 in out.annotations)
self.assertEqual("longer wavelength", out.annotations[2])
self.assertFalse(3 in out.annotations)
self.assertFalse(4 in out.annotations)
self.assertEqual("shorter wavelength", out.annotations[5])
self.assertFalse(6 in out.annotations)
self.assertFalse(7 in out.annotations)
self.assertFalse(8 in out.annotations)
self.assertEqual(new_line_indices, [0, 2, 5])
def testWrappingInvalidArguments(self):
with self.assertRaisesRegexp(ValueError,
"Invalid type of input screen_output"):
debugger_cli_common.wrap_rich_text_lines("foo", 12)
with self.assertRaisesRegexp(ValueError, "Invalid type of input cols"):
debugger_cli_common.wrap_rich_text_lines(
debugger_cli_common.RichTextLines(["foo", "bar"]), "12")
def testWrappingEmptyInput(self):
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
debugger_cli_common.RichTextLines([]), 10)
self.assertEqual([], out.lines)
self.assertEqual([], new_line_indices)
class SliceRichTextLinesTest(test_util.TensorFlowTestCase):
def setUp(self):
self._original = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={
0: "longer wavelength",
1: "shorter wavelength",
"foo_metadata": "bar"
})
def testSliceBeginning(self):
sliced = self._original.slice(0, 1)
self.assertEqual(["Roses are red"], sliced.lines)
self.assertEqual({0: [(0, 5, "red")]}, sliced.font_attr_segs)
# Non-line-number metadata should be preserved.
self.assertEqual({
0: "longer wavelength",
"foo_metadata": "bar"
}, sliced.annotations)
self.assertEqual(1, sliced.num_lines())
def testSliceEnd(self):
sliced = self._original.slice(1, 2)
self.assertEqual(["Violets are blue"], sliced.lines)
# The line index should have changed from 1 to 0.
self.assertEqual({0: [(0, 7, "blue")]}, sliced.font_attr_segs)
self.assertEqual({
0: "shorter wavelength",
"foo_metadata": "bar"
}, sliced.annotations)
self.assertEqual(1, sliced.num_lines())
def testAttemptSliceWithNegativeIndex(self):
with self.assertRaisesRegexp(ValueError, "Encountered negative index"):
self._original.slice(0, -1)
class TabCompletionRegistryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tc_reg = debugger_cli_common.TabCompletionRegistry()
# Register the items in an unsorted order deliberately, to test the sorted
# output from get_completions().
self._tc_reg.register_tab_comp_context(
["print_tensor", "pt"],
["node_b:1", "node_b:2", "node_a:1", "node_a:2"])
self._tc_reg.register_tab_comp_context(["node_info"],
["node_c", "node_b", "node_a"])
def testTabCompletion(self):
# The returned completions should have sorted order.
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a:1", "node_a:2", "node_b:1", "node_b:2"],
"node_"), self._tc_reg.get_completions("pt", ""))
self.assertEqual((["node_a:1", "node_a:2"], "node_a:"),
self._tc_reg.get_completions("print_tensor", "node_a"))
self.assertEqual((["node_a:1"], "node_a:1"),
self._tc_reg.get_completions("pt", "node_a:1"))
self.assertEqual(([], ""),
self._tc_reg.get_completions("print_tensor", "node_a:3"))
self.assertEqual((None, None), self._tc_reg.get_completions("foo", "node_"))
def testExtendCompletionItems(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.extend_comp_items("print_tensor", ["node_A:1", "node_A:2"])
self.assertEqual((["node_A:1", "node_A:2", "node_a:1", "node_a:2",
"node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
# Extending the completions for one of the context's context words should
# have taken effect on other context words of the same context as well.
self.assertEqual((["node_A:1", "node_A:2", "node_a:1", "node_a:2",
"node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("pt", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
def testExtendCompletionItemsNonexistentContext(self):
with self.assertRaisesRegexp(
KeyError, "Context word \"foo\" has not been registered"):
self._tc_reg.extend_comp_items("foo", ["node_A:1", "node_A:2"])
def testRemoveCompletionItems(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.remove_comp_items("pt", ["node_a:1", "node_a:2"])
self.assertEqual((["node_b:1", "node_b:2"], "node_b:"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
def testRemoveCompletionItemsNonexistentContext(self):
with self.assertRaisesRegexp(
KeyError, "Context word \"foo\" has not been registered"):
self._tc_reg.remove_comp_items("foo", ["node_a:1", "node_a:2"])
def testDeregisterContext(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.deregister_context(["print_tensor"])
self.assertEqual((None, None),
self._tc_reg.get_completions("print_tensor", "node_"))
# The alternative context word should be unaffected.
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("pt", "node_"))
def testDeregisterNonexistentContext(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.deregister_context(["print_tensor"])
with self.assertRaisesRegexp(
KeyError,
"Cannot deregister unregistered context word \"print_tensor\""):
self._tc_reg.deregister_context(["print_tensor"])
class CommandHistoryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._history_file_path = tempfile.mktemp()
self._cmd_hist = debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
def tearDown(self):
if os.path.isfile(self._history_file_path):
os.remove(self._history_file_path)
def _restoreFileReadWritePermissions(self, file_path):
os.chmod(file_path,
(stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR |
stat.S_IWGRP | stat.S_IWOTH))
def testLookUpMostRecent(self):
self.assertEqual([], self._cmd_hist.most_recent_n(3))
self._cmd_hist.add_command("list_tensors")
self._cmd_hist.add_command("node_info node_a")
self.assertEqual(["node_info node_a"], self._cmd_hist.most_recent_n(1))
self.assertEqual(["list_tensors", "node_info node_a"],
self._cmd_hist.most_recent_n(2))
self.assertEqual(["list_tensors", "node_info node_a"],
self._cmd_hist.most_recent_n(3))
self._cmd_hist.add_command("node_info node_b")
self.assertEqual(["node_info node_b"], self._cmd_hist.most_recent_n(1))
self.assertEqual(["node_info node_a", "node_info node_b"],
self._cmd_hist.most_recent_n(2))
self.assertEqual(["list_tensors", "node_info node_a", "node_info node_b"],
self._cmd_hist.most_recent_n(3))
self.assertEqual(["list_tensors", "node_info node_a", "node_info node_b"],
self._cmd_hist.most_recent_n(4))
# Go over the limit.
self._cmd_hist.add_command("node_info node_a")
self.assertEqual(["node_info node_a"], self._cmd_hist.most_recent_n(1))
self.assertEqual(["node_info node_b", "node_info node_a"],
self._cmd_hist.most_recent_n(2))
self.assertEqual(
["node_info node_a", "node_info node_b", "node_info node_a"],
self._cmd_hist.most_recent_n(3))
self.assertEqual(
["node_info node_a", "node_info node_b", "node_info node_a"],
self._cmd_hist.most_recent_n(4))
def testLookUpPrefix(self):
self._cmd_hist.add_command("node_info node_b")
self._cmd_hist.add_command("list_tensors")
self._cmd_hist.add_command("node_info node_a")
self.assertEqual(["node_info node_b", "node_info node_a"],
self._cmd_hist.lookup_prefix("node_info", 10))
self.assertEqual(["node_info node_a"], self._cmd_hist.lookup_prefix(
"node_info", 1))
self.assertEqual([], self._cmd_hist.lookup_prefix("print_tensor", 10))
def testAddNonStrCommand(self):
with self.assertRaisesRegexp(
TypeError, "Attempt to enter non-str entry to command history"):
self._cmd_hist.add_command(["print_tensor node_a:0"])
def testRepeatingCommandsDoNotGetLoggedRepeatedly(self):
self._cmd_hist.add_command("help")
self._cmd_hist.add_command("help")
self.assertEqual(["help"], self._cmd_hist.most_recent_n(2))
def testCommandHistoryFileIsCreated(self):
self.assertFalse(os.path.isfile(self._history_file_path))
self._cmd_hist.add_command("help")
self.assertTrue(os.path.isfile(self._history_file_path))
with open(self._history_file_path, "rt") as f:
self.assertEqual(["help\n"], f.readlines())
def testLoadingCommandHistoryFileObeysLimit(self):
self._cmd_hist.add_command("help 1")
self._cmd_hist.add_command("help 2")
self._cmd_hist.add_command("help 3")
self._cmd_hist.add_command("help 4")
cmd_hist_2 = debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
self.assertEqual(["help 2", "help 3", "help 4"],
cmd_hist_2.most_recent_n(3))
with open(self._history_file_path, "rt") as f:
self.assertEqual(
["help 2\n", "help 3\n", "help 4\n"], f.readlines())
def testCommandHistoryHandlesReadingIOErrorGracoiusly(self):
with open(self._history_file_path, "wt") as f:
f.write("help\n")
# Change the file so it is not readable by anyone.
os.chmod(self._history_file_path, 0)
# The creation of a CommandHistory object should not error out.
debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
self._restoreFileReadWritePermissions(self._history_file_path)
def testCommandHistoryHandlesWritingIOErrorGracoiusly(self):
with open(self._history_file_path, "wt") as f:
f.write("help\n")
# Change file to read-only.
os.chmod(self._history_file_path,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
# Reading from the file should still work.
cmd_hist_2 = debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
self.assertEqual(["help"], cmd_hist_2.most_recent_n(1))
# Writing should no longer work, but it should fail silently and
# the within-instance command history should still work.
cmd_hist_2.add_command("foo")
self.assertEqual(["help", "foo"], cmd_hist_2.most_recent_n(2))
cmd_hist_3 = debugger_cli_common.CommandHistory(
limit=3, history_file_path=self._history_file_path)
self.assertEqual(["help"], cmd_hist_3.most_recent_n(1))
self._restoreFileReadWritePermissions(self._history_file_path)
class MenuNodeTest(test_util.TensorFlowTestCase):
def testCommandTypeConstructorSucceeds(self):
menu_node = debugger_cli_common.MenuItem("water flower", "water_flower")
self.assertEqual("water flower", menu_node.caption)
self.assertEqual("water_flower", menu_node.content)
def testDisableWorks(self):
menu_node = debugger_cli_common.MenuItem("water flower", "water_flower")
self.assertTrue(menu_node.is_enabled())
menu_node.disable()
self.assertFalse(menu_node.is_enabled())
menu_node.enable()
self.assertTrue(menu_node.is_enabled())
def testConstructAsDisabledWorks(self):
menu_node = debugger_cli_common.MenuItem(
"water flower", "water_flower", enabled=False)
self.assertFalse(menu_node.is_enabled())
menu_node.enable()
self.assertTrue(menu_node.is_enabled())
class MenuTest(test_util.TensorFlowTestCase):
def setUp(self):
self.menu = debugger_cli_common.Menu()
self.assertEqual(0, self.menu.num_items())
self.node1 = debugger_cli_common.MenuItem("water flower", "water_flower")
self.node2 = debugger_cli_common.MenuItem(
"measure wavelength", "measure_wavelength")
self.menu.append(self.node1)
self.menu.append(self.node2)
self.assertEqual(2, self.menu.num_items())
def testFormatAsSingleLineWithStrItemAttrsWorks(self):
output = self.menu.format_as_single_line(
prefix="Menu: ", divider=", ", enabled_item_attrs="underline")
self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
self.assertEqual((6, 18, [self.node1, "underline"]),
output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2, "underline"]),
output.font_attr_segs[0][1])
self.assertEqual({}, output.annotations)
def testFormatAsSingleLineWithListItemAttrsWorks(self):
output = self.menu.format_as_single_line(
prefix="Menu: ", divider=", ", enabled_item_attrs=["underline", "bold"])
self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
self.assertEqual((6, 18, [self.node1, "underline", "bold"]),
output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2, "underline", "bold"]),
output.font_attr_segs[0][1])
self.assertEqual({}, output.annotations)
def testFormatAsSingleLineWithNoneItemAttrsWorks(self):
output = self.menu.format_as_single_line(prefix="Menu: ", divider=", ")
self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
self.assertEqual((6, 18, [self.node1]), output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2]), output.font_attr_segs[0][1])
self.assertEqual({}, output.annotations)
def testInsertNode(self):
self.assertEqual(["water flower", "measure wavelength"],
self.menu.captions())
node2 = debugger_cli_common.MenuItem("write poem", "write_poem")
self.menu.insert(1, node2)
self.assertEqual(["water flower", "write poem", "measure wavelength"],
self.menu.captions())
output = self.menu.format_as_single_line(prefix="Menu: ", divider=", ")
self.assertEqual(["Menu: water flower, write poem, measure wavelength, "],
output.lines)
def testFormatAsSingleLineWithDisabledNode(self):
node2 = debugger_cli_common.MenuItem(
"write poem", "write_poem", enabled=False)
self.menu.append(node2)
output = self.menu.format_as_single_line(
prefix="Menu: ", divider=", ", disabled_item_attrs="bold")
self.assertEqual(["Menu: water flower, measure wavelength, write poem, "],
output.lines)
self.assertEqual((6, 18, [self.node1]), output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2]), output.font_attr_segs[0][1])
self.assertEqual((40, 50, ["bold"]), output.font_attr_segs[0][2])
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
Frankenmint/p2pool | p2pool/test/util/test_forest.py | 283 | 6804 | import random
import unittest
from p2pool.util import forest, math
class DumbTracker(object):
def __init__(self, items=[]):
self.items = {} # hash -> item
self.reverse = {} # previous_hash -> set of item_hashes
for item in items:
self.add(item)
def add(self, item):
if item.hash in self.items:
raise ValueError('item already present')
self.items[item.hash] = item
self.reverse.setdefault(item.previous_hash, set()).add(item.hash)
def remove(self, item_hash):
item = self.items[item_hash]
del item_hash
self.items.pop(item.hash)
self.reverse[item.previous_hash].remove(item.hash)
if not self.reverse[item.previous_hash]:
self.reverse.pop(item.previous_hash)
@property
def heads(self):
return dict((x, self.get_last(x)) for x in self.items if x not in self.reverse)
@property
def tails(self):
return dict((x, set(y for y in self.items if self.get_last(y) == x and y not in self.reverse)) for x in self.reverse if x not in self.items)
def get_nth_parent_hash(self, item_hash, n):
for i in xrange(n):
item_hash = self.items[item_hash].previous_hash
return item_hash
def get_height(self, item_hash):
height, last = self.get_height_and_last(item_hash)
return height
def get_last(self, item_hash):
height, last = self.get_height_and_last(item_hash)
return last
def get_height_and_last(self, item_hash):
height = 0
while item_hash in self.items:
item_hash = self.items[item_hash].previous_hash
height += 1
return height, item_hash
def get_chain(self, start_hash, length):
# same implementation :/
assert length <= self.get_height(start_hash)
for i in xrange(length):
yield self.items[start_hash]
start_hash = self.items[start_hash].previous_hash
def is_child_of(self, item_hash, possible_child_hash):
if self.get_last(item_hash) != self.get_last(possible_child_hash):
return None
while True:
if possible_child_hash == item_hash:
return True
if possible_child_hash not in self.items:
return False
possible_child_hash = self.items[possible_child_hash].previous_hash
class FakeShare(object):
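    """Minimal stand-in for a share object: simply stores the keyword attributes it is given."""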
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
self._attrs = kwargs
def test_tracker(self):
t = DumbTracker(self.items.itervalues())
assert self.items == t.items, (self.items, t.items)
assert self.reverse == t.reverse, (self.reverse, t.reverse)
assert self.heads == t.heads, (self.heads, t.heads)
assert self.tails == t.tails, (self.tails, t.tails)
if random.random() < 0.9:
return
for start in self.items:
a, b = self.get_height_and_last(start), t.get_height_and_last(start)
assert a == b, (a, b)
other = random.choice(self.items.keys())
assert self.is_child_of(start, other) == t.is_child_of(start, other)
assert self.is_child_of(other, start) == t.is_child_of(other, start)
length = random.randrange(a[0])
assert list(self.get_chain(start, length)) == list(t.get_chain(start, length))
def generate_tracker_simple(n):
t = forest.Tracker(math.shuffled(FakeShare(hash=i, previous_hash=i - 1 if i > 0 else None) for i in xrange(n)))
test_tracker(t)
return t
def generate_tracker_random(n):
items = []
for i in xrange(n):
x = random.choice(items + [FakeShare(hash=None), FakeShare(hash=random.randrange(1000000, 2000000))]).hash
items.append(FakeShare(hash=i, previous_hash=x))
t = forest.Tracker(math.shuffled(items))
test_tracker(t)
return t
class Test(unittest.TestCase):
def test_tracker(self):
t = generate_tracker_simple(100)
assert t.heads == {99: None}
assert t.tails == {None: set([99])}
assert t.get_nth_parent_hash(90, 50) == 90 - 50
assert t.get_nth_parent_hash(91, 42) == 91 - 42
def test_get_nth_parent_hash(self):
t = generate_tracker_simple(200)
for i in xrange(1000):
a = random.randrange(200)
b = random.randrange(a + 1)
res = t.get_nth_parent_hash(a, b)
assert res == a - b, (a, b, res)
def test_tracker2(self):
for ii in xrange(20):
t = generate_tracker_random(random.randrange(100))
#print "--start--"
while t.items:
while True:
try:
t.remove(random.choice(list(t.items)))
except NotImplementedError:
pass # print "aborted", x
else:
break
test_tracker(t)
def test_tracker3(self):
for ii in xrange(10):
items = []
for i in xrange(random.randrange(100)):
x = random.choice(items + [FakeShare(hash=None), FakeShare(hash=random.randrange(1000000, 2000000))]).hash
items.append(FakeShare(hash=i, previous_hash=x))
t = forest.Tracker()
test_tracker(t)
for item in math.shuffled(items):
t.add(item)
test_tracker(t)
if random.randrange(3) == 0:
while True:
try:
t.remove(random.choice(list(t.items)))
except NotImplementedError:
pass
else:
break
test_tracker(t)
for item in math.shuffled(items):
if item.hash not in t.items:
t.add(item)
test_tracker(t)
if random.randrange(3) == 0:
while True:
try:
t.remove(random.choice(list(t.items)))
except NotImplementedError:
pass
else:
break
test_tracker(t)
while t.items:
while True:
try:
t.remove(random.choice(list(t.items)))
except NotImplementedError:
pass
else:
break
test_tracker(t)
| gpl-3.0 |
40223143/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/copyreg.py | 749 | 6611 | """Helper to provide extensibility for pickle.
This is only useful to add pickle support for extension types defined in
C, not for instances of user-defined classes.
"""
__all__ = ["pickle", "constructor",
"add_extension", "remove_extension", "clear_extension_cache"]
dispatch_table = {}
def pickle(ob_type, pickle_function, constructor_ob=None):
if not callable(pickle_function):
raise TypeError("reduction functions must be callable")
dispatch_table[ob_type] = pickle_function
    # The constructor_ob function is a vestige of the old safe-for-unpickling
    # mechanism.  There is no reason for the caller to pass it anymore.
if constructor_ob is not None:
constructor(constructor_ob)
def constructor(object):
if not callable(object):
raise TypeError("constructors must be callable")
# Example: provide pickling support for complex numbers.
try:
complex
except NameError:
pass
else:
def pickle_complex(c):
return complex, (c.real, c.imag)
pickle(complex, pickle_complex, complex)
# Support for pickling new-style objects
def _reconstructor(cls, base, state):
if base is object:
obj = object.__new__(cls)
else:
obj = base.__new__(cls, state)
if base.__init__ != object.__init__:
base.__init__(obj, state)
return obj
_HEAPTYPE = 1<<9
# Python code for object.__reduce_ex__ for protocols 0 and 1
def _reduce_ex(self, proto):
assert proto < 2
for base in self.__class__.__mro__:
if hasattr(base, '__flags__') and not base.__flags__ & _HEAPTYPE:
break
else:
base = object # not really reachable
if base is object:
state = None
else:
if base is self.__class__:
raise TypeError("can't pickle %s objects" % base.__name__)
state = base(self)
args = (self.__class__, base, state)
try:
getstate = self.__getstate__
except AttributeError:
if getattr(self, "__slots__", None):
raise TypeError("a class that defines __slots__ without "
"defining __getstate__ cannot be pickled")
try:
dict = self.__dict__
except AttributeError:
dict = None
else:
dict = getstate()
if dict:
return _reconstructor, args, dict
else:
return _reconstructor, args
# Helper for __reduce_ex__ protocol 2
def __newobj__(cls, *args):
return cls.__new__(cls, *args)
def _slotnames(cls):
"""Return a list of slot names for a given class.
This needs to find slots defined by the class and its bases, so we
can't simply return the __slots__ attribute. We must walk down
the Method Resolution Order and concatenate the __slots__ of each
class found there. (This assumes classes don't modify their
__slots__ attribute to misrepresent their slots after the class is
defined.)
"""
# Get the value from a cache in the class if possible
names = cls.__dict__.get("__slotnames__")
if names is not None:
return names
# Not cached -- calculate the value
names = []
if not hasattr(cls, "__slots__"):
# This class has no slots
pass
else:
# Slots found -- gather slot names from all base classes
for c in cls.__mro__:
if "__slots__" in c.__dict__:
slots = c.__dict__['__slots__']
# if class has a single slot, it can be given as a string
if isinstance(slots, str):
slots = (slots,)
for name in slots:
# special descriptors
if name in ("__dict__", "__weakref__"):
continue
# mangled names
elif name.startswith('__') and not name.endswith('__'):
names.append('_%s%s' % (c.__name__, name))
else:
names.append(name)
# Cache the outcome in the class if at all possible
try:
cls.__slotnames__ = names
except:
pass # But don't die if we can't
return names
# A registry of extension codes. This is an ad-hoc compression
# mechanism. Whenever a global reference to <module>, <name> is about
# to be pickled, the (<module>, <name>) tuple is looked up here to see
# if it is a registered extension code for it. Extension codes are
# universal, so that the meaning of a pickle does not depend on
# context. (There are also some codes reserved for local use that
# don't have this restriction.) Codes are positive ints; 0 is
# reserved.
_extension_registry = {} # key -> code
_inverted_registry = {} # code -> key
_extension_cache = {} # code -> object
# Don't ever rebind those names: pickling grabs a reference to them when
# it's initialized, and won't see a rebinding.
def add_extension(module, name, code):
"""Register an extension code."""
code = int(code)
if not 1 <= code <= 0x7fffffff:
raise ValueError("code out of range")
key = (module, name)
if (_extension_registry.get(key) == code and
_inverted_registry.get(code) == key):
return # Redundant registrations are benign
if key in _extension_registry:
raise ValueError("key %s is already registered with code %s" %
(key, _extension_registry[key]))
if code in _inverted_registry:
raise ValueError("code %s is already in use for key %s" %
(code, _inverted_registry[code]))
_extension_registry[key] = code
_inverted_registry[code] = key
def remove_extension(module, name, code):
"""Unregister an extension code. For testing only."""
key = (module, name)
if (_extension_registry.get(key) != code or
_inverted_registry.get(code) != key):
raise ValueError("key %s is not registered with code %s" %
(key, code))
del _extension_registry[key]
del _inverted_registry[code]
if code in _extension_cache:
del _extension_cache[code]
def clear_extension_cache():
_extension_cache.clear()
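# Illustrative registry usage (the module/class names below are made up for
# the example; per the ranges documented below, codes 240-255 are reserved
# for private use and are therefore safe for ad-hoc demos):
#
#   add_extension('mypackage.mymodule', 'MyClass', 240)
#   remove_extension('mypackage.mymodule', 'MyClass', 240)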
# Standard extension code assignments
# Reserved ranges
# First Last Count Purpose
# 1 127 127 Reserved for Python standard library
# 128 191 64 Reserved for Zope
# 192 239 48 Reserved for 3rd parties
# 240 255 16 Reserved for private use (will never be assigned)
# 256 Inf Inf Reserved for future assignment
# Extension codes are assigned by the Python Software Foundation.
| gpl-3.0 |
kchodorow/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn.py | 9 | 31036 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
_CENTERED_BIAS_WEIGHT = "centered_bias_weight"
# The default learning rate of 0.05 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.05
def _get_feature_dict(features):
if isinstance(features, dict):
return features
return {"": features}
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
def _add_hidden_layer_summary(value, tag):
summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
summary.histogram("%s_activation" % tag, value)
def _dnn_model_fn(features, labels, mode, params, config=None):
"""Deep Neural Net model_fn.
Args:
features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
dtype `int32` or `int64` in the range `[0, n_classes)`.
mode: Defines whether this is training, evaluation or prediction.
See `ModeKeys`.
params: A dict of hyperparameters.
The following hyperparameters are expected:
* head: A `_Head` instance.
* hidden_units: List of hidden units per layer.
* feature_columns: An iterable containing all the feature columns used by
the model.
* optimizer: string, `Optimizer` object, or callable that defines the
optimizer to use for training. If `None`, will use the Adagrad
optimizer with a default learning rate of 0.05.
* activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
* dropout: When not `None`, the probability we will drop out a given
coordinate.
* gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio.
* embedding_lr_multipliers: Optional. A dictionary from
`EmbeddingColumn` to a `float` multiplier. Multiplier will be used to
multiply with learning rate for the embedding variables.
* input_layer_min_slice_size: Optional. The min slice size of input layer
partitions. If not provided, will use the default of 64M.
config: `RunConfig` object to configure the runtime settings.
Returns:
predictions: A dict of `Tensor` objects.
loss: A scalar containing the loss of the step.
train_op: The op for training.
"""
head = params["head"]
hidden_units = params["hidden_units"]
feature_columns = params["feature_columns"]
optimizer = params.get("optimizer") or "Adagrad"
activation_fn = params.get("activation_fn")
dropout = params.get("dropout")
gradient_clip_norm = params.get("gradient_clip_norm")
input_layer_min_slice_size = (
params.get("input_layer_min_slice_size") or 64 << 20)
num_ps_replicas = config.num_ps_replicas if config else 0
embedding_lr_multipliers = params.get("embedding_lr_multipliers", {})
features = _get_feature_dict(features)
parent_scope = "dnn"
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas)
with variable_scope.variable_scope(
parent_scope,
values=tuple(six.itervalues(features)),
partitioner=partitioner):
input_layer_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=input_layer_min_slice_size))
with variable_scope.variable_scope(
"input_from_feature_columns",
values=tuple(six.itervalues(features)),
partitioner=input_layer_partitioner) as input_layer_scope:
net = layers.input_from_feature_columns(
columns_to_tensors=features,
feature_columns=feature_columns,
weight_collections=[parent_scope],
scope=input_layer_scope)
for layer_id, num_hidden_units in enumerate(hidden_units):
with variable_scope.variable_scope(
"hiddenlayer_%d" % layer_id,
values=(net,)) as hidden_layer_scope:
net = layers.fully_connected(
net,
num_hidden_units,
activation_fn=activation_fn,
variables_collections=[parent_scope],
scope=hidden_layer_scope)
if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
net = layers.dropout(net, keep_prob=(1.0 - dropout))
_add_hidden_layer_summary(net, hidden_layer_scope.name)
with variable_scope.variable_scope(
"logits",
values=(net,)) as logits_scope:
logits = layers.fully_connected(
net,
head.logits_dimension,
activation_fn=None,
variables_collections=[parent_scope],
scope=logits_scope)
_add_hidden_layer_summary(logits, logits_scope.name)
def _train_op_fn(loss):
"""Returns the op to optimize the loss."""
return optimizers.optimize_loss(
loss=loss,
global_step=contrib_variables.get_global_step(),
learning_rate=_LEARNING_RATE,
optimizer=_get_optimizer(optimizer),
gradient_multipliers=(
dnn_linear_combined._extract_embedding_lr_multipliers( # pylint: disable=protected-access
embedding_lr_multipliers, parent_scope,
input_layer_scope.name)),
clip_gradients=gradient_clip_norm,
name=parent_scope,
# Empty summaries to prevent optimizers from logging training_loss.
summaries=[])
return head.create_model_fn_ops(
features=features,
mode=mode,
labels=labels,
train_op_fn=_train_op_fn,
logits=logits)
class DNNClassifier(estimator.Estimator):
"""A classifier for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNClassifier(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNClassifier(
feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
  def input_fn_train(): # returns x, y (where y represents label's class index).
pass
estimator.fit(input_fn=input_fn_train)
  def input_fn_eval(): # returns x, y (where y represents label's class index).
pass
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x) # returns predicted labels (i.e. label's class index).
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
n_classes=2,
weight_column_name=None,
optimizer=None,
activation_fn=nn.relu,
dropout=None,
gradient_clip_norm=None,
enable_centered_bias=False,
config=None,
feature_engineering_fn=None,
embedding_lr_multipliers=None,
input_layer_min_slice_size=None):
"""Initializes a DNNClassifier instance.
Args:
hidden_units: List of hidden units per layer. All layers are fully
connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
n_classes: number of label classes. Default is binary classification.
It must be greater than 1. Note: Class labels are integers representing
the class index (i.e. values from 0 to n_classes-1). For arbitrary
label values (e.g. string labels), convert to class indices first.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are
clipped to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
a `float` multiplier. Multiplier will be used to multiply with
learning rate for the embedding variables.
input_layer_min_slice_size: Optional. The min slice size of input layer
partitions. If not provided, will use the default of 64M.
Returns:
A `DNNClassifier` estimator.
Raises:
ValueError: If `n_classes` < 2.
"""
self._hidden_units = hidden_units
self._feature_columns = tuple(feature_columns or [])
self._enable_centered_bias = enable_centered_bias
super(DNNClassifier, self).__init__(
model_fn=_dnn_model_fn,
model_dir=model_dir,
config=config,
params={
"head":
head_lib._multi_class_head( # pylint: disable=protected-access
n_classes,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias),
"hidden_units": hidden_units,
"feature_columns": self._feature_columns,
"optimizer": optimizer,
"activation_fn": activation_fn,
"dropout": dropout,
"gradient_clip_norm": gradient_clip_norm,
"embedding_lr_multipliers": embedding_lr_multipliers,
"input_layer_min_slice_size": input_layer_min_slice_size,
},
feature_engineering_fn=feature_engineering_fn)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
@deprecated_arg_values(
"2017-03-01",
"Please switch to predict_classes, or set `outputs` argument.",
outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted classes. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_classes` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns classes.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes with shape [batch_size] (or an iterable
of predicted classes if as_iterable is True). Each predicted class is
represented by its class index (i.e. integer from 0 to n_classes-1).
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_classes(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(DNNClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_classes(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted classes for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted classes with shape [batch_size] (or an iterable
of predicted classes if as_iterable is True). Each predicted class is
represented by its class index (i.e. integer from 0 to n_classes-1).
"""
key = prediction_key.PredictionKey.CLASSES
preds = super(DNNClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return (pred[key] for pred in preds)
return preds[key].reshape(-1)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_proba(self,
x=None,
input_fn=None,
batch_size=None,
as_iterable=True):
"""Returns predicted probabilities for given features.
Args:
x: features.
input_fn: Input function. If set, x and y must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted probabilities with shape [batch_size, n_classes]
(or an iterable of predicted probabilities if as_iterable is True).
"""
key = prediction_key.PredictionKey.PROBABILITIES
preds = super(DNNClassifier, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return (pred[key] for pred in preds)
return preds[key]
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(examples,
self._feature_columns)
return super(DNNClassifier, self).export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=(signature_fn or
export.classification_signature_fn_with_prob),
prediction_key=prediction_key.PredictionKey.PROBABILITIES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def weights_(self):
hiddenlayer_weights = [
self.get_variable_value("dnn/hiddenlayer_%d/weights" % i)
for i, _ in enumerate(self._hidden_units)
]
logits_weights = [self.get_variable_value("dnn/logits/weights")]
return hiddenlayer_weights + logits_weights
@property
@deprecated("2016-10-30",
"This method will be removed after the deprecation date. "
"To inspect variables, use get_variable_names() and "
"get_variable_value().")
def bias_(self):
hiddenlayer_bias = [
self.get_variable_value("dnn/hiddenlayer_%d/biases" % i)
for i, _ in enumerate(self._hidden_units)
]
logits_bias = [self.get_variable_value("dnn/logits/biases")]
if self._enable_centered_bias:
centered_bias = [self.get_variable_value(_CENTERED_BIAS_WEIGHT)]
else:
centered_bias = []
return hiddenlayer_bias + logits_bias + centered_bias
class DNNRegressor(estimator.Estimator):
"""A regressor for TensorFlow DNN models.
Example:
```python
sparse_feature_a = sparse_column_with_hash_bucket(...)
sparse_feature_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
...)
sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
...)
estimator = DNNRegressor(
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
# Input builders
  def input_fn_train(): # returns x, y
pass
estimator.fit(input_fn=input_fn_train)
  def input_fn_eval(): # returns x, y
pass
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a `KeyError`:
* if `weight_column_name` is not `None`, a feature with
`key=weight_column_name` whose value is a `Tensor`.
* for each `column` in `feature_columns`:
- if `column` is a `SparseColumn`, a feature with `key=column.name`
whose `value` is a `SparseTensor`.
- if `column` is a `WeightedSparseColumn`, two features: the first with
`key` the id column name, the second with `key` the weight column name.
Both features' `value` must be a `SparseTensor`.
- if `column` is a `RealValuedColumn`, a feature with `key=column.name`
whose `value` is a `Tensor`.
"""
def __init__(self,
hidden_units,
feature_columns,
model_dir=None,
weight_column_name=None,
optimizer=None,
activation_fn=nn.relu,
dropout=None,
gradient_clip_norm=None,
enable_centered_bias=False,
config=None,
feature_engineering_fn=None,
label_dimension=1,
embedding_lr_multipliers=None,
input_layer_min_slice_size=None):
"""Initializes a `DNNRegressor` instance.
Args:
hidden_units: List of hidden units per layer. All layers are fully
connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
has 32.
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Adagrad optimizer.
activation_fn: Activation function applied to each layer. If `None`, will
use `tf.nn.relu`.
dropout: When not `None`, the probability we will drop out a given
coordinate.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
label_dimension: Number of regression targets per example. This is the
size of the last dimension of the labels and logits `Tensor` objects
(typically, these have shape `[batch_size, label_dimension]`).
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
a `float` multiplier. Multiplier will be used to multiply with
learning rate for the embedding variables.
input_layer_min_slice_size: Optional. The min slice size of input layer
partitions. If not provided, will use the default of 64M.
Returns:
A `DNNRegressor` estimator.
"""
self._feature_columns = tuple(feature_columns or [])
super(DNNRegressor, self).__init__(
model_fn=_dnn_model_fn,
model_dir=model_dir,
config=config,
params={
"head":
head_lib._regression_head( # pylint: disable=protected-access
label_dimension=label_dimension,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias),
"hidden_units": hidden_units,
"feature_columns": self._feature_columns,
"optimizer": optimizer,
"activation_fn": activation_fn,
"dropout": dropout,
"gradient_clip_norm": gradient_clip_norm,
"embedding_lr_multipliers": embedding_lr_multipliers,
"input_layer_min_slice_size": input_layer_min_slice_size,
},
feature_engineering_fn=feature_engineering_fn)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None):
"""See evaluable.Evaluable."""
# TODO(zakaria): remove once deprecation is finished (b/31229024)
custom_metrics = {}
if metrics:
for key, metric in six.iteritems(metrics):
if (not isinstance(metric, metric_spec.MetricSpec) and
not isinstance(key, tuple)):
custom_metrics[(key, prediction_key.PredictionKey.SCORES)] = metric
else:
custom_metrics[key] = metric
return super(DNNRegressor, self).evaluate(
x=x,
y=y,
input_fn=input_fn,
feed_fn=feed_fn,
batch_size=batch_size,
steps=steps,
metrics=custom_metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
@deprecated_arg_values(
"2017-03-01",
"Please switch to predict_scores, or set `outputs` argument.",
outputs=None)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
By default, returns predicted scores. But this default will be dropped
soon. Users should either pass `outputs`, or call `predict_scores` method.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
outputs: list of `str`, name of the output to predict.
If `None`, returns scores.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
If `outputs` is set, returns a dict of predictions.
"""
if not outputs:
return self.predict_scores(
x=x,
input_fn=input_fn,
batch_size=batch_size,
as_iterable=as_iterable)
return super(DNNRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=outputs,
as_iterable=as_iterable)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE,
estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict_scores(self, x=None, input_fn=None, batch_size=None,
as_iterable=True):
"""Returns predicted scores for given features.
Args:
x: features.
input_fn: Input function. If set, x must be None.
batch_size: Override default batch size.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
Numpy array of predicted scores (or an iterable of predicted scores if
as_iterable is True). If `label_dimension == 1`, the shape of the output
is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
"""
key = prediction_key.PredictionKey.SCORES
preds = super(DNNRegressor, self).predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return (pred[key] for pred in preds)
return preds[key]
def export(self,
export_dir,
input_fn=None,
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
default_batch_size=1,
exports_to_keep=None):
"""See BaseEstimator.export."""
def default_input_fn(unused_estimator, examples):
return layers.parse_feature_columns_from_examples(examples,
self._feature_columns)
return super(DNNRegressor, self).export(
export_dir=export_dir,
input_fn=input_fn or default_input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
signature_fn=signature_fn or export.regression_signature_fn,
prediction_key=prediction_key.PredictionKey.SCORES,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep)
| apache-2.0 |
cmelange/ansible | lib/ansible/modules/cloud/webfaction/webfaction_site.py | 6 | 7096 | #!/usr/bin/python
#
# Create Webfaction website using Ansible and the Webfaction API
#
# ------------------------------------------
#
# (c) Quentin Stafford-Fraser 2015
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: webfaction_site
short_description: Add or remove a website on a Webfaction host
description:
- Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name.
- If a site of the same name exists in the account but on a different host, the operation will exit.
- "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
- See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the website
required: true
state:
description:
- Whether the website should exist
required: false
choices: ['present', 'absent']
default: "present"
host:
description:
- The webfaction host on which the site should be created.
required: true
https:
description:
- Whether or not to use HTTPS
required: false
choices:
- true
- false
default: 'false'
site_apps:
description:
- A mapping of URLs to apps
required: false
subdomains:
description:
- A list of subdomains associated with this site.
required: false
default: null
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
'''
EXAMPLES = '''
- name: create website
webfaction_site:
name: testsite1
state: present
host: myhost.webfaction.com
subdomains:
- 'testsite1.my_domain.org'
site_apps:
- ['testapp1', '/']
https: no
login_name: "{{webfaction_user}}"
login_password: "{{webfaction_passwd}}"
'''
import socket
import xmlrpclib
webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(required=False, choices=['present', 'absent'], default='present'),
# You can specify an IP address or hostname.
host = dict(required=True),
https = dict(required=False, type='bool', default=False),
subdomains = dict(required=False, type='list', default=[]),
site_apps = dict(required=False, type='list', default=[]),
login_name = dict(required=True),
login_password = dict(required=True, no_log=True),
),
supports_check_mode=True
)
site_name = module.params['name']
site_state = module.params['state']
site_host = module.params['host']
site_ip = socket.gethostbyname(site_host)
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password']
)
site_list = webfaction.list_websites(session_id)
site_map = dict([(i['name'], i) for i in site_list])
existing_site = site_map.get(site_name)
result = {}
# Here's where the real stuff happens
if site_state == 'present':
# Does a site with this name already exist?
if existing_site:
# If yes, but it's on a different IP address, then fail.
# If we wanted to allow relocation, we could add a 'relocate=true' option
# which would get the existing IP address, delete the site there, and create it
# at the new address. A bit dangerous, perhaps, so for now we'll require manual
# deletion if it's on another host.
if existing_site['ip'] != site_ip:
module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
# If it's on this host and the key parameters are the same, nothing needs to be done.
if (existing_site['https'] == module.boolean(module.params['https'])) and \
(set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
(dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
module.exit_json(
changed = False
)
positional_args = [
session_id, site_name, site_ip,
module.boolean(module.params['https']),
module.params['subdomains'],
]
for a in module.params['site_apps']:
positional_args.append( (a[0], a[1]) )
if not module.check_mode:
# If this isn't a dry run, create or modify the site
result.update(
webfaction.create_website(
*positional_args
) if not existing_site else webfaction.update_website (
*positional_args
)
)
elif site_state == 'absent':
# If the site's already not there, nothing changed.
if not existing_site:
module.exit_json(
changed = False,
)
if not module.check_mode:
# If this isn't a dry run, delete the site
result.update(
webfaction.delete_website(session_id, site_name, site_ip)
)
else:
module.fail_json(msg="Unknown state specified: {}".format(site_state))
module.exit_json(
changed = True,
result = result
)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
d2emon/generator-pack | src/genesys/generator/generator/name.py | 1 | 2378 | import random
from fixtures import generator_data
GENDER_NEUTRAL = 0
GENDER_MALE = 1
GENDER_FEMALE = 2
def random_generator(selector, generator_id=None, max_value=10):
if generator_id is None:
generator_id = random.randrange(max_value)
return selector(generator_id)
class Name:
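    """Holds the parts of one generated name; joins them with ``glue`` and title-cases the result."""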
glue = ""
def __init__(self, *items, generator=None):
self._items = items
self.generator = generator
@classmethod
def build_name(cls, items):
return cls.glue.join(items).title()
@property
def value(self):
return self.build_name(self._items)
    @value.setter
    def value(self, value):
        self._items = (value,)
def __str__(self):
return self.value
def __repr__(self):
return str(self)
class TextGenerator:
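    """Base generator: draws random entries from the fixture block named by ``block_id``."""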
block_id = ''
groups = ()
data = []
def __init__(self, data=None):
data = data or generator_data
self.data = data[self.block_id]
def generate_data(self, group_id=None):
if len(self.groups) < 1:
return self.data
if group_id is None:
group_id = random.choice(self.groups)
return self.data.get(group_id)
def generate(self, group_id=None, item_id=None, *args, **kwargs):
return Name(random.choice(self.generate_data(group_id)), generator=self)
def __iter__(self):
return self
def __next__(self):
return self.generate()
class NameGenerator(TextGenerator):
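    """Assembles a name by drawing one element from each part list of the chosen race."""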
block_id = 'names'
name_group_id = 'aliens'
gender = GENDER_NEUTRAL
def __init__(self, data=None):
super().__init__(data)
self.data = self.data[self.name_group_id]
def generate(self, race_id=None, item_id=None, *args, **kwargs):
parts = (random.choice(part) for part in self.generate_data(race_id))
return Name(*parts, generator=self)
class ListNameGenerator(NameGenerator):
names = []
@classmethod
def select_names(cls, *args, **kwargs):
return cls.names
@classmethod
def generate(cls, gender=GENDER_NEUTRAL, *args, **kwargs):
        return Name(random.choice(cls.select_names(gender=gender, *args, **kwargs)), generator=cls)
class GenderListNameGenerator(ListNameGenerator):
names = dict()
@classmethod
def select_names(cls, gender=GENDER_NEUTRAL, *args, **kwargs):
return cls.names[gender]
| gpl-3.0 |
Avsecz/concise | concise/legacy/models.py | 2 | 4949 | """Template for models
"""
from keras.models import Model
from keras.optimizers import Adam
import keras.layers as kl
import keras.initializers as ki
import keras.regularizers as kr
# concise modules
from concise import initializers as ci
from concise import layers as cl
from concise.utils import PWM
# ### 'First' Concise architecture from Tensorflow
# Splines:
# - `spline_score = X_spline %*% spline_weights`
# - Transform:
# - `exp(spline_score)`
# - `spline_score + 1`
# Linear features:
# - `lm_feat = X_feat %*% feature_weights`
# Model:
# - conv2d, `padding = "valid", w = motif_base_weights`
# - activation: exp or relu, bias = motif_bias
# - elementwise_multiply: `hidden * spline_score`
# - pooling: max, sum or mean (accross the whole model)
# - Optionally: multiply by non-linear scaling factor (model fitting)
# - `pool_layer %*% motif_weights + X_feat %*% feature_weights + final_bias`
# - loss: mse
# - optimizer: Adam, optionally l-BFGS
# Regularization:
# - motif_base_weights, L1: motif_lamb
# - motif_weights, L1: lambd
# - spline_weights:
# - `diag(t(spline_weights) %*% S %*% spline_weights)`, L2_mean: spline_lamb
# - spline_weights, L2 / n_spline_tracks: spline_param_lamb
# convolution model
def single_layer_pos_effect(pooling_layer="sum", # 'sum', 'max' or 'mean'
nonlinearity="relu", # 'relu' or 'exp'
motif_length=9,
n_motifs=6, # number of filters
step_size=0.01,
num_tasks=1, # multi-task learning - 'trans'
n_covariates=0,
seq_length=100, # pre-defined sequence length
# splines
n_splines=None,
share_splines=False, # should the positional bias be shared across motifs
# regularization
lamb=1e-5, # overall motif coefficient regularization
motif_lamb=1e-5,
spline_lamb=1e-5,
spline_param_lamb=1e-5,
# initialization
init_motifs=None, # motifs to intialize
init_motif_bias=0,
init_sd_motif=1e-2,
init_sd_w=1e-3, # initial weight scale of feature w or motif w
**kwargs): # unused params
# initialize conv kernels to known motif pwm's
if init_motifs:
# WARNING - initialization is not the same as for Concise class
pwm_list = [PWM.from_consensus(motif) for motif in init_motifs]
kernel_initializer = ci.PWMKernelInitializer(pwm_list, stddev=init_sd_motif)
bias_initializer = ci.PWMBiasInitializer(pwm_list, kernel_size=motif_length)
else:
# kernel_initializer = "glorot_uniform"
kernel_initializer = ki.RandomNormal(stddev=init_sd_motif)
bias_initializer = ki.Constant(value=init_motif_bias)
activation = nonlinearity # supports 'relu' out-of-the-box
# define the model
# ----------------
inputs = []
seq_input = kl.Input((seq_length, 4))
inputs.append(seq_input)
# convolution
xseq = kl.Conv1D(filters=n_motifs, kernel_size=motif_length,
kernel_regularizer=kr.l1(l=motif_lamb), # Regularization
activation=activation,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer
)(seq_input)
# optional positional effect
if n_splines:
xseq = cl.GAMSmooth(n_bases=n_splines,
share_splines=share_splines,
l2_smooth=spline_lamb,
l2=spline_param_lamb,
)(xseq)
# pooling layer
    if pooling_layer == "max":
        xseq = kl.pooling.GlobalMaxPooling1D()(xseq)
    elif pooling_layer == "mean":
        xseq = kl.pooling.GlobalAveragePooling1D()(xseq)
    elif pooling_layer == "sum":
xseq = cl.GlobalSumPooling1D()(xseq)
else:
raise ValueError("pooling_layer can only be 'sum', 'mean' or 'max'.")
# -----
# add covariates
if n_covariates:
cov_input = kl.Input((n_covariates, ))
inputs.append(cov_input)
x = kl.concatenate([xseq, cov_input])
else:
x = xseq
# -----
predictions = kl.Dense(units=num_tasks,
kernel_regularizer=kr.l1(lamb),
kernel_initializer=ki.RandomNormal(stddev=init_sd_w)
)(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer=Adam(lr=step_size), loss="mse", metrics=["mse"])
return model
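# Illustrative call (argument values are made up; input shapes follow the
# Input layers defined above -- a (seq_length, 4) one-hot sequence plus an
# optional covariate vector):
#
#   model = single_layer_pos_effect(pooling_layer="sum", n_motifs=6,
#                                   motif_length=9, seq_length=100)
#   model.fit([X_seq], y)  # X_seq shape: (n_samples, 100, 4)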
| mit |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/google/protobuf/internal/service_reflection_test.py | 75 | 5170 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.internal.service_reflection."""
__author__ = '[email protected] (Petar Petrov)'
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
from google.protobuf import unittest_pb2
from google.protobuf import service_reflection
from google.protobuf import service
class FooUnitTest(unittest.TestCase):
def testService(self):
class MockRpcChannel(service.RpcChannel):
def CallMethod(self, method, controller, request, response, callback):
self.method = method
self.controller = controller
self.request = request
callback(response)
class MockRpcController(service.RpcController):
def SetFailed(self, msg):
self.failure_message = msg
self.callback_response = None
class MyService(unittest_pb2.TestService):
pass
self.callback_response = None
def MyCallback(response):
self.callback_response = response
rpc_controller = MockRpcController()
channel = MockRpcChannel()
srvc = MyService()
srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
self.assertEqual('Method Foo not implemented.',
rpc_controller.failure_message)
self.assertEqual(None, self.callback_response)
rpc_controller.failure_message = None
service_descriptor = unittest_pb2.TestService.GetDescriptor()
srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
unittest_pb2.BarRequest(), MyCallback)
self.assertEqual('Method Bar not implemented.',
rpc_controller.failure_message)
self.assertEqual(None, self.callback_response)
class MyServiceImpl(unittest_pb2.TestService):
def Foo(self, rpc_controller, request, done):
self.foo_called = True
def Bar(self, rpc_controller, request, done):
self.bar_called = True
srvc = MyServiceImpl()
rpc_controller.failure_message = None
srvc.Foo(rpc_controller, unittest_pb2.FooRequest(), MyCallback)
self.assertEqual(None, rpc_controller.failure_message)
self.assertEqual(True, srvc.foo_called)
rpc_controller.failure_message = None
srvc.CallMethod(service_descriptor.methods[1], rpc_controller,
unittest_pb2.BarRequest(), MyCallback)
self.assertEqual(None, rpc_controller.failure_message)
self.assertEqual(True, srvc.bar_called)
def testServiceStub(self):
class MockRpcChannel(service.RpcChannel):
def CallMethod(self, method, controller, request,
response_class, callback):
self.method = method
self.controller = controller
self.request = request
callback(response_class())
self.callback_response = None
def MyCallback(response):
self.callback_response = response
channel = MockRpcChannel()
stub = unittest_pb2.TestService_Stub(channel)
rpc_controller = 'controller'
request = 'request'
# GetDescriptor now static, still works as instance method for compatibility
self.assertEqual(unittest_pb2.TestService_Stub.GetDescriptor(),
stub.GetDescriptor())
# Invoke method.
stub.Foo(rpc_controller, request, MyCallback)
self.assertIsInstance(self.callback_response, unittest_pb2.FooResponse)
self.assertEqual(request, channel.request)
self.assertEqual(rpc_controller, channel.controller)
self.assertEqual(stub.GetDescriptor().methods[0], channel.method)
if __name__ == '__main__':
unittest.main()
| agpl-3.0 |
blueburningcoder/nupic | ci/travis/deploy-wheel-to-s3.py | 34 | 1809 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import sys
import boto
from boto.s3.key import Key
# This script assumes the following environment variables are set for boto:
# - AWS_ACCESS_KEY_ID
# - AWS_SECRET_ACCESS_KEY
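# Example invocation (the wheel filename is illustrative):
#   python deploy-wheel-to-s3.py dist/nupic-0.3.0-cp27-none-linux_x86_64.whl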
REGION = "us-west-2"
BUCKET = "artifacts.numenta.org"
RELEASE_FOLDER = "numenta/nupic/releases"
def upload(artifactsBucket, wheelFileName, wheelPath):
key = Key(artifactsBucket)
key.key = "%s/%s" % (RELEASE_FOLDER, wheelFileName)
print "Uploading %s to %s/%s..." % (wheelFileName, BUCKET, RELEASE_FOLDER)
key.set_contents_from_filename(wheelPath)
def run(wheelPath):
wheelFileName = os.path.basename(wheelPath)
conn = boto.connect_s3()
artifactsBucket = conn.get_bucket(BUCKET)
upload(artifactsBucket, wheelFileName, wheelPath)
if __name__ == "__main__":
wheelPath = sys.argv[1]
run(wheelPath)
| agpl-3.0 |
tquizzle/Sick-Beard | sickbeard/clients/requests/utils.py | 204 | 17497 | # -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import os
import platform
import re
import sys
import zlib
from netrc import netrc, NetrcParseError
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import quote, urlparse, bytes, str, OrderedDict, urlunparse
from .cookies import RequestsCookieJar, cookiejar_from_dict
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
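    """Best-effort length of *o*: tries len(), then a ``len`` attribute, then the file size via fileno()."""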
if hasattr(o, '__len__'):
return len(o)
if hasattr(o, 'len'):
return o.len
if hasattr(o, 'fileno'):
return os.fstat(o.fileno()).st_size
def get_netrc_auth(url):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
locations = (os.path.expanduser('~/{0}'.format(f)) for f in NETRC_FILES)
netrc_path = None
for loc in locations:
if os.path.exists(loc) and not netrc_path:
netrc_path = loc
# Abort early if there isn't one.
if netrc_path is None:
return netrc_path
ri = urlparse(url)
# Strip port numbers from netloc
host = ri.netloc.split(':')[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth
pass
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if name and name[0] != '<' and name[-1] != '>':
return os.path.basename(name)
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
ValueError: need more than 1 value to unpack
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
ValueError: cannot encode objects that are not 2-tuples.
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, dict):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
cj2 = cookiejar_from_dict(cookie_dict)
cj.update(cj2)
return cj
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
return charset_re.findall(content)
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = cgi.parse_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode('', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. every encodings from ``<meta ... charset=XXX>``
3. fall back and replace all unicode characters
"""
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
def stream_decompress(iterator, mode='gzip'):
"""Stream decodes an iterator over compressed data
:param iterator: An iterator over compressed data
:param mode: 'gzip' or 'deflate'
:return: An iterator over decompressed data
"""
if mode not in ['gzip', 'deflate']:
raise ValueError('stream_decompress mode must be gzip or deflate')
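    # wbits of 16 + MAX_WBITS tells zlib to expect a gzip header and trailer;
    # a negative wbits selects a raw deflate stream with no zlib header.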
zlib_mode = 16 + zlib.MAX_WBITS if mode == 'gzip' else -zlib.MAX_WBITS
dec = zlib.decompressobj(zlib_mode)
try:
for chunk in iterator:
rv = dec.decompress(chunk)
if rv:
yield rv
except zlib.error:
# If there was an error decompressing, just return the raw chunk
yield chunk
# Continue to return the rest of the raw data
for chunk in iterator:
yield chunk
else:
# Make sure everything has been returned from the decompression object
buf = dec.decompress(bytes())
rv = buf + dec.flush()
if rv:
yield rv
def stream_untransfer(gen, resp):
ce = resp.headers.get('content-encoding', '').lower()
if 'gzip' in ce:
gen = stream_decompress(gen, mode='gzip')
elif 'deflate' in ce:
gen = stream_decompress(gen, mode='deflate')
return gen
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+ "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
"""
parts = uri.split('%')
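    # Every element after the first starts with the two hex digits of a
    # percent-escape; decode it back to a literal character only if that
    # character is unreserved, otherwise re-attach the '%' and leave it encoded.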
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
c = chr(int(h, 16))
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved, unreserved,
# or '%')
return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
def get_environ_proxies(url):
"""Return a dict of environment proxies."""
proxy_keys = [
'all',
'http',
'https',
'ftp',
'socks'
]
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy = get_proxy('no_proxy')
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the netloc, both with and without the port.
no_proxy = no_proxy.split(',')
netloc = urlparse(url).netloc
for host in no_proxy:
if netloc.endswith(host) or netloc.split(':')[0].endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return {}
# If we get here, we either didn't have no_proxy set or we're not going
# anywhere that no_proxy applies to.
proxies = [(key, get_proxy(key + '_proxy')) for key in proxy_keys]
return dict([(key, val) for (key, val) in proxies if val])
def default_user_agent():
"""Return a string representing the default user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['python-requests/%s' % __version__,
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
def default_headers():
return {
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate', 'compress')),
'Accept': '*/*'
}
def parse_header_links(value):
"""Return a dict of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
"""
links = []
replace_chars = " '\""
for val in value.split(","):
try:
url, params = val.split(";", 1)
except ValueError:
url, params = val, ''
link = {}
link["url"] = url.strip("<> '\"")
for param in params.split(";"):
try:
key, value = param.split("=")
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
# JSON always starts with two ASCII characters, so detection is as
# easy as counting the nulls and from their location and count
# determine the encoding. Also detect a BOM, if present.
sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
def prepend_scheme_if_needed(url, new_scheme):
'''Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.'''
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
username,password."""
if url:
parsed = urlparse(url)
return (parsed.username, parsed.password)
else:
return ('', '')
| gpl-3.0 |
skuicloud/chef-openstack-cookbooks | cookbooks/openstack-image/files/default/glance_plugin.py | 10 | 4858 | from glance.client import V1Client
from glance.common import exception
import collectd
global NAME, OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL, OS_AUTH_STRATEGY, VERBOSE_LOGGING
NAME = "glance_plugin"
OS_USERNAME = "username"
OS_PASSWORD = "password"
OS_TENANT_NAME = "tenantname"
OS_AUTH_URL = "http://localhost:5000/v2.0"
OS_AUTH_STRATEGY = "keystone"
VERBOSE_LOGGING = False
def get_stats(user, passwd, tenant, url, host=None):
creds = {"username": user, "password": passwd, "tenant": tenant,"auth_url": url, "strategy": OS_AUTH_STRATEGY}
client = V1Client(host,creds=creds)
try:
image_list = client.get_images_detailed()
except exception.NotAuthenticated:
msg = "Client credentials appear to be invalid"
raise exception.ClientConnectionError(msg)
else:
# TODO(shep): this needs to be rewritten more inline with the keystone|nova plugins
data = dict()
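        # Totals are aggregated globally and per owning tenant (keyed by the
        # image's "owner" UUID); snapshots are images whose "image_type"
        # property is "snapshot" and are counted separately.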
data["count"] = int(len(image_list))
data["bytes"] = 0
data["snapshot.count"] = 0
data["snapshot.bytes"] = 0
data["tenant"] = dict()
for image in image_list:
data["bytes"] += int(image["size"])
if "image_type" in image["properties"] and image["properties"]["image_type"] == "snapshot":
data["snapshot.count"] += 1
data["snapshot.bytes"] += int(image["size"])
uuid = str(image["owner"])
if uuid in data["tenant"]:
data["tenant"][uuid]["count"] += 1
data["tenant"][uuid]["bytes"] += int(image["size"])
if "image_type" in image["properties"] and image["properties"]["image_type"] == "snapshot":
data["tenant"][uuid]["snapshot.count"] += 1
data["tenant"][uuid]["snapshot.bytes"] += int(image["size"])
else:
data["tenant"][uuid] = dict()
data["tenant"][uuid]["count"] = 1
data["tenant"][uuid]["bytes"] = int(image["size"])
data["tenant"][uuid]["snapshot.count"] = 0
data["tenant"][uuid]["snapshot.bytes"] = 0
if "image_type" in image["properties"] and image["properties"]["image_type"] == "snapshot":
data["tenant"][uuid]["snapshot.count"] += 1
data["tenant"][uuid]["snapshot.bytes"] += int(image["size"])
# debug
#for key in data.keys():
# if key == "tenant":
# for uuid in data[key].keys():
# for field in data[key][uuid]:
# print "glance.images.tenant.%s.%s : %i" % (uuid, field, data[key][uuid][field])
# else:
# print "glance.images.%s : %i" % (key, data[key])
##########
return data
def configure_callback(conf):
"""Received configuration information"""
    global OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL, VERBOSE_LOGGING
for node in conf.children:
if node.key == "Username":
OS_USERNAME = node.values[0]
elif node.key == "Password":
OS_PASSWORD = node.values[0]
elif node.key == "TenantName":
OS_TENANT_NAME = node.values[0]
elif node.key == "AuthURL":
OS_AUTH_URL = node.values[0]
elif node.key == "Verbose":
VERBOSE_LOGGING = node.values[0]
else:
logger("warn", "Unknown config key: %s" % node.key)
def read_callback():
logger("verb", "read_callback")
info = get_stats(OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL)
if not info:
logger("err", "No information received")
return
for key in info.keys():
if key == "tenant":
for uuid in info[key].keys():
for field in info[key][uuid]:
logger('verb', 'Dispatching glance.images.tenant.%s.%s : %i' % (uuid, field, int(info[key][uuid][field])))
path = 'glance.images.%s.%s' % (uuid, field)
val = collectd.Values(plugin=path)
val.type = 'gauge'
val.values = [int(info[key][uuid][field])]
val.dispatch()
else:
logger('verb', 'Dispatching %s : %i' % (key, int(info[key])))
path = 'glance.images.%s' % (key)
val = collectd.Values(plugin=path)
val.type = 'gauge'
val.values = [int(info[key])]
val.dispatch()
def logger(t, msg):
if t == 'err':
collectd.error('%s: %s' % (NAME, msg))
if t == 'warn':
collectd.warning('%s: %s' % (NAME, msg))
elif t == 'verb' and VERBOSE_LOGGING == True:
collectd.info('%s: %s' % (NAME, msg))
collectd.register_config(configure_callback)
collectd.warning("Initializing glance plugin")
collectd.register_read(read_callback)
| apache-2.0 |
40223243/40223243w17 | static/Brython3.1.1-20150328-091302/Lib/_dummy_thread.py | 742 | 4769 | """Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import _thread
except ImportError:
import _dummy_thread as _thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
# A dummy value
TIMEOUT_MAX = 2**31
# NOTE: this module can be imported early in the extension building process,
# and so top level imports of other modules should be avoided. Instead, all
# imports are done when needed on a function-by-function basis. Since threads
# are disabled, the import lock should not be an issue anyway (??).
error = RuntimeError
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of _thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by _thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
If the executed function calls interrupt_main the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
import traceback
traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
def exit():
"""Dummy implementation of _thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of _thread.get_ident().
Since this module should only be used when _threadmodule is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of _thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of _thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
class LockType(object):
"""Class implementing dummy implementation of _thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the _thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None, timeout=-1):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
if timeout > 0:
import time
time.sleep(timeout)
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
| gpl-3.0 |
BeATz-UnKNoWN/python-for-android | python-modules/twisted/twisted/mail/topfiles/setup.py | 54 | 1918 | # Copyright (c) 2008 Twisted Matrix Laboratories.
# See LICENSE for details.
import sys
try:
from twisted.python import dist
except ImportError:
raise SystemExit("twisted.python.dist module not found. Make sure you "
"have installed the Twisted core package before "
"attempting to install any other Twisted projects.")
if __name__ == '__main__':
if sys.version_info[:2] >= (2, 4):
extraMeta = dict(
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: No Input/Output (Daemon)",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Topic :: Communications :: Email :: Post-Office :: IMAP",
"Topic :: Communications :: Email :: Post-Office :: POP3",
"Topic :: Software Development :: Libraries :: Python Modules",
])
else:
extraMeta = {}
dist.setup(
twisted_subproject="mail",
scripts=dist.getScripts("mail"),
# metadata
name="Twisted Mail",
description="A Twisted Mail library, server and client.",
author="Twisted Matrix Laboratories",
author_email="[email protected]",
maintainer="Jp Calderone",
url="http://twistedmatrix.com/trac/wiki/TwistedMail",
license="MIT",
long_description="""\
An SMTP, IMAP and POP protocol implementation together with clients
and servers.
Twisted Mail contains high-level, efficient protocol implementations
for both clients and servers of SMTP, POP3, and IMAP4. Additionally,
it contains an "out of the box" combination SMTP/POP3 virtual-hosting
mail server. Also included is a read/write Maildir implementation and
a basic Mail Exchange calculator.
""",
**extraMeta)
| apache-2.0 |
dingocuster/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Andreas Mueller <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
vitaly-krugl/nupic | src/nupic/data/generators/data_generator.py | 10 | 16250 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import random as rand
from nupic.encoders import adaptive_scalar, sdr_category, date
from nupic.bindings.math import GetNTAReal
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.generators.distributions import *
realDType = GetNTAReal()
class DataGenerator():
"""The DataGenerator provides a framework for generating, encoding, saving
and exporting records. Each column of the output contains records with a
specific set of parameters such as encoderType, n, w, etc. This interface
is intended to be used for testing the spatial pooler, temporal memory and
for generating artificial datasets.
"""
def __init__(self, name='testDataset', seed=42, verbosity=0):
"""Initialize the dataset generator with a random seed and a name"""
self.name=name
self.verbosity=verbosity
self.setSeed(seed)
self.fields=[]
def getDescription(self):
"""Returns a description of the dataset"""
description = {'name':self.name, 'fields':[f.name for f in self.fields], \
'numRecords by field':[f.numRecords for f in self.fields]}
return description
def setSeed(self, seed):
"""Set the random seed and the numpy seed
Parameters:
--------------------------------------------------------------------
seed: random seed
"""
rand.seed(seed)
np.random.seed(seed)
def addField(self, name, fieldParams, encoderParams):
"""Add a single field to the dataset.
Parameters:
-------------------------------------------------------------------
name: The user-specified name of the field
fieldSpec: A list of one or more dictionaries specifying parameters
to be used for dataClass initialization. Each dict must
contain the key 'type' that specifies a distribution for
the values in this field
encoderParams: Parameters for the field encoder
"""
    assert fieldParams is not None and 'type' in fieldParams
dataClassName = fieldParams.pop('type')
try:
dataClass=eval(dataClassName)(fieldParams)
except TypeError, e:
print ("#### Error in constructing %s class object. Possibly missing "
"some required constructor parameters. Parameters "
"that were provided are: %s" % (dataClass, fieldParams))
raise
encoderParams['dataClass']=dataClass
encoderParams['dataClassName']=dataClassName
fieldIndex = self.defineField(name, encoderParams)
def addMultipleFields(self, fieldsInfo):
"""Add multiple fields to the dataset.
Parameters:
-------------------------------------------------------------------
fieldsInfo: A list of dictionaries, containing a field name, specs for
the data classes and encoder params for the corresponding
field.
"""
assert all(x in field for x in ['name', 'fieldSpec', 'encoderParams'] for field \
in fieldsInfo)
for spec in fieldsInfo:
self.addField(spec.pop('name'), spec.pop('fieldSpec'), spec.pop('encoderParams'))
def defineField(self, name, encoderParams=None):
"""Initialize field using relevant encoder parameters.
Parameters:
-------------------------------------------------------------------
name: Field name
encoderParams: Parameters for the encoder.
Returns the index of the field
"""
self.fields.append(_field(name, encoderParams))
return len(self.fields)-1
def setFlag(self, index, flag):
"""Set flag for field at index. Flags are special characters such as 'S' for
sequence or 'T' for timestamp.
Parameters:
--------------------------------------------------------------------
index: index of field whose flag is being set
flag: special character
"""
assert len(self.fields)>index
self.fields[index].flag=flag
def generateRecord(self, record):
"""Generate a record. Each value is stored in its respective field.
Parameters:
--------------------------------------------------------------------
record: A 1-D array containing as many values as the number of fields
fields: An object of the class field that specifies the characteristics
of each value in the record
Assertion:
--------------------------------------------------------------------
len(record)==len(fields): A value for each field must be specified.
Replace missing values of any type by
SENTINEL_VALUE_FOR_MISSING_DATA
This method supports external classes but not combination of classes.
"""
assert(len(record)==len(self.fields))
if record is not None:
for x in range(len(self.fields)):
self.fields[x].addValue(record[x])
else:
for field in self.fields:
field.addValue(field.dataClass.getNext())
def generateRecords(self, records):
"""Generate multiple records. Refer to definition for generateRecord"""
if self.verbosity>0: print 'Generating', len(records), 'records...'
for record in records:
self.generateRecord(record)
def getRecord(self, n=None):
"""Returns the nth record"""
if n is None:
assert len(self.fields)>0
n = self.fields[0].numRecords-1
assert (all(field.numRecords>n for field in self.fields))
record = [field.values[n] for field in self.fields]
return record
def getAllRecords(self):
"""Returns all the records"""
values=[]
numRecords = self.fields[0].numRecords
assert (all(field.numRecords==numRecords for field in self.fields))
for x in range(numRecords):
values.append(self.getRecord(x))
return values
def encodeRecord(self, record, toBeAdded=True):
"""Encode a record as a sparse distributed representation
Parameters:
--------------------------------------------------------------------
record: Record to be encoded
toBeAdded: Whether the encodings corresponding to the record are added to
the corresponding fields
"""
encoding=[self.fields[i].encodeValue(record[i], toBeAdded) for i in \
xrange(len(self.fields))]
return encoding
def encodeAllRecords(self, records=None, toBeAdded=True):
"""Encodes a list of records.
Parameters:
--------------------------------------------------------------------
records: One or more records. (i,j)th element of this 2D array
specifies the value at field j of record i.
If unspecified, records previously generated and stored are
used.
toBeAdded: Whether the encodings corresponding to the record are added to
the corresponding fields
"""
if records is None:
records = self.getAllRecords()
if self.verbosity>0: print 'Encoding', len(records), 'records.'
encodings = [self.encodeRecord(record, toBeAdded) for record in records]
return encodings
def addValueToField(self, i, value=None):
"""Add 'value' to the field i.
Parameters:
--------------------------------------------------------------------
value: value to be added
i: value is added to field i
"""
assert(len(self.fields)>i)
if value is None:
value = self.fields[i].dataClass.getNext()
self.fields[i].addValue(value)
return value
else: self.fields[i].addValue(value)
def addValuesToField(self, i, numValues):
"""Add values to the field i."""
assert(len(self.fields)>i)
values = [self.addValueToField(i) for n in range(numValues)]
return values
def getSDRforValue(self, i, j):
"""Returns the sdr for jth value at column i"""
assert len(self.fields)>i
assert self.fields[i].numRecords>j
encoding = self.fields[i].encodings[j]
return encoding
def getZeroedOutEncoding(self, n):
"""Returns the nth encoding with the predictedField zeroed out"""
assert all(field.numRecords>n for field in self.fields)
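    # The predicted field contributes the encoding of the missing-data
    # sentinel (so it carries no predictive information); every other field
    # contributes its stored encoding for record n.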
encoding = np.concatenate([field.encoder.encode(SENTINEL_VALUE_FOR_MISSING_DATA)\
if field.isPredictedField else field.encodings[n] for field in self.fields])
return encoding
def getTotaln(self):
"""Returns the cumulative n for all the fields in the dataset"""
n = sum([field.n for field in self.fields])
return n
def getTotalw(self):
"""Returns the cumulative w for all the fields in the dataset"""
w = sum([field.w for field in self.fields])
return w
def getEncoding(self, n):
"""Returns the nth encoding"""
assert (all(field.numEncodings>n for field in self.fields))
encoding = np.concatenate([field.encodings[n] for field in self.fields])
return encoding
def getAllEncodings(self):
"""Returns encodings for all the records"""
numEncodings=self.fields[0].numEncodings
assert (all(field.numEncodings==numEncodings for field in self.fields))
encodings = [self.getEncoding(index) for index in range(numEncodings)]
return encodings
def getAllFieldNames(self):
"""Returns all field names"""
names = [field.name for field in self.fields]
return names
def getAllFlags(self):
"""Returns flags for all fields"""
flags = [field.flag for field in self.fields]
return flags
def getAllDataTypes(self):
"""Returns data types for all fields"""
dataTypes = [field.dataType for field in self.fields]
return dataTypes
def getFieldDescriptions(self):
"""Returns descriptions for all fields"""
descriptions = [field.getDescription() for field in self.fields]
return descriptions
def saveRecords(self, path='myOutput'):
"""Export all the records into a csv file in numenta format.
Example header format:
fieldName1 fieldName2 fieldName3
date string float
T S
Parameters:
--------------------------------------------------------------------
path: Relative path of the file to which the records are to be exported
"""
numRecords = self.fields[0].numRecords
assert (all(field.numRecords==numRecords for field in self.fields))
import csv
with open(path+'.csv', 'wb') as f:
writer = csv.writer(f)
writer.writerow(self.getAllFieldNames())
writer.writerow(self.getAllDataTypes())
writer.writerow(self.getAllFlags())
writer.writerows(self.getAllRecords())
if self.verbosity>0:
print '******', numRecords,'records exported in numenta format to file:',\
path,'******\n'
def removeAllRecords(self):
"""Deletes all the values in the dataset"""
for field in self.fields:
field.encodings, field.values=[], []
field.numRecords, field.numEncodings= (0, 0)
class _field():
def __init__(self, name, encoderSpec):
"""Initialize a field with various parameters such as n, w, flag, dataType,
encoderType, and tag predicted field."""
self.name=name
#Default values
self.n, self.w = (100, 15)
self.encoderType,self.dataType,self.dataClassName = (None, None, None)
self.flag=''
self.isPredictedField=False
if encoderSpec is not None:
if 'n' in encoderSpec: self.n = encoderSpec.pop('n')
if 'w' in encoderSpec: self.w = encoderSpec.pop('w')
if 'flag' in encoderSpec: self.flag = encoderSpec.pop('flag')
if 'isPredictedField' in encoderSpec: self.isPredictedField\
= encoderSpec.pop('isPredictedField')
if 'dataClass' in encoderSpec: self.dataClass \
= encoderSpec.pop('dataClass')
if 'dataClassName' in encoderSpec: self.dataClassName \
= encoderSpec.pop('dataClassName')
if 'dataType' in encoderSpec: self.dataType = encoderSpec.pop('dataType')
if 'encoderType' in encoderSpec: self.encoderType \
= encoderSpec.pop('encoderType')
# ==========================================================================
# Setting up the encoders
if self.dataType is None and self.encoderType is None:
raise RuntimeError('At least one of dataType and encoderType must be specified')
assert(self.dataType is not None or self.encoderType is not None)
if self.dataType is None or self.encoderType is None:
self._setTypes(encoderSpec)
self._initializeEncoders(encoderSpec)
self.encodings=[]
self.values=[]
self.numRecords=0
self.numEncodings=0
def getDescription(self):
description = dict(n=self.n, w=self.w, flag=self.flag, isPredictedField=\
self.isPredictedField, dataClass=self.dataClassName, encoderType= \
self.encoderType, numRecords=self.numRecords, numEncodings=self.numEncodings)
return description
def addValues(self, values):
"""Add values to the field"""
for v in values:
self.addValue(v)
def addValue(self, value):
"""Add value to the field"""
self.values.append(value)
self.numRecords+=1
def encodeValue(self, value, toBeAdded=True):
"""Value is encoded as a sdr using the encoding parameters of the Field"""
encodedValue = np.array(self.encoder.encode(value), dtype=realDType)
if toBeAdded:
self.encodings.append(encodedValue)
self.numEncodings+=1
return encodedValue
def _setTypes(self, encoderSpec):
"""Set up the dataTypes and initialize encoders"""
if self.encoderType is None:
if self.dataType in ['int','float']:
self.encoderType='adaptiveScalar'
elif self.dataType=='string':
self.encoderType='category'
elif self.dataType in ['date', 'datetime']:
self.encoderType='date'
if self.dataType is None:
if self.encoderType in ['scalar','adaptiveScalar']:
self.dataType='float'
elif self.encoderType in ['category', 'enumeration']:
self.dataType='string'
elif self.encoderType in ['date', 'datetime']:
self.dataType='datetime'
def _initializeEncoders(self, encoderSpec):
""" Initialize the encoders"""
#Initializing scalar encoder
if self.encoderType in ['adaptiveScalar', 'scalar']:
if 'minval' in encoderSpec:
self.minval = encoderSpec.pop('minval')
else: self.minval=None
if 'maxval' in encoderSpec:
self.maxval = encoderSpec.pop('maxval')
else: self.maxval = None
self.encoder=adaptive_scalar.AdaptiveScalarEncoder(name='AdaptiveScalarEncoder', \
w=self.w, n=self.n, minval=self.minval, maxval=self.maxval, periodic=False, forced=True)
#Initializing category encoder
elif self.encoderType=='category':
self.encoder=sdr_category.SDRCategoryEncoder(name='categoryEncoder', \
w=self.w, n=self.n)
#Initializing date encoder
elif self.encoderType in ['date', 'datetime']:
self.encoder=date.DateEncoder(name='dateEncoder')
else:
raise RuntimeError('Error in constructing class object. Either encoder type'
'or dataType must be specified')
| agpl-3.0 |
campbe13/openhatch | vendor/packages/kombu/kombu/transport/virtual/exchange.py | 33 | 4580 | """
kombu.transport.virtual.exchange
================================
Implementations of the standard exchanges defined
by the AMQ protocol (excluding the `headers` exchange).
"""
from __future__ import absolute_import
from kombu.utils import escape_regex
import re
class ExchangeType(object):
"""Implements the specifics for an exchange type.
:param channel: AMQ Channel
"""
type = None
def __init__(self, channel):
self.channel = channel
def lookup(self, table, exchange, routing_key, default):
"""Lookup all queues matching `routing_key` in `exchange`.
:returns: `default` if no queues matched.
"""
raise NotImplementedError('subclass responsibility')
def prepare_bind(self, queue, exchange, routing_key, arguments):
"""Return tuple of `(routing_key, regex, queue)` to be stored
for bindings to this exchange."""
return routing_key, None, queue
def equivalent(self, prev, exchange, type,
durable, auto_delete, arguments):
"""Return true if `prev` and `exchange` is equivalent."""
return (type == prev['type'] and
durable == prev['durable'] and
auto_delete == prev['auto_delete'] and
(arguments or {}) == (prev['arguments'] or {}))
class DirectExchange(ExchangeType):
"""The `direct` exchange routes based on exact routing keys."""
type = 'direct'
def lookup(self, table, exchange, routing_key, default):
return [queue for rkey, _, queue in table
if rkey == routing_key]
def deliver(self, message, exchange, routing_key, **kwargs):
_lookup = self.channel._lookup
_put = self.channel._put
for queue in _lookup(exchange, routing_key):
_put(queue, message, **kwargs)
class TopicExchange(ExchangeType):
"""The `topic` exchange routes messages based on words separated by
dots, using wildcard characters ``*`` (any single word), and ``#``
(one or more words)."""
type = 'topic'
#: map of wildcard to regex conversions
wildcards = {'*': r'.*?[^\.]',
'#': r'.*?'}
#: compiled regex cache
_compiled = {}
def lookup(self, table, exchange, routing_key, default):
return [queue for rkey, pattern, queue in table
if self._match(pattern, routing_key)]
def deliver(self, message, exchange, routing_key, **kwargs):
_lookup = self.channel._lookup
_put = self.channel._put
deadletter = self.channel.deadletter_queue
for queue in [q for q in _lookup(exchange, routing_key)
if q and q != deadletter]:
_put(queue, message, **kwargs)
def prepare_bind(self, queue, exchange, routing_key, arguments):
return routing_key, self.key_to_pattern(routing_key), queue
def key_to_pattern(self, rkey):
"""Get the corresponding regex for any routing key."""
return '^%s$' % ('\.'.join(
self.wildcards.get(word, word)
for word in escape_regex(rkey, '.#*').split('.')
))
def _match(self, pattern, string):
"""Same as :func:`re.match`, except the regex is compiled and cached,
then reused on subsequent matches with the same pattern."""
try:
compiled = self._compiled[pattern]
except KeyError:
compiled = self._compiled[pattern] = re.compile(pattern, re.U)
return compiled.match(string)
class FanoutExchange(ExchangeType):
"""The `fanout` exchange implements broadcast messaging by delivering
copies of all messages to all queues bound to the exchange.
To support fanout the virtual channel needs to store the table
as shared state. This requires that the `Channel.supports_fanout`
attribute is set to true, and the `Channel._queue_bind` and
`Channel.get_table` methods are implemented. See the redis backend
for an example implementation of these methods.
"""
type = 'fanout'
def lookup(self, table, exchange, routing_key, default):
return [queue for _, _, queue in table]
def deliver(self, message, exchange, routing_key, **kwargs):
if self.channel.supports_fanout:
self.channel._put_fanout(
exchange, message, routing_key, **kwargs)
#: Map of standard exchange types and corresponding classes.
STANDARD_EXCHANGE_TYPES = {'direct': DirectExchange,
'topic': TopicExchange,
'fanout': FanoutExchange}
| agpl-3.0 |
umlaeute/tto-oracle | OracleClient.py | 1 | 1756 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2015, IOhannes m zmölnig, forum::für::umläute
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, see <http://www.gnu.org/licenses/>.
import requests
import json
if '__main__' == __name__:
def test_json(payload):
print("payload: %s" % (payload))
j=json.dumps(payload)
print("JSON : %s" % (j))
b=bytes(j, 'utf-8')
print("bytes : %s" % (b))
J=json.loads(b.decode())
print("JSON : %s" % (J))
D=J['comments']
print("data : %s (%s)" % (D, type(D)))
URL='http://localhost:8000'
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', type=str, default=URL, help='connection URL for oracle server (default: %s)' % URL)
parser.add_argument('text', nargs='+', help='some text you want to enter')
args=parser.parse_args()
payload={'comments': args.text}
#test_json(payload)
r = requests.post(args.url, data=json.dumps(payload))
print(r.text)
#t=o.speak(inputtext="The artist is stupid!", nouns=["oracle", "situtation"], adjectives=["solid", "nice"], truncate=True)
#print(ot.array2text(t))
| gpl-2.0 |
kseistrup/qtile | test/scripts/window.py | 16 | 3845 | # Copyright (c) 2011 Florian Mounier
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#!/usr/bin/env python
"""
This program is carefully crafted to exercise a number of corner-cases in
Qtile.
"""
from __future__ import print_function
import sys
import time
import xcffib
import xcffib.xproto
def configure(window):
window.configure(
width=100,
height=100,
x=0,
y=0,
border_width=1,
)
for i in range(20):
try:
conn = xcffib.connect(display=sys.argv[1])
except xcffib.ConnectionException:
time.sleep(0.1)
continue
except Exception as v:
print("Error opening test window: ", type(v), v, file=sys.stderr)
sys.exit(1)
break
else:
print("Could not open window on display %s" % (sys.argv[1]), file=sys.stderr)
sys.exit(1)
screen = conn.get_setup().roots[conn.pref_screen]
window = conn.generate_id()
background = conn.core.AllocColor(screen.default_colormap, 0x2828, 0x8383, 0xCECE).reply().pixel # Color "#2883ce"
conn.core.CreateWindow(xcffib.CopyFromParent, window, screen.root,
100, 100, 100, 100, 1,
xcffib.xproto.WindowClass.InputOutput, screen.root_visual,
xcffib.xproto.CW.BackPixel | xcffib.xproto.CW.EventMask,
[background, xcffib.xproto.EventMask.StructureNotify | xcffib.xproto.EventMask.Exposure])
conn.core.ChangeProperty(xcffib.xproto.PropMode.Replace,
window, xcffib.xproto.Atom.WM_NAME,
xcffib.xproto.Atom.STRING, 8, len(sys.argv[2]),
sys.argv[2])
wm_protocols = "WM_PROTOCOLS"
wm_protocols = conn.core.InternAtom(0, len(wm_protocols), wm_protocols).reply().atom
wm_delete_window = "WM_DELETE_WINDOW"
wm_delete_window = conn.core.InternAtom(0, len(wm_delete_window), wm_delete_window).reply().atom
conn.core.ChangeProperty(xcffib.xproto.PropMode.Replace,
window, wm_protocols,
xcffib.xproto.Atom.ATOM, 32, 1,
[wm_delete_window])
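# Advertise WM_DELETE_WINDOW through WM_PROTOCOLS so the window manager sends
# a ClientMessage on close instead of killing the connection; the event loop
# below exits when that message arrives.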
conn.core.ConfigureWindow(window,
xcffib.xproto.ConfigWindow.X | xcffib.xproto.ConfigWindow.Y |
xcffib.xproto.ConfigWindow.Width | xcffib.xproto.ConfigWindow.Height |
xcffib.xproto.ConfigWindow.BorderWidth,
[0, 0, 100, 100, 1])
conn.core.MapWindow(window)
conn.flush()
conn.core.ConfigureWindow(window,
xcffib.xproto.ConfigWindow.X | xcffib.xproto.ConfigWindow.Y |
xcffib.xproto.ConfigWindow.Width | xcffib.xproto.ConfigWindow.Height |
xcffib.xproto.ConfigWindow.BorderWidth,
[0, 0, 100, 100, 1])
try:
while 1:
event = conn.wait_for_event()
if event.__class__ == xcffib.xproto.ClientMessageEvent:
if conn.core.GetAtomName(event.data.data32[0]).reply().name.to_string() == "WM_DELETE_WINDOW":
sys.exit(1)
except xcffib.XcffibException:
pass
| mit |
KyoungRan/Django_React_ex | Django_React_Workshop-mbrochh/django/myvenv/lib/python3.4/site-packages/pip/_vendor/lockfile/pidlockfile.py | 536 | 6090 | # -*- coding: utf-8 -*-
# pidlockfile.py
#
# Copyright © 2008–2009 Ben Finney <[email protected]>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
import errno
import os
import time
from . import (LockBase, AlreadyLocked, LockFailed, NotLocked, NotMyLock,
LockTimeout)
class PIDLockFile(LockBase):
""" Lockfile implemented as a Unix PID file.
The lock file is a normal file named by the attribute `path`.
A lock's PID file contains a single line of text, containing
the process ID (PID) of the process that acquired the lock.
>>> lock = PIDLockFile('somefile')
>>> lock = PIDLockFile('somefile')
"""
def __init__(self, path, threaded=False, timeout=None):
# pid lockfiles don't support threaded operation, so always force
# False as the threaded arg.
LockBase.__init__(self, path, False, timeout)
self.unique_name = self.path
def read_pid(self):
""" Get the PID from the lock file.
"""
return read_pid_from_pidfile(self.path)
def is_locked(self):
""" Test if the lock is currently held.
The lock is held if the PID file for this lock exists.
"""
return os.path.exists(self.path)
def i_am_locking(self):
""" Test if the lock is held by the current process.
Returns ``True`` if the current process ID matches the
number stored in the PID file.
"""
return self.is_locked() and os.getpid() == self.read_pid()
def acquire(self, timeout=None):
""" Acquire the lock.
Creates the PID file for this lock, or raises an error if
the lock could not be acquired.
"""
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
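            # write_pid_to_pidfile() creates the file with O_CREAT | O_EXCL,
            # so an EEXIST error means another process already holds the lock.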
try:
write_pid_to_pidfile(self.path)
except OSError as exc:
if exc.errno == errno.EEXIST:
# The lock creation failed. Maybe sleep a bit.
if time.time() > end_time:
if timeout is not None and timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
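                    # Poll again after timeout/10 seconds when a positive
                    # timeout was given, otherwise after 0.1 seconds.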
time.sleep(timeout is not None and timeout / 10 or 0.1)
else:
raise LockFailed("failed to create %s" % self.path)
else:
return
def release(self):
""" Release the lock.
Removes the PID file to release the lock, or raises an
error if the current process does not hold the lock.
"""
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me" % self.path)
remove_existing_pidfile(self.path)
def break_lock(self):
""" Break an existing lock.
Removes the PID file if it already exists, otherwise does
nothing.
"""
remove_existing_pidfile(self.path)
def read_pid_from_pidfile(pidfile_path):
""" Read the PID recorded in the named PID file.
Read and return the numeric PID recorded as text in the named
PID file. If the PID file cannot be read, or if the content is
not a valid PID, return ``None``.
"""
pid = None
try:
pidfile = open(pidfile_path, 'r')
except IOError:
pass
else:
# According to the FHS 2.3 section on PID files in /var/run:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character.
#
# Programs that read PID files should be somewhat flexible
# in what they accept; i.e., they should ignore extra
# whitespace, leading zeroes, absence of the trailing
# newline, or additional lines in the PID file.
line = pidfile.readline().strip()
try:
pid = int(line)
except ValueError:
pass
pidfile.close()
return pid
def write_pid_to_pidfile(pidfile_path):
""" Write the PID in the named PID file.
Get the numeric process ID (“PID”) of the current process
and write it to the named file as a line of text.
"""
open_flags = (os.O_CREAT | os.O_EXCL | os.O_WRONLY)
open_mode = 0o644
pidfile_fd = os.open(pidfile_path, open_flags, open_mode)
pidfile = os.fdopen(pidfile_fd, 'w')
# According to the FHS 2.3 section on PID files in /var/run:
#
# The file must consist of the process identifier in
# ASCII-encoded decimal, followed by a newline character. For
# example, if crond was process number 25, /var/run/crond.pid
# would contain three characters: two, five, and newline.
pid = os.getpid()
pidfile.write("%s\n" % pid)
pidfile.close()
def remove_existing_pidfile(pidfile_path):
""" Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist.
"""
try:
os.remove(pidfile_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
pass
else:
raise
| mit |
Elandril/SickRage | lib/synchronousdeluge/rencode.py | 156 | 12982 |
"""
rencode -- Web safe object pickling/unpickling.
Public domain, Connelly Barnes 2006-2007.
The rencode module is a modified version of bencode from the
BitTorrent project. For complex, heterogeneous data structures with
many small elements, r-encodings take up significantly less space than
b-encodings:
>>> len(rencode.dumps({'a':0, 'b':[1,2], 'c':99}))
13
>>> len(bencode.bencode({'a':0, 'b':[1,2], 'c':99}))
26
The rencode format is not standardized, and may change with different
rencode module versions, so you should check that you are using the
same rencode version throughout your project.
"""
__version__ = '1.0.1'
__all__ = ['dumps', 'loads']
# Original bencode module by Petru Paler, et al.
#
# Modifications by Connelly Barnes:
#
# - Added support for floats (sent as 32-bit or 64-bit in network
# order), bools, None.
# - Allowed dict keys to be of any serializable type.
# - Lists/tuples are always decoded as tuples (thus, tuples can be
# used as dict keys).
# - Embedded extra information in the 'typecodes' to save some space.
# - Added a restriction on integer length, so that malicious hosts
# cannot pass us large integers which take a long time to decode.
#
# Licensed by Bram Cohen under the "MIT license":
#
# "Copyright (C) 2001-2002 Bram Cohen
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# The Software is provided "AS IS", without warranty of any kind,
# express or implied, including but not limited to the warranties of
# merchantability, fitness for a particular purpose and
# noninfringement. In no event shall the authors or copyright holders
# be liable for any claim, damages or other liability, whether in an
# action of contract, tort or otherwise, arising from, out of or in
# connection with the Software or the use or other dealings in the
# Software."
#
# (The rencode module is licensed under the above license as well).
#
import struct
import string
from threading import Lock
# Default number of bits for serialized floats, either 32 or 64 (also a parameter for dumps()).
DEFAULT_FLOAT_BITS = 32
# Maximum length of integer when written as base 10 string.
MAX_INT_LENGTH = 64
# The bencode 'typecodes' such as i, d, etc have been extended and
# relocated on the base-256 character set.
CHR_LIST = chr(59)
CHR_DICT = chr(60)
CHR_INT = chr(61)
CHR_INT1 = chr(62)
CHR_INT2 = chr(63)
CHR_INT4 = chr(64)
CHR_INT8 = chr(65)
CHR_FLOAT32 = chr(66)
CHR_FLOAT64 = chr(44)
CHR_TRUE = chr(67)
CHR_FALSE = chr(68)
CHR_NONE = chr(69)
CHR_TERM = chr(127)
# Positive integers with value embedded in typecode.
INT_POS_FIXED_START = 0
INT_POS_FIXED_COUNT = 44
# Dictionaries with length embedded in typecode.
DICT_FIXED_START = 102
DICT_FIXED_COUNT = 25
# Negative integers with value embedded in typecode.
INT_NEG_FIXED_START = 70
INT_NEG_FIXED_COUNT = 32
# Strings with length embedded in typecode.
STR_FIXED_START = 128
STR_FIXED_COUNT = 64
# Lists with length embedded in typecode.
LIST_FIXED_START = STR_FIXED_START+STR_FIXED_COUNT
LIST_FIXED_COUNT = 64
def decode_int(x, f):
f += 1
newf = x.index(CHR_TERM, f)
if newf - f >= MAX_INT_LENGTH:
raise ValueError('overflow')
try:
n = int(x[f:newf])
except (OverflowError, ValueError):
n = long(x[f:newf])
if x[f] == '-':
if x[f + 1] == '0':
raise ValueError
elif x[f] == '0' and newf != f+1:
raise ValueError
return (n, newf+1)
def decode_intb(x, f):
f += 1
return (struct.unpack('!b', x[f:f+1])[0], f+1)
def decode_inth(x, f):
f += 1
return (struct.unpack('!h', x[f:f+2])[0], f+2)
def decode_intl(x, f):
f += 1
return (struct.unpack('!l', x[f:f+4])[0], f+4)
def decode_intq(x, f):
f += 1
return (struct.unpack('!q', x[f:f+8])[0], f+8)
def decode_float32(x, f):
f += 1
n = struct.unpack('!f', x[f:f+4])[0]
return (n, f+4)
def decode_float64(x, f):
f += 1
n = struct.unpack('!d', x[f:f+8])[0]
return (n, f+8)
def decode_string(x, f):
colon = x.index(':', f)
try:
n = int(x[f:colon])
except (OverflowError, ValueError):
n = long(x[f:colon])
if x[f] == '0' and colon != f+1:
raise ValueError
colon += 1
s = x[colon:colon+n]
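    # Opportunistically decode as UTF-8: if the text is shorter than the raw
    # bytes it contained multi-byte characters, so return the unicode form;
    # otherwise (pure ASCII, or invalid UTF-8) keep the byte string.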
try:
t = s.decode("utf8")
if len(t) != len(s):
s = t
except UnicodeDecodeError:
pass
return (s, colon+n)
def decode_list(x, f):
r, f = [], f+1
while x[f] != CHR_TERM:
v, f = decode_func[x[f]](x, f)
r.append(v)
return (tuple(r), f + 1)
def decode_dict(x, f):
r, f = {}, f+1
while x[f] != CHR_TERM:
k, f = decode_func[x[f]](x, f)
r[k], f = decode_func[x[f]](x, f)
return (r, f + 1)
def decode_true(x, f):
return (True, f+1)
def decode_false(x, f):
return (False, f+1)
def decode_none(x, f):
return (None, f+1)
decode_func = {}
decode_func['0'] = decode_string
decode_func['1'] = decode_string
decode_func['2'] = decode_string
decode_func['3'] = decode_string
decode_func['4'] = decode_string
decode_func['5'] = decode_string
decode_func['6'] = decode_string
decode_func['7'] = decode_string
decode_func['8'] = decode_string
decode_func['9'] = decode_string
decode_func[CHR_LIST ] = decode_list
decode_func[CHR_DICT ] = decode_dict
decode_func[CHR_INT ] = decode_int
decode_func[CHR_INT1 ] = decode_intb
decode_func[CHR_INT2 ] = decode_inth
decode_func[CHR_INT4 ] = decode_intl
decode_func[CHR_INT8 ] = decode_intq
decode_func[CHR_FLOAT32] = decode_float32
decode_func[CHR_FLOAT64] = decode_float64
decode_func[CHR_TRUE ] = decode_true
decode_func[CHR_FALSE ] = decode_false
decode_func[CHR_NONE ] = decode_none
def make_fixed_length_string_decoders():
def make_decoder(slen):
def f(x, f):
s = x[f+1:f+1+slen]
try:
t = s.decode("utf8")
if len(t) != len(s):
s = t
except UnicodeDecodeError:
pass
return (s, f+1+slen)
return f
for i in range(STR_FIXED_COUNT):
decode_func[chr(STR_FIXED_START+i)] = make_decoder(i)
make_fixed_length_string_decoders()
def make_fixed_length_list_decoders():
def make_decoder(slen):
def f(x, f):
r, f = [], f+1
for i in range(slen):
v, f = decode_func[x[f]](x, f)
r.append(v)
return (tuple(r), f)
return f
for i in range(LIST_FIXED_COUNT):
decode_func[chr(LIST_FIXED_START+i)] = make_decoder(i)
make_fixed_length_list_decoders()
def make_fixed_length_int_decoders():
def make_decoder(j):
def f(x, f):
return (j, f+1)
return f
for i in range(INT_POS_FIXED_COUNT):
decode_func[chr(INT_POS_FIXED_START+i)] = make_decoder(i)
for i in range(INT_NEG_FIXED_COUNT):
decode_func[chr(INT_NEG_FIXED_START+i)] = make_decoder(-1-i)
make_fixed_length_int_decoders()
def make_fixed_length_dict_decoders():
def make_decoder(slen):
def f(x, f):
r, f = {}, f+1
for j in range(slen):
k, f = decode_func[x[f]](x, f)
r[k], f = decode_func[x[f]](x, f)
return (r, f)
return f
for i in range(DICT_FIXED_COUNT):
decode_func[chr(DICT_FIXED_START+i)] = make_decoder(i)
make_fixed_length_dict_decoders()
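# Note: the encode_dict defined immediately below appears to be dead code; it
# is shadowed by the fixed-length-aware encode_dict defined further down, and
# only that later definition ends up registered in encode_func.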
def encode_dict(x,r):
r.append(CHR_DICT)
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
r.append(CHR_TERM)
def loads(x):
try:
r, l = decode_func[x[0]](x, 0)
except (IndexError, KeyError):
raise ValueError
if l != len(x):
raise ValueError
return r
from types import StringType, IntType, LongType, DictType, ListType, TupleType, FloatType, NoneType, UnicodeType
def encode_int(x, r):
if 0 <= x < INT_POS_FIXED_COUNT:
r.append(chr(INT_POS_FIXED_START+x))
elif -INT_NEG_FIXED_COUNT <= x < 0:
r.append(chr(INT_NEG_FIXED_START-1-x))
elif -128 <= x < 128:
r.extend((CHR_INT1, struct.pack('!b', x)))
elif -32768 <= x < 32768:
r.extend((CHR_INT2, struct.pack('!h', x)))
elif -2147483648 <= x < 2147483648:
r.extend((CHR_INT4, struct.pack('!l', x)))
elif -9223372036854775808 <= x < 9223372036854775808:
r.extend((CHR_INT8, struct.pack('!q', x)))
else:
s = str(x)
if len(s) >= MAX_INT_LENGTH:
raise ValueError('overflow')
r.extend((CHR_INT, s, CHR_TERM))
def encode_float32(x, r):
r.extend((CHR_FLOAT32, struct.pack('!f', x)))
def encode_float64(x, r):
r.extend((CHR_FLOAT64, struct.pack('!d', x)))
def encode_bool(x, r):
r.extend({False: CHR_FALSE, True: CHR_TRUE}[bool(x)])
def encode_none(x, r):
r.extend(CHR_NONE)
def encode_string(x, r):
if len(x) < STR_FIXED_COUNT:
r.extend((chr(STR_FIXED_START + len(x)), x))
else:
r.extend((str(len(x)), ':', x))
def encode_unicode(x, r):
encode_string(x.encode("utf8"), r)
def encode_list(x, r):
if len(x) < LIST_FIXED_COUNT:
r.append(chr(LIST_FIXED_START + len(x)))
for i in x:
encode_func[type(i)](i, r)
else:
r.append(CHR_LIST)
for i in x:
encode_func[type(i)](i, r)
r.append(CHR_TERM)
def encode_dict(x,r):
if len(x) < DICT_FIXED_COUNT:
r.append(chr(DICT_FIXED_START + len(x)))
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
else:
r.append(CHR_DICT)
for k, v in x.items():
encode_func[type(k)](k, r)
encode_func[type(v)](v, r)
r.append(CHR_TERM)
encode_func = {}
encode_func[IntType] = encode_int
encode_func[LongType] = encode_int
encode_func[StringType] = encode_string
encode_func[ListType] = encode_list
encode_func[TupleType] = encode_list
encode_func[DictType] = encode_dict
encode_func[NoneType] = encode_none
encode_func[UnicodeType] = encode_unicode
lock = Lock()
try:
from types import BooleanType
encode_func[BooleanType] = encode_bool
except ImportError:
pass
def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
"""
Dump data structure to str.
Here float_bits is either 32 or 64.
"""
lock.acquire()
try:
if float_bits == 32:
encode_func[FloatType] = encode_float32
elif float_bits == 64:
encode_func[FloatType] = encode_float64
else:
raise ValueError('Float bits (%d) is not 32 or 64' % float_bits)
r = []
encode_func[type(x)](x, r)
finally:
lock.release()
return ''.join(r)
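# Illustrative round trip (example values, not taken from the tests below):
#   payload = dumps({'a': 1, 'b': (2.5, None)}, float_bits=64)
#   assert loads(payload) == {'a': 1, 'b': (2.5, None)}
# Note that decode_list returns tuples, so only tuple-valued containers
# round-trip exactly.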
def test():
f1 = struct.unpack('!f', struct.pack('!f', 25.5))[0]
f2 = struct.unpack('!f', struct.pack('!f', 29.3))[0]
f3 = struct.unpack('!f', struct.pack('!f', -0.6))[0]
L = (({'a':15, 'bb':f1, 'ccc':f2, '':(f3,(),False,True,'')},('a',10**20),tuple(range(-100000,100000)),'b'*31,'b'*62,'b'*64,2**30,2**33,2**62,2**64,2**30,2**33,2**62,2**64,False,False, True, -1, 2, 0),)
assert loads(dumps(L)) == L
d = dict(zip(range(-100000,100000),range(-100000,100000)))
d.update({'a':20, 20:40, 40:41, f1:f2, f2:f3, f3:False, False:True, True:False})
L = (d, {}, {5:6}, {7:7,True:8}, {9:10, 22:39, 49:50, 44: ''})
assert loads(dumps(L)) == L
L = ('', 'a'*10, 'a'*100, 'a'*1000, 'a'*10000, 'a'*100000, 'a'*1000000, 'a'*10000000)
assert loads(dumps(L)) == L
L = tuple([dict(zip(range(n),range(n))) for n in range(100)]) + ('b',)
assert loads(dumps(L)) == L
L = tuple([dict(zip(range(n),range(-n,0))) for n in range(100)]) + ('b',)
assert loads(dumps(L)) == L
L = tuple([tuple(range(n)) for n in range(100)]) + ('b',)
assert loads(dumps(L)) == L
L = tuple(['a'*n for n in range(1000)]) + ('b',)
assert loads(dumps(L)) == L
L = tuple(['a'*n for n in range(1000)]) + (None,True,None)
assert loads(dumps(L)) == L
assert loads(dumps(None)) == None
assert loads(dumps({None:None})) == {None:None}
assert 1e-10<abs(loads(dumps(1.1))-1.1)<1e-6
assert 1e-10<abs(loads(dumps(1.1,32))-1.1)<1e-6
assert abs(loads(dumps(1.1,64))-1.1)<1e-12
assert loads(dumps(u"Hello World!!"))
try:
import psyco
psyco.bind(dumps)
psyco.bind(loads)
except ImportError:
pass
if __name__ == '__main__':
test()
| gpl-3.0 |
dongjoon-hyun/tensorflow | tensorflow/contrib/framework/python/framework/tensor_util.py | 25 | 11885 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.deprecation import deprecated
__all__ = [
'assert_same_float_dtype',
'assert_scalar',
'assert_scalar_int',
'convert_to_tensor_or_sparse_tensor',
'is_tensor',
'reduce_sum_n',
'remove_squeezable_dimensions',
'with_shape',
'with_same_shape']
# Temporary for backwards compatibility
is_tensor = tensor_util.is_tensor
assert_same_float_dtype = check_ops.assert_same_float_dtype
assert_scalar = check_ops.assert_scalar
convert_to_tensor_or_sparse_tensor = (
sparse_tensor.convert_to_tensor_or_sparse_tensor)
def reduce_sum_n(tensors, name=None):
"""Reduce tensors to a scalar sum.
This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
adds them via `tf.add_n`.
Args:
tensors: List of tensors, all of the same numeric type.
name: Tensor name, and scope for all other ops.
Returns:
    The scalar sum of the tensors in `tensors`.
Raises:
    ValueError: if `tensors` is missing or empty.
"""
if not tensors:
raise ValueError('No tensors provided.')
with ops.name_scope(name, 'reduce_sum_n', tensors) as name_scope:
tensors = [
math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
if len(tensors) == 1:
return tensors[0]
return math_ops.add_n(tensors, name=name_scope)
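# Illustrative usage (assumed, not part of the original file): for tensors
# t1, t2, t3, reduce_sum_n([t1, t2, t3]) produces the same value as
# math_ops.add_n([math_ops.reduce_sum(t1), math_ops.reduce_sum(t2),
# math_ops.reduce_sum(t3)]).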
@deprecated(
None, "Please switch to remove_squeezable_dimensions from "
"tf.confusion_matrix. Note that the order of the inputs and outputs of "
"labels and predictions have also been switched.")
def remove_squeezable_dimensions(predictions, labels, name=None):
"""Squeeze last dim if ranks of `predictions` and `labels` differ by 1.
This will use static shape if available. Otherwise, it will add graph
operations, which could result in a performance hit.
Args:
predictions: Predicted values, a `Tensor` of arbitrary dimensions.
labels: Label values, a `Tensor` whose dimensions match `predictions`.
name: Name of the op.
Returns:
Tuple of `predictions` and `labels`, possibly with last dim squeezed.
"""
with ops.name_scope(name, 'remove_squeezable_dimensions',
[predictions, labels]):
predictions = ops.convert_to_tensor(predictions)
labels = ops.convert_to_tensor(labels)
predictions_shape = predictions.get_shape()
predictions_rank = predictions_shape.ndims
labels_shape = labels.get_shape()
labels_rank = labels_shape.ndims
if (labels_rank is not None) and (predictions_rank is not None):
# Use static rank.
rank_diff = predictions_rank - labels_rank
if rank_diff == -1:
labels = array_ops.squeeze(labels, [-1])
elif rank_diff == 1:
predictions = array_ops.squeeze(predictions, [-1])
return predictions, labels
# Use dynamic rank.
rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
if (predictions_rank is None) or (
predictions_shape.dims[-1].is_compatible_with(1)):
predictions = control_flow_ops.cond(
math_ops.equal(1, rank_diff),
lambda: array_ops.squeeze(predictions, [-1]),
lambda: predictions)
if (labels_rank is None) or (
labels_shape.dims[-1].is_compatible_with(1)):
labels = control_flow_ops.cond(
math_ops.equal(-1, rank_diff),
lambda: array_ops.squeeze(labels, [-1]),
lambda: labels)
return predictions, labels
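# Illustrative effect (assumed shapes): with predictions of shape [batch, 1]
# and labels of shape [batch], the trailing dimension of predictions is
# squeezed so both are returned with shape [batch]; inputs whose ranks
# already match are returned unchanged.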
def _shape_tensor_compatible(expected_shape, actual_shape):
"""Returns whether actual_shape is compatible with expected_shape.
Note that -1 in `expected_shape` is recognized as unknown dimension.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_shape: Shape of the tensor to test.
Returns:
New tensor.
"""
with ops.name_scope('shape_tensor_equal',
values=[expected_shape, actual_shape]) as scope:
return math_ops.reduce_all(
math_ops.logical_or(
math_ops.equal(expected_shape, -1),
math_ops.equal(expected_shape, actual_shape, 'equal'),
name='exclude_partial_shape'),
name=scope)
def _is_rank(expected_rank, actual_tensor):
"""Returns whether actual_tensor's rank is expected_rank.
Args:
expected_rank: Integer defining the expected rank, or tensor of same.
actual_tensor: Tensor to test.
Returns:
New tensor.
"""
with ops.name_scope('is_rank', values=[actual_tensor]) as scope:
expected = ops.convert_to_tensor(expected_rank, name='expected')
actual = array_ops.rank(actual_tensor, name='actual')
return math_ops.equal(expected, actual, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
"""Returns whether actual_tensor's shape is expected_shape.
Note that -1 in `expected_shape` is recognized as unknown dimension.
Args:
expected_shape: Integer list defining the expected shape, or tensor of same.
actual_tensor: Tensor to test.
actual_shape: Shape of actual_tensor, if we already have it.
Returns:
New tensor.
"""
with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
if actual_shape is None:
actual_shape = array_ops.shape(actual_tensor, name='actual')
shape_equal = _shape_tensor_compatible(expected_shape, actual_shape)
return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
"""Asserts actual_tensor's shape is expected_shape.
Note that unknown dimension in `expected_shape` will be ignored.
Args:
expected_shape: List of integers defining the expected shape, or tensor of
same.
actual_tensor: Tensor to test.
Returns:
New assert tensor.
"""
with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
actual_shape = array_ops.shape(actual_tensor, name='actual')
if (isinstance(expected_shape, tensor_shape.TensorShape)
and not expected_shape.is_fully_defined()):
expected_shape = [d if d else -1 for d in expected_shape.as_list()]
is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
return control_flow_ops.Assert(
is_shape, [
'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
expected_shape,
actual_shape
], name=scope)
def with_same_shape(expected_tensor, tensor):
"""Assert tensors are the same shape, from the same graph.
Args:
expected_tensor: Tensor with expected shape.
tensor: Tensor of actual values.
Returns:
The original tensor argument, possibly with assert ops added.
"""
with ops.name_scope('%s/' % tensor.op.name, values=[expected_tensor, tensor]):
tensor_shape = expected_tensor.get_shape()
expected_shape = (
tensor_shape.as_list() if tensor_shape.is_fully_defined()
else array_ops.shape(expected_tensor, name='expected_shape'))
return with_shape(expected_shape, tensor)
def with_shape(expected_shape, tensor):
"""Asserts tensor has expected shape.
  If tensor shape and expected_shape are fully defined, assert they match.
Otherwise, add assert op that will validate the shape when tensor is
evaluated, and set shape on tensor.
Args:
expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
of same.
tensor: Tensor whose shape we're validating.
Returns:
tensor, perhaps with a dependent assert operation.
Raises:
ValueError: if tensor has an invalid shape.
"""
if isinstance(tensor, sparse_tensor.SparseTensor):
raise ValueError('SparseTensor not supported.')
# Shape type must be 1D int32.
if tensor_util.is_tensor(expected_shape):
if expected_shape.dtype.base_dtype != dtypes.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
if isinstance(expected_shape, (list, tuple)):
if not expected_shape:
expected_shape = np.asarray([], dtype=np.int32)
else:
np_expected_shape = np.asarray(expected_shape)
expected_shape = (
np.asarray(expected_shape, dtype=np.int32)
if np_expected_shape.dtype == np.int64 else np_expected_shape)
if isinstance(expected_shape, np.ndarray):
if expected_shape.ndim > 1:
raise ValueError(
'Invalid rank %s for shape %s expected of tensor %s.' % (
expected_shape.ndim, expected_shape, tensor.name))
if expected_shape.dtype != np.int32:
raise ValueError(
'Invalid dtype %s for shape %s expected of tensor %s.' % (
expected_shape.dtype, expected_shape, tensor.name))
actual_shape = tensor.get_shape()
if (not actual_shape.is_fully_defined()
or tensor_util.is_tensor(expected_shape)):
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
if (not tensor_util.is_tensor(expected_shape)
and (len(expected_shape) < 1)):
# TODO(irving): Remove scalar special case
return array_ops.reshape(tensor, [])
with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
result = array_ops.identity(tensor)
if not tensor_util.is_tensor(expected_shape):
result.set_shape(expected_shape)
return result
if (not tensor_util.is_tensor(expected_shape) and
not actual_shape.is_compatible_with(expected_shape)):
if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
# TODO(irving): Remove scalar special case.
with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
return array_ops.reshape(tensor, [])
raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
tensor.name, expected_shape, actual_shape))
return tensor
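# Illustrative usage (assumed, with made-up shapes):
#   checked = with_shape([32, 3], some_tensor)
# returns the tensor unchanged when its static shape already matches, and
# otherwise returns an identity of it guarded by a runtime shape assertion.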
def assert_scalar_int(tensor, name=None):
"""Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.
Args:
tensor: `Tensor` to test.
name: Name of the op and of the new `Tensor` if one is created.
Returns:
`tensor`, for chaining.
Raises:
ValueError: if `tensor` is not 0-D, of integer type.
"""
with ops.name_scope(name, 'assert_scalar_int', [tensor]) as name_scope:
tensor = ops.convert_to_tensor(tensor)
data_type = tensor.dtype
if not data_type.base_dtype.is_integer:
raise ValueError('Expected integer type for %s, received type: %s.'
% (tensor.name, data_type))
return check_ops.assert_scalar(tensor, name=name_scope)
| apache-2.0 |
aitjcize/PyTox | tools/apicomplete.py | 2 | 1029 | # simple script to test the completeness of the python bindings:
from sys import version_info
if version_info[0] < 3:
from urllib import urlopen
else:
from urllib.request import urlopen
import re
TOXURL =\
"https://raw.githubusercontent.com/irungentoo/toxcore/master/toxcore/tox.h"
PYTOXURL =\
"https://raw.githubusercontent.com/aitjcize/PyTox/master/pytox/core.c"
PYTOXURL =\
"https://raw.githubusercontent.com/kitech/PyTox/newapi/pytox/core.c"
toxsrc = urlopen(TOXURL).read()
pytoxsrc = urlopen(PYTOXURL).read()
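# Note: PYTOXURL is assigned twice above; the second assignment wins, so
# pytoxsrc is fetched from the kitech/PyTox "newapi" branch rather than
# aitjcize/PyTox master.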
res = None
if version_info[0] < 3:
res = re.findall(r"\n[_a-z0-9]+ (tox_[\_a-z]+\()", str(toxsrc))
else:
res = re.findall(r'[_a-z0-9]+ (tox_[\_a-z]+\()', str(toxsrc))
incl = 0
excl = []
for function in res:
if function in str(pytoxsrc):
incl += 1
else:
excl.append(function)
print(
"PyTox includes %d out of %d functions found in tox.h" % (incl, len(res))
)
print("Not included are the functions:")
for item in excl:
print(" %s" % item[:-1])
| gpl-3.0 |
pierreg/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/models/nn.py | 190 | 1567 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple baseline feed-forward neural network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.python import hybrid_model
from tensorflow.contrib.tensor_forest.hybrid.python.layers import fully_connected
from tensorflow.python.training import adagrad
class NN(hybrid_model.HybridModel):
"""A simple baseline feed-forward neural network."""
def __init__(self,
params,
device_assigner=None,
optimizer_class=adagrad.AdagradOptimizer,
**kwargs):
super(NN, self).__init__(
params,
device_assigner=device_assigner,
optimizer_class=optimizer_class,
**kwargs)
self.layers = [fully_connected.FullyConnectedLayer(
params, 0, device_assigner=device_assigner)]
| apache-2.0 |
mxOBS/deb-pkg_trusty_chromium-browser | third_party/pycoverage/coverage/execfile.py | 209 | 5865 | """Execute files of Python code."""
import imp, marshal, os, sys
from coverage.backward import exec_code_object, open_source
from coverage.misc import ExceptionDuringRun, NoCode, NoSource
try:
# In Py 2.x, the builtins were in __builtin__
BUILTINS = sys.modules['__builtin__']
except KeyError:
# In Py 3.x, they're in builtins
BUILTINS = sys.modules['builtins']
def rsplit1(s, sep):
"""The same as s.rsplit(sep, 1), but works in 2.3"""
parts = s.split(sep)
return sep.join(parts[:-1]), parts[-1]
def run_python_module(modulename, args):
"""Run a python module, as though with ``python -m name args...``.
`modulename` is the name of the module, possibly a dot-separated name.
`args` is the argument array to present as sys.argv, including the first
element naming the module being executed.
"""
openfile = None
glo, loc = globals(), locals()
try:
try:
# Search for the module - inside its parent package, if any - using
# standard import mechanics.
if '.' in modulename:
packagename, name = rsplit1(modulename, '.')
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
else:
packagename, name = None, modulename
searchpath = None # "top-level search" in imp.find_module()
openfile, pathname, _ = imp.find_module(name, searchpath)
# Complain if this is a magic non-file module.
if openfile is None and pathname is None:
raise NoSource(
"module does not live in a file: %r" % modulename
)
# If `modulename` is actually a package, not a mere module, then we
# pretend to be Python 2.7 and try running its __main__.py script.
if openfile is None:
packagename = modulename
name = '__main__'
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
openfile, pathname, _ = imp.find_module(name, searchpath)
except ImportError:
_, err, _ = sys.exc_info()
raise NoSource(str(err))
finally:
if openfile:
openfile.close()
# Finally, hand the file off to run_python_file for execution.
pathname = os.path.abspath(pathname)
args[0] = pathname
run_python_file(pathname, args, package=packagename)
def run_python_file(filename, args, package=None):
"""Run a python file as if it were the main program on the command line.
`filename` is the path to the file to execute, it need not be a .py file.
`args` is the argument array to present as sys.argv, including the first
element naming the file being executed. `package` is the name of the
enclosing package, if any.
"""
# Create a module to serve as __main__
old_main_mod = sys.modules['__main__']
main_mod = imp.new_module('__main__')
sys.modules['__main__'] = main_mod
main_mod.__file__ = filename
if package:
main_mod.__package__ = package
main_mod.__builtins__ = BUILTINS
# Set sys.argv properly.
old_argv = sys.argv
sys.argv = args
try:
# Make a code object somehow.
if filename.endswith(".pyc") or filename.endswith(".pyo"):
code = make_code_from_pyc(filename)
else:
code = make_code_from_py(filename)
# Execute the code object.
try:
exec_code_object(code, main_mod.__dict__)
except SystemExit:
# The user called sys.exit(). Just pass it along to the upper
# layers, where it will be handled.
raise
except:
# Something went wrong while executing the user code.
# Get the exc_info, and pack them into an exception that we can
# throw up to the outer loop. We peel two layers off the traceback
# so that the coverage.py code doesn't appear in the final printed
# traceback.
typ, err, tb = sys.exc_info()
raise ExceptionDuringRun(typ, err, tb.tb_next.tb_next)
finally:
# Restore the old __main__
sys.modules['__main__'] = old_main_mod
# Restore the old argv and path
sys.argv = old_argv
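# Illustrative call (hypothetical path and arguments):
#   run_python_file('/tmp/prog.py', ['/tmp/prog.py', '--verbose'])
# runs /tmp/prog.py as if it were __main__ with sys.argv set accordingly,
# then restores the previous __main__ module and sys.argv.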
def make_code_from_py(filename):
"""Get source from `filename` and make a code object of it."""
# Open the source file.
try:
source_file = open_source(filename)
except IOError:
raise NoSource("No file to run: %r" % filename)
try:
source = source_file.read()
finally:
source_file.close()
# We have the source. `compile` still needs the last line to be clean,
# so make sure it is, then compile a code object from it.
if not source or source[-1] != '\n':
source += '\n'
code = compile(source, filename, "exec")
return code
def make_code_from_pyc(filename):
"""Get a code object from a .pyc file."""
try:
fpyc = open(filename, "rb")
except IOError:
raise NoCode("No file to run: %r" % filename)
try:
# First four bytes are a version-specific magic number. It has to
# match or we won't run the file.
magic = fpyc.read(4)
if magic != imp.get_magic():
raise NoCode("Bad magic number in .pyc file")
# Skip the junk in the header that we don't need.
fpyc.read(4) # Skip the moddate.
if sys.version_info >= (3, 3):
# 3.3 added another long to the header (size), skip it.
fpyc.read(4)
# The rest of the file is the code object we want.
code = marshal.load(fpyc)
finally:
fpyc.close()
return code
| bsd-3-clause |
mrquim/mrquimrepo | repo/script.module.kodi65/lib/kodi65/youtube.py | 4 | 9607 | # -*- coding: utf8 -*-
# Copyright (C) 2015 - Philipp Temminghoff <[email protected]>
# This program is Free Software see LICENSE file for details
import urllib
import itertools
from kodi65 import utils
from kodi65 import VideoItem
from kodi65 import ItemList
YT_KEY = 'AIzaSyB-BOZ_o09NLVwq_lMskvvj1olDkFI4JK0'
BASE_URL = "https://www.googleapis.com/youtube/v3/"
PLUGIN_BASE = "plugin://script.extendedinfo/?info="
def handle_videos(results, extended=False):
"""
    process video api result to ItemList
"""
videos = ItemList(content_type="videos")
for item in results:
snippet = item["snippet"]
thumb = snippet["thumbnails"]["high"]["url"] if "thumbnails" in snippet else ""
try:
video_id = item["id"]["videoId"]
except Exception:
video_id = snippet["resourceId"]["videoId"]
video = VideoItem(label=snippet["title"],
path=PLUGIN_BASE + 'youtubevideo&&id=%s' % video_id)
video.set_infos({'plot': snippet["description"],
'mediatype': "video",
'premiered': snippet["publishedAt"][:10]})
video.set_artwork({'thumb': thumb})
video.set_playable(True)
video.set_properties({'channel_title': snippet["channelTitle"],
'channel_id': snippet["channelId"],
'type': "video",
'youtube_id': video_id})
videos.append(video)
if not extended:
return videos
params = {"part": "contentDetails,statistics",
"id": ",".join([i.get_property("youtube_id") for i in videos])}
ext_results = get_data(method="videos",
params=params)
if not ext_results:
return videos
for item in videos:
for ext_item in ext_results["items"]:
if not item.get_property("youtube_id") == ext_item['id']:
continue
details = ext_item['contentDetails']
stats = ext_item['statistics']
likes = stats.get('likeCount')
dislikes = stats.get('dislikeCount')
item.update_infos({"duration": get_duration_in_seconds(details['duration'])})
props = {"duration": details['duration'][2:].lower(),
"formatted_duration": get_formatted_duration(details['duration']),
"dimension": details['dimension'],
"definition": details['definition'],
"caption": details['caption'],
"viewcount": utils.millify(stats['viewCount']),
"likes": likes,
"dislikes": dislikes}
item.update_properties(props)
if likes and dislikes:
vote_count = int(likes) + int(dislikes)
if vote_count > 0:
item.set_info("rating", round(float(likes) / vote_count * 10, 1))
break
return videos
def get_duration_in_seconds(duration):
"""
convert youtube duration string to seconds int
"""
duration = duration[2:-1].replace("H", "M").split("M")
if len(duration) == 3:
return int(duration[0]) * 3600 + int(duration[1]) * 60 + int(duration[2])
elif len(duration) == 2:
return int(duration[0]) * 60 + int(duration[1])
else:
return int(duration[0])
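# Illustrative conversions (assumed inputs):
#   get_duration_in_seconds("PT1H2M3S") -> 3723
#   get_duration_in_seconds("PT4M10S") -> 250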
def get_formatted_duration(duration):
"""
convert youtube duration string to formatted duration
"""
duration = duration[2:-1].replace("H", "M").split("M")
if len(duration) == 3:
return "{}:{}:{}".format(duration[0].zfill(2), duration[1].zfill(2), duration[2].zfill(2))
elif len(duration) == 2:
return "{}:{}".format(duration[0].zfill(2), duration[1].zfill(2))
else:
return "00:{}".format(duration[0].zfill(2))
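# Illustrative conversions (assumed inputs):
#   get_formatted_duration("PT1H2M3S") -> "01:02:03"
#   get_formatted_duration("PT4M10S") -> "04:10"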
def handle_playlists(results):
"""
process playlist api result to ItemList
"""
playlists = ItemList(content_type="videos")
for item in results:
snippet = item["snippet"]
thumb = snippet["thumbnails"]["high"]["url"] if "thumbnails" in snippet else ""
try:
playlist_id = item["id"]["playlistId"]
except Exception:
playlist_id = snippet["resourceId"]["playlistId"]
playlist = VideoItem(label=snippet["title"],
path=PLUGIN_BASE + 'youtubeplaylist&&id=%s' % playlist_id)
playlist.set_infos({'plot': snippet["description"],
"mediatype": "video",
'premiered': snippet["publishedAt"][:10]})
playlist.set_art("thumb", thumb)
playlist.set_properties({'youtube_id': playlist_id,
'channel_title': snippet["channelTitle"],
'type': "playlist",
'live': snippet["liveBroadcastContent"].replace("none", "")})
playlists.append(playlist)
params = {"id": ",".join([i.get_property("youtube_id") for i in playlists]),
"part": "contentDetails"}
ext_results = get_data(method="playlists",
params=params)
for item, ext_item in itertools.product(playlists, ext_results["items"]):
if item.get_property("youtube_id") == ext_item['id']:
item.set_property("itemcount", ext_item['contentDetails']['itemCount'])
return playlists
def handle_channels(results):
"""
process channel api result to ItemList
"""
channels = ItemList(content_type="videos")
for item in results:
snippet = item["snippet"]
thumb = snippet["thumbnails"]["high"]["url"] if "thumbnails" in snippet else ""
try:
channel_id = item["id"]["channelId"]
except Exception:
channel_id = snippet["resourceId"]["channelId"]
channel = VideoItem(label=snippet["title"],
path=PLUGIN_BASE + 'youtubechannel&&id=%s' % channel_id)
channel.set_infos({'plot': snippet["description"],
'mediatype': "video",
'premiered': snippet["publishedAt"][:10]})
channel.set_art("thumb", thumb)
channel.set_properties({"youtube_id": channel_id,
"type": "channel"})
channels.append(channel)
channel_ids = [item.get_property("youtube_id") for item in channels]
params = {"id": ",".join(channel_ids),
"part": "contentDetails,statistics,brandingSettings"}
ext_results = get_data(method="channels",
params=params)
for item, ext_item in itertools.product(channels, ext_results["items"]):
if item.get_property("youtube_id") == ext_item['id']:
item.set_property("itemcount", ext_item['statistics']['videoCount'])
item.set_art("fanart", ext_item["brandingSettings"]["image"].get("bannerTvMediumImageUrl"))
return channels
def get_data(method, params=None, cache_days=0.5):
"""
fetch data from youtube API
"""
params = params if params else {}
params["key"] = YT_KEY
params = {k: unicode(v).encode('utf-8') for k, v in params.iteritems() if v}
url = "{base_url}{method}?{params}".format(base_url=BASE_URL,
method=method,
params=urllib.urlencode(params))
return utils.get_JSON_response(url=url,
cache_days=cache_days,
folder="YouTube")
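# Illustrative call (hypothetical video id):
#   data = get_data(method="videos",
#                   params={"part": "contentDetails", "id": "SOME_VIDEO_ID"})
# builds BASE_URL + "videos?part=...&id=...&key=..." and fetches it through
# the cached JSON helper.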
def search(search_str="", hd="", orderby="relevance", limit=40, extended=True, page="", filters=None, media_type="video"):
"""
returns ItemList according to search term, filters etc.
"""
params = {"part": "id,snippet",
"maxResults": limit,
"type": media_type,
"order": orderby,
"pageToken": page,
"hd": str(hd and not hd == "false"),
"q": search_str.replace('"', '')}
results = get_data(method="search",
params=utils.merge_dicts(params, filters if filters else {}))
if not results:
return None
if media_type == "video":
listitems = handle_videos(results["items"], extended=extended)
elif media_type == "playlist":
listitems = handle_playlists(results["items"])
elif media_type == "channel":
listitems = handle_channels(results["items"])
listitems.total_pages = results["pageInfo"]["resultsPerPage"]
listitems.totals = results["pageInfo"]["totalResults"]
listitems.next_page_token = results.get("nextPageToken", "")
listitems.prev_page_token = results.get("prevPageToken", "")
return listitems
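# Illustrative call (hypothetical query):
#   videos = search(search_str="kodi", media_type="video", limit=10)
# returns an ItemList of playable VideoItems with paging info
# (next_page_token, prev_page_token, totals) taken from the API response.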
def get_playlist_videos(playlist_id=""):
"""
returns ItemList from playlist with *playlist_id
"""
if not playlist_id:
return []
params = {"part": "id,snippet",
"maxResults": "50",
"playlistId": playlist_id}
results = get_data(method="playlistItems",
params=params)
if not results:
return []
return handle_videos(results["items"])
def get_user_playlists(username=""):
"""
    returns dict of related playlists (uploads etc.) for *username
"""
params = {"part": "contentDetails",
"forUsername": username}
results = get_data(method="channels",
params=params)
if not results["items"]:
return None
return results["items"][0]["contentDetails"]["relatedPlaylists"]
| gpl-2.0 |
ShineFan/odoo | addons/sale_order_dates/sale_order_dates.py | 223 | 5308 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
class sale_order_dates(osv.osv):
"""Add several date fields to Sale Orders, computed or user-entered"""
_inherit = 'sale.order'
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
"""Compute the expected date from the requested date, not the order date"""
if order and order.requested_date:
date_planned = datetime.strptime(order.requested_date, DEFAULT_SERVER_DATETIME_FORMAT)
date_planned -= timedelta(days=order.company_id.security_lead)
return date_planned.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return super(sale_order_dates, self)._get_date_planned(
cr, uid, order, line, start_date, context=context)
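    # Illustrative effect (made-up dates): with a requested_date of
    # 2015-06-10 and a company security_lead of 2 days, the planned date
    # becomes 2015-06-08; without a requested date the standard computation
    # based on the order date applies.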
def _get_effective_date(self, cr, uid, ids, name, arg, context=None):
"""Read the shipping date from the related packings"""
# TODO: would be better if it returned the date the picking was processed?
res = {}
dates_list = []
for order in self.browse(cr, uid, ids, context=context):
dates_list = []
for pick in order.picking_ids:
dates_list.append(pick.date)
if dates_list:
res[order.id] = min(dates_list)
else:
res[order.id] = False
return res
def _get_commitment_date(self, cr, uid, ids, name, arg, context=None):
"""Compute the commitment date"""
res = {}
dates_list = []
for order in self.browse(cr, uid, ids, context=context):
dates_list = []
order_datetime = datetime.strptime(order.date_order, DEFAULT_SERVER_DATETIME_FORMAT)
for line in order.order_line:
if line.state == 'cancel':
continue
dt = order_datetime + timedelta(days=line.delay or 0.0)
dt_s = dt.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
dates_list.append(dt_s)
if dates_list:
res[order.id] = min(dates_list)
return res
def onchange_requested_date(self, cr, uid, ids, requested_date,
commitment_date, context=None):
"""Warn if the requested dates is sooner than the commitment date"""
if (requested_date and commitment_date and requested_date < commitment_date):
return {'warning': {
'title': _('Requested date is too soon!'),
'message': _("The date requested by the customer is "
"sooner than the commitment date. You may be "
"unable to honor the customer's request.")
}
}
return {}
_columns = {
'commitment_date': fields.function(_get_commitment_date, store=True,
type='datetime', string='Commitment Date',
help="Date by which the products are sure to be delivered. This is "
"a date that you can promise to the customer, based on the "
"Product Lead Times."),
'requested_date': fields.datetime('Requested Date',
readonly=True, states={'draft': [('readonly', False)],
'sent': [('readonly', False)]}, copy=False,
help="Date by which the customer has requested the items to be "
"delivered.\n"
"When this Order gets confirmed, the Delivery Order's "
"expected date will be computed based on this date and the "
"Company's Security Delay.\n"
"Leave this field empty if you want the Delivery Order to be "
"processed as soon as possible. In that case the expected "
"date will be computed using the default method: based on "
"the Product Lead Times and the Company's Security Delay."),
'effective_date': fields.function(_get_effective_date, type='date',
store=True, string='Effective Date',
help="Date on which the first Delivery Order was created."),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
0xffea/keystone | keystone/common/openssl.py | 2 | 8439 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
#
import os
import stat
import subprocess
from keystone.common import logging
from keystone import config
LOG = logging.getLogger(__name__)
CONF = config.CONF
DIR_PERMS = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
CERT_PERMS = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
PRIV_PERMS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
DEFAULT_SUBJECT = '/C=US/ST=Unset/L=Unset/O=Unset/CN=www.example.com'
def file_exists(file_path):
return os.path.exists(file_path)
class ConfigurePKI(object):
"""Generate files for PKI signing using OpenSSL.
Signed tokens require a private key and signing certificate which itself
must be signed by a CA. This class generates them with workable defaults
    if any of the files are not already present
"""
def __init__(self, keystone_user, keystone_group, **kw):
self.conf_dir = os.path.dirname(CONF.signing.ca_certs)
self.use_keystone_user = keystone_user
self.use_keystone_group = keystone_group
self.ssl_config_file_name = os.path.join(self.conf_dir, "openssl.conf")
self.ca_key_file = os.path.join(self.conf_dir, "cakey.pem")
self.request_file_name = os.path.join(self.conf_dir, "req.pem")
self.ssl_dictionary = {'conf_dir': self.conf_dir,
'ca_cert': CONF.signing.ca_certs,
'ssl_config': self.ssl_config_file_name,
'ca_private_key': self.ca_key_file,
'ca_cert_cn': 'hostname',
'request_file': self.request_file_name,
'signing_key': CONF.signing.keyfile,
'signing_cert': CONF.signing.certfile,
'default_subject': DEFAULT_SUBJECT,
'key_size': int(CONF.signing.key_size),
'valid_days': int(CONF.signing.valid_days),
'ca_password': CONF.signing.ca_password}
def _make_dirs(self, file_name):
dir = os.path.dirname(file_name)
if not file_exists(dir):
os.makedirs(dir, DIR_PERMS)
if os.geteuid() == 0 and self.use_keystone_group:
os.chown(dir, -1, self.use_keystone_group)
def _set_permissions(self, file_name, perms):
os.chmod(file_name, perms)
if os.geteuid() == 0:
os.chown(file_name, self.use_keystone_user or -1,
self.use_keystone_group or -1)
def exec_command(self, command):
to_exec = command % self.ssl_dictionary
LOG.info(to_exec)
subprocess.check_call(to_exec.rsplit(' '))
def build_ssl_config_file(self):
if not file_exists(self.ssl_config_file_name):
self._make_dirs(self.ssl_config_file_name)
ssl_config_file = open(self.ssl_config_file_name, 'w')
ssl_config_file.write(self.sslconfig % self.ssl_dictionary)
ssl_config_file.close()
self._set_permissions(self.ssl_config_file_name, CERT_PERMS)
index_file_name = os.path.join(self.conf_dir, 'index.txt')
if not file_exists(index_file_name):
index_file = open(index_file_name, 'w')
index_file.write('')
index_file.close()
self._set_permissions(self.ssl_config_file_name, PRIV_PERMS)
serial_file_name = os.path.join(self.conf_dir, 'serial')
if not file_exists(serial_file_name):
index_file = open(serial_file_name, 'w')
index_file.write('01')
index_file.close()
self._set_permissions(self.ssl_config_file_name, PRIV_PERMS)
def build_ca_cert(self):
if not file_exists(CONF.signing.ca_certs):
if not os.path.exists(self.ca_key_file):
self._make_dirs(self.ca_key_file)
self.exec_command('openssl genrsa -out %(ca_private_key)s '
'%(key_size)d -config %(ssl_config)s')
self._set_permissions(self.ssl_dictionary['ca_private_key'],
stat.S_IRUSR)
self.exec_command('openssl req -new -x509 -extensions v3_ca '
'-passin pass:%(ca_password)s '
'-key %(ca_private_key)s -out %(ca_cert)s '
'-days %(valid_days)d '
'-config %(ssl_config)s '
'-subj %(default_subject)s')
self._set_permissions(self.ssl_dictionary['ca_cert'], CERT_PERMS)
def build_private_key(self):
signing_keyfile = self.ssl_dictionary['signing_key']
if not file_exists(signing_keyfile):
self._make_dirs(signing_keyfile)
self.exec_command('openssl genrsa -out %(signing_key)s '
'%(key_size)d '
'-config %(ssl_config)s')
self._set_permissions(os.path.dirname(signing_keyfile), PRIV_PERMS)
self._set_permissions(signing_keyfile, stat.S_IRUSR)
def build_signing_cert(self):
if not file_exists(CONF.signing.certfile):
self._make_dirs(CONF.signing.certfile)
self.exec_command('openssl req -key %(signing_key)s -new -nodes '
'-out %(request_file)s -config %(ssl_config)s '
'-subj %(default_subject)s')
self.exec_command('openssl ca -batch -out %(signing_cert)s '
'-config %(ssl_config)s '
'-infiles %(request_file)s')
def run(self):
self.build_ssl_config_file()
self.build_ca_cert()
self.build_private_key()
self.build_signing_cert()
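    # Illustrative usage (assumed values):
    #   ConfigurePKI(keystone_user='keystone', keystone_group='keystone').run()
    # creates openssl.conf, the CA key/certificate, the signing key and the
    # signing certificate under the directory of CONF.signing.ca_certs,
    # skipping any file that already exists.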
sslconfig = """
# OpenSSL configuration file.
#
# Establish working directory.
dir = %(conf_dir)s
[ ca ]
default_ca = CA_default
[ CA_default ]
new_certs_dir = $dir
serial = $dir/serial
database = $dir/index.txt
certificate = %(ca_cert)s
private_key = %(ca_private_key)s
default_days = 365
default_md = md5
preserve = no
email_in_dn = no
nameopt = default_ca
certopt = default_ca
policy = policy_match
[ policy_match ]
countryName = match
stateOrProvinceName = match
organizationName = match
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
default_bits = 1024 # Size of keys
default_keyfile = key.pem # name of generated keys
default_md = md5 # message digest algorithm
string_mask = nombstr # permitted characters
distinguished_name = req_distinguished_name
req_extensions = v3_req
[ req_distinguished_name ]
0.organizationName = Organization Name (company)
organizationalUnitName = Organizational Unit Name (department, division)
emailAddress = Email Address
emailAddress_max = 40
localityName = Locality Name (city, district)
stateOrProvinceName = State or Province Name (full name)
countryName = Country Name (2 letter code)
countryName_min = 2
countryName_max = 2
commonName = Common Name (hostname, IP, or your name)
commonName_max = 64
# Default values for the above, for consistency and less typing.
0.organizationName_default = Openstack, Inc
localityName_default = Undefined
stateOrProvinceName_default = Undefined
countryName_default = US
commonName_default = %(ca_cert_cn)s
[ v3_ca ]
basicConstraints = CA:TRUE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer:always
[ v3_req ]
basicConstraints = CA:FALSE
subjectKeyIdentifier = hash"""
| apache-2.0 |
pombredanne/pants | src/python/pants/backend/codegen/wire/java/java_wire_library.py | 9 | 2834 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.backend.jvm.targets.exportable_jvm_library import ExportableJvmLibrary
from pants.base.exceptions import TargetDefinitionException
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.base.validation import assert_list
logger = logging.getLogger(__name__)
class JavaWireLibrary(ExportableJvmLibrary):
"""A Java library generated from Wire IDL files.
Supports Wire 1.x only.
For an example Wire 2.x interface that generates service stubs see:
https://github.com/ericzundel/mvn2pants/tree/master/src/python/squarepants/plugins/sake_wire_codegen
But note this requires you to write a custom wire code generator with a command line interface.
:API: public
"""
def __init__(self,
payload=None,
service_writer=None,
service_writer_options=None,
roots=None,
registry_class=None,
enum_options=None,
no_options=None,
**kwargs):
"""
:param string service_writer: the name of the class to pass as the --service_writer option to
the Wire compiler (For wire 1.0 only)
:param list service_writer_options: A list of options to pass to the service writer (For
wire 1.x only)
:param list roots: passed through to the --roots option of the Wire compiler
:param string registry_class: fully qualified class name of RegistryClass to create. If in
doubt, specify com.squareup.wire.SimpleServiceWriter
    :param list enum_options: list of enums to pass as the --enum_options option (optional)
:param boolean no_options: boolean that determines if --no_options flag is passed
"""
if not service_writer and service_writer_options:
raise TargetDefinitionException(self,
'service_writer_options requires setting service_writer')
payload = payload or Payload()
payload.add_fields({
'service_writer': PrimitiveField(service_writer or None),
'service_writer_options': PrimitiveField(
assert_list(service_writer_options, key_arg='service_writer_options',
raise_type=TargetDefinitionException)),
'roots': PrimitiveField(roots or []),
'registry_class': PrimitiveField(registry_class or None),
'enum_options': PrimitiveField(enum_options or []),
'no_options': PrimitiveField(no_options or False),
})
super(JavaWireLibrary, self).__init__(payload=payload, **kwargs)
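# Illustrative BUILD usage (assumed target definition, not from this file):
#   java_wire_library(
#     name='example-wire',
#     sources=['example.proto'],
#     service_writer='com.squareup.wire.SimpleServiceWriter',
#   )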
| apache-2.0 |
graphite-server/carbon | lib/carbon/conf.py | 4 | 19985 | """Copyright 2009 Chris Davis
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import os
import sys
import pwd
import errno
from os.path import join, dirname, normpath, exists, isdir
from optparse import OptionParser
from ConfigParser import ConfigParser
import whisper
from carbon import log
from carbon.exceptions import CarbonConfigException
from twisted.python import usage
defaults = dict(
USER="",
MAX_CACHE_SIZE=float('inf'),
MAX_UPDATES_PER_SECOND=500,
MAX_CREATES_PER_MINUTE=float('inf'),
LINE_RECEIVER_INTERFACE='0.0.0.0',
LINE_RECEIVER_PORT=2003,
ENABLE_UDP_LISTENER=False,
UDP_RECEIVER_INTERFACE='0.0.0.0',
UDP_RECEIVER_PORT=2003,
PICKLE_RECEIVER_INTERFACE='0.0.0.0',
PICKLE_RECEIVER_PORT=2004,
CACHE_QUERY_INTERFACE='0.0.0.0',
CACHE_QUERY_PORT=7002,
LOG_UPDATES=True,
LOG_CACHE_HITS=True,
WHISPER_AUTOFLUSH=False,
WHISPER_SPARSE_CREATE=False,
WHISPER_FALLOCATE_CREATE=False,
WHISPER_LOCK_WRITES=False,
MAX_DATAPOINTS_PER_MESSAGE=500,
MAX_AGGREGATION_INTERVALS=5,
MAX_QUEUE_SIZE=1000,
QUEUE_LOW_WATERMARK_PCT=0.8,
TIME_TO_DEFER_SENDING=0.0001,
ENABLE_AMQP=False,
AMQP_VERBOSE=False,
BIND_PATTERNS=['#'],
ENABLE_MANHOLE=False,
MANHOLE_INTERFACE='127.0.0.1',
MANHOLE_PORT=7222,
MANHOLE_USER="",
MANHOLE_PUBLIC_KEY="",
RELAY_METHOD='rules',
REPLICATION_FACTOR=1,
DESTINATIONS=[],
USE_FLOW_CONTROL=True,
USE_INSECURE_UNPICKLER=False,
USE_WHITELIST=False,
CARBON_METRIC_PREFIX='carbon',
CARBON_METRIC_INTERVAL=60,
CACHE_WRITE_STRATEGY='sorted',
WRITE_BACK_FREQUENCY=None,
MIN_RESET_STAT_FLOW=1000,
MIN_RESET_RATIO=0.9,
MIN_RESET_INTERVAL=121,
USE_RATIO_RESET=False,
LOG_LISTENER_CONN_SUCCESS=True,
LOG_AGGREGATOR_MISSES=True,
AGGREGATION_RULES='aggregation-rules.conf',
REWRITE_RULES='rewrite-rules.conf',
RELAY_RULES='relay-rules.conf',
ENABLE_LOGROTATE=True,
)
def _umask(value):
return int(value, 8)
def _process_alive(pid):
if exists("/proc"):
return exists("/proc/%d" % pid)
else:
try:
os.kill(int(pid), 0)
return True
except OSError, err:
return err.errno == errno.EPERM
class OrderedConfigParser(ConfigParser):
"""Hacky workaround to ensure sections are always returned in the order
they are defined in. Note that this does *not* make any guarantees about
the order of options within a section or the order in which sections get
written back to disk on write()."""
_ordered_sections = []
def read(self, path):
# Verifies a file exists *and* is readable
if not os.access(path, os.R_OK):
raise CarbonConfigException("Error: Missing config file or wrong perms on %s" % path)
result = ConfigParser.read(self, path)
sections = []
for line in open(path):
line = line.strip()
if line.startswith('[') and line.endswith(']'):
sections.append( line[1:-1] )
self._ordered_sections = sections
return result
def sections(self):
return list( self._ordered_sections ) # return a copy for safety
class Settings(dict):
__getattr__ = dict.__getitem__
def __init__(self):
dict.__init__(self)
self.update(defaults)
def readFrom(self, path, section):
parser = ConfigParser()
if not parser.read(path):
raise CarbonConfigException("Failed to read config file %s" % path)
if not parser.has_section(section):
return
for key,value in parser.items(section):
key = key.upper()
# Detect type from defaults dict
if key in defaults:
valueType = type( defaults[key] )
else:
valueType = str
if valueType is list:
value = [ v.strip() for v in value.split(',') ]
elif valueType is bool:
value = parser.getboolean(section, key)
else:
# Attempt to figure out numeric types automatically
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
pass
self[key] = value
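# Illustrative coercion (assumed carbon.conf lines): with the defaults above,
# "MAX_UPDATES_PER_SECOND = 600" is stored as the int 600, "LOG_UPDATES = False"
# is read via getboolean(), and "DESTINATIONS = a:2004, b:2004" becomes the
# list ['a:2004', 'b:2004'], because the default value's type drives the
# conversion.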
settings = Settings()
settings.update(defaults)
class CarbonCacheOptions(usage.Options):
optFlags = [
["debug", "", "Run in debug mode."],
]
optParameters = [
["config", "c", None, "Use the given config file."],
["instance", "", "a", "Manage a specific carbon instance."],
["logdir", "", None, "Write logs to the given directory."],
["whitelist", "", None, "List of metric patterns to allow."],
["blacklist", "", None, "List of metric patterns to disallow."],
]
def postOptions(self):
global settings
program = self.parent.subCommand
# Use provided pidfile (if any) as default for configuration. If it's
# set to 'twistd.pid', that means no value was provided and the default
# was used.
pidfile = self.parent["pidfile"]
if pidfile.endswith("twistd.pid"):
pidfile = None
self["pidfile"] = pidfile
# Enforce a default umask of '022' if none was set.
if not self.parent.has_key("umask") or self.parent["umask"] is None:
self.parent["umask"] = 022
# Read extra settings from the configuration file.
program_settings = read_config(program, self)
settings.update(program_settings)
settings["program"] = program
# Normalize and expand paths
settings["STORAGE_DIR"] = os.path.normpath(os.path.expanduser(settings["STORAGE_DIR"]))
settings["LOCAL_DATA_DIR"] = os.path.normpath(os.path.expanduser(settings["LOCAL_DATA_DIR"]))
settings["WHITELISTS_DIR"] = os.path.normpath(os.path.expanduser(settings["WHITELISTS_DIR"]))
settings["PID_DIR"] = os.path.normpath(os.path.expanduser(settings["PID_DIR"]))
settings["LOG_DIR"] = os.path.normpath(os.path.expanduser(settings["LOG_DIR"]))
settings["pidfile"] = os.path.normpath(os.path.expanduser(settings["pidfile"]))
# Set process uid/gid by changing the parent config, if a user was
# provided in the configuration file.
if settings.USER:
self.parent["uid"], self.parent["gid"] = (
pwd.getpwnam(settings.USER)[2:4])
# Set the pidfile in parent config to the value that was computed by
# C{read_config}.
self.parent["pidfile"] = settings["pidfile"]
storage_schemas = join(settings["CONF_DIR"], "storage-schemas.conf")
if not exists(storage_schemas):
print "Error: missing required config %s" % storage_schemas
sys.exit(1)
if settings.WHISPER_AUTOFLUSH:
log.msg("Enabling Whisper autoflush")
whisper.AUTOFLUSH = True
if settings.WHISPER_FALLOCATE_CREATE:
if whisper.CAN_FALLOCATE:
log.msg("Enabling Whisper fallocate support")
else:
log.err("WHISPER_FALLOCATE_CREATE is enabled but linking failed.")
if settings.WHISPER_LOCK_WRITES:
if whisper.CAN_LOCK:
log.msg("Enabling Whisper file locking")
whisper.LOCK = True
else:
log.err("WHISPER_LOCK_WRITES is enabled but import of fcntl module failed.")
if not "action" in self:
self["action"] = "start"
self.handleAction()
# If we are not running in debug mode or non-daemon mode, then log to a
# directory, otherwise log output will go to stdout. If parent options
# are set to log to syslog, then use that instead.
if not self["debug"]:
if self.parent.get("syslog", None):
log.logToSyslog(self.parent["prefix"])
elif not self.parent["nodaemon"]:
logdir = settings.LOG_DIR
if not isdir(logdir):
os.makedirs(logdir)
if settings.USER:
# We have not yet switched to the specified user,
# but that user must be able to create files in this
# directory.
os.chown(logdir, self.parent["uid"], self.parent["gid"])
log.logToDir(logdir)
if self["whitelist"] is None:
self["whitelist"] = join(settings["CONF_DIR"], "whitelist.conf")
settings["whitelist"] = self["whitelist"]
if self["blacklist"] is None:
self["blacklist"] = join(settings["CONF_DIR"], "blacklist.conf")
settings["blacklist"] = self["blacklist"]
def parseArgs(self, *action):
"""If an action was provided, store it for further processing."""
if len(action) == 1:
self["action"] = action[0]
def handleAction(self):
"""Handle extra argument for backwards-compatibility.
* C{start} will simply do minimal pid checking and otherwise let twistd
take over.
* C{stop} will kill an existing running process if it matches the
C{pidfile} contents.
* C{status} will simply report if the process is up or not.
"""
action = self["action"]
pidfile = self.parent["pidfile"]
program = settings["program"]
instance = self["instance"]
if action == "stop":
if not exists(pidfile):
print "Pidfile %s does not exist" % pidfile
raise SystemExit(0)
pf = open(pidfile, 'r')
try:
pid = int(pf.read().strip())
pf.close()
except IOError:
print "Could not read pidfile %s" % pidfile
raise SystemExit(1)
print "Sending kill signal to pid %d" % pid
try:
os.kill(pid, 15)
except OSError, e:
if e.errno == errno.ESRCH:
print "No process with pid %d running" % pid
else:
raise
raise SystemExit(0)
elif action == "status":
if not exists(pidfile):
print "%s (instance %s) is not running" % (program, instance)
raise SystemExit(1)
pf = open(pidfile, "r")
try:
pid = int(pf.read().strip())
pf.close()
except IOError:
print "Failed to read pid from %s" % pidfile
raise SystemExit(1)
if _process_alive(pid):
print ("%s (instance %s) is running with pid %d" %
(program, instance, pid))
raise SystemExit(0)
else:
print "%s (instance %s) is not running" % (program, instance)
raise SystemExit(1)
elif action == "start":
if exists(pidfile):
pf = open(pidfile, 'r')
try:
pid = int(pf.read().strip())
pf.close()
except IOError:
print "Could not read pidfile %s" % pidfile
raise SystemExit(1)
if _process_alive(pid):
print ("%s (instance %s) is already running with pid %d" %
(program, instance, pid))
raise SystemExit(1)
else:
print "Removing stale pidfile %s" % pidfile
try:
os.unlink(pidfile)
except IOError:
print "Could not remove pidfile %s" % pidfile
# Try to create the PID directory
else:
if not os.path.exists(settings["PID_DIR"]):
try:
os.makedirs(settings["PID_DIR"])
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(settings["PID_DIR"]):
pass
else:
raise
print "Starting %s (instance %s)" % (program, instance)
else:
print "Invalid action '%s'" % action
print "Valid actions: start stop status"
raise SystemExit(1)
class CarbonAggregatorOptions(CarbonCacheOptions):
optParameters = [
["rules", "", None, "Use the given aggregation rules file."],
["rewrite-rules", "", None, "Use the given rewrite rules file."],
] + CarbonCacheOptions.optParameters
def postOptions(self):
CarbonCacheOptions.postOptions(self)
if self["rules"] is None:
self["rules"] = join(settings["CONF_DIR"], settings['AGGREGATION_RULES'])
settings["aggregation-rules"] = self["rules"]
if self["rewrite-rules"] is None:
self["rewrite-rules"] = join(settings["CONF_DIR"],
settings['REWRITE_RULES'])
settings["rewrite-rules"] = self["rewrite-rules"]
class CarbonRelayOptions(CarbonCacheOptions):
optParameters = [
["rules", "", None, "Use the given relay rules file."],
["aggregation-rules", "", None, "Use the given aggregation rules file."],
] + CarbonCacheOptions.optParameters
def postOptions(self):
CarbonCacheOptions.postOptions(self)
if self["rules"] is None:
self["rules"] = join(settings["CONF_DIR"], settings['RELAY_RULES'])
settings["relay-rules"] = self["rules"]
if self["aggregation-rules"] is None:
self["rules"] = join(settings["CONF_DIR"], settings['AGGREGATION_RULES'])
settings["aggregation-rules"] = self["rules"]
if settings["RELAY_METHOD"] not in ("rules", "consistent-hashing", "aggregated-consistent-hashing"):
print ("In carbon.conf, RELAY_METHOD must be either 'rules' or "
"'consistent-hashing' or 'aggregated-consistent-hashing'. Invalid value: '%s'" %
settings.RELAY_METHOD)
sys.exit(1)
def get_default_parser(usage="%prog [options] <start|stop|status>"):
"""Create a parser for command line options."""
parser = OptionParser(usage=usage)
parser.add_option(
"--debug", action="store_true",
help="Run in the foreground, log to stdout")
parser.add_option(
"--nodaemon", action="store_true",
help="Run in the foreground")
parser.add_option(
"--profile",
help="Record performance profile data to the given file")
parser.add_option(
"--pidfile", default=None,
help="Write pid to the given file")
parser.add_option(
"--umask", default=None,
help="Use the given umask when creating files")
parser.add_option(
"--config",
default=None,
help="Use the given config file")
parser.add_option(
"--whitelist",
default=None,
help="Use the given whitelist file")
parser.add_option(
"--blacklist",
default=None,
help="Use the given blacklist file")
parser.add_option(
"--logdir",
default=None,
help="Write logs in the given directory")
parser.add_option(
"--instance",
default='a',
help="Manage a specific carbon instance")
return parser
def get_parser(name):
parser = get_default_parser()
if name == "carbon-aggregator":
parser.add_option(
"--rules",
default=None,
help="Use the given aggregation rules file.")
parser.add_option(
"--rewrite-rules",
default=None,
help="Use the given rewrite rules file.")
elif name == "carbon-relay":
parser.add_option(
"--rules",
default=None,
help="Use the given relay rules file.")
return parser
def parse_options(parser, args):
"""
Parse command line options and print usage message if no arguments were
provided for the command.
"""
(options, args) = parser.parse_args(args)
if not args:
parser.print_usage()
raise SystemExit(1)
if args[0] not in ("start", "stop", "status"):
parser.print_usage()
raise SystemExit(1)
return options, args
def read_config(program, options, **kwargs):
"""
Read settings for 'program' from configuration file specified by
'options["config"]', with missing values provided by 'defaults'.
"""
settings = Settings()
settings.update(defaults)
# Initialize default values if not set yet.
for name, value in kwargs.items():
settings.setdefault(name, value)
graphite_root = kwargs.get("ROOT_DIR")
if graphite_root is None:
graphite_root = os.environ.get('GRAPHITE_ROOT')
if graphite_root is None:
raise CarbonConfigException("Either ROOT_DIR or GRAPHITE_ROOT "
"needs to be provided.")
# Default config directory to root-relative, unless overridden by the
# 'GRAPHITE_CONF_DIR' environment variable.
settings.setdefault("CONF_DIR",
os.environ.get("GRAPHITE_CONF_DIR",
join(graphite_root, "conf")))
if options["config"] is None:
options["config"] = join(settings["CONF_DIR"], "carbon.conf")
else:
# Set 'CONF_DIR' to the parent directory of the 'carbon.conf' config
# file.
settings["CONF_DIR"] = dirname(normpath(options["config"]))
# Storage directory can be overridden by the 'GRAPHITE_STORAGE_DIR'
# environment variable. It defaults to a path relative to GRAPHITE_ROOT
# for backwards compatibility though.
settings.setdefault("STORAGE_DIR",
os.environ.get("GRAPHITE_STORAGE_DIR",
join(graphite_root, "storage")))
# By default, everything is written to subdirectories of the storage dir.
settings.setdefault(
"PID_DIR", settings["STORAGE_DIR"])
settings.setdefault(
"LOG_DIR", join(settings["STORAGE_DIR"], "log", program))
settings.setdefault(
"LOCAL_DATA_DIR", join(settings["STORAGE_DIR"], "whisper"))
settings.setdefault(
"WHITELISTS_DIR", join(settings["STORAGE_DIR"], "lists"))
# Read configuration options from program-specific section.
section = program[len("carbon-"):]
config = options["config"]
if not exists(config):
raise CarbonConfigException("Error: missing required config %r" % config)
settings.readFrom(config, section)
settings.setdefault("instance", options["instance"])
# If a specific instance of the program is specified, augment the settings
# with the instance-specific settings and provide sane defaults for
# optional settings.
if options["instance"]:
settings.readFrom(config,
"%s:%s" % (section, options["instance"]))
settings["pidfile"] = (
options["pidfile"] or
join(settings["PID_DIR"], "%s-%s.pid" %
(program, options["instance"])))
settings["LOG_DIR"] = (options["logdir"] or
join(settings["LOG_DIR"],
"%s-%s" % (program ,options["instance"])))
else:
settings["pidfile"] = (
options["pidfile"] or
join(settings["PID_DIR"], '%s.pid' % program))
settings["LOG_DIR"] = (options["logdir"] or settings["LOG_DIR"])
return settings
| apache-2.0 |
einstein95/crunchy-xml-decoder | crunchy-xml-decoder/unidecode/x0fd.py | 252 | 3764 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'[?]', # 0x40
'[?]', # 0x41
'[?]', # 0x42
'[?]', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'[?]', # 0x47
'[?]', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'[?]', # 0x4b
'[?]', # 0x4c
'[?]', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'', # 0x74
'', # 0x75
'', # 0x76
'', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'', # 0x7d
'', # 0x7e
'', # 0x7f
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
beiko-lab/gengis | bin/Lib/test/test_tuple.py | 13 | 5569 | from test import test_support, seq_tests
import gc
class TupleTest(seq_tests.CommonTest):
type2test = tuple
def test_constructors(self):
super(TupleTest, self).test_constructors()
# calling built-in types without argument must return empty
self.assertEqual(tuple(), ())
t0_3 = (0, 1, 2, 3)
t0_3_bis = tuple(t0_3)
self.assertTrue(t0_3 is t0_3_bis)
self.assertEqual(tuple([]), ())
self.assertEqual(tuple([0, 1, 2, 3]), (0, 1, 2, 3))
self.assertEqual(tuple(''), ())
self.assertEqual(tuple('spam'), ('s', 'p', 'a', 'm'))
def test_truth(self):
super(TupleTest, self).test_truth()
self.assertTrue(not ())
self.assertTrue((42, ))
def test_len(self):
super(TupleTest, self).test_len()
self.assertEqual(len(()), 0)
self.assertEqual(len((0,)), 1)
self.assertEqual(len((0, 1, 2)), 3)
def test_iadd(self):
super(TupleTest, self).test_iadd()
u = (0, 1)
u2 = u
u += (2, 3)
self.assertTrue(u is not u2)
def test_imul(self):
super(TupleTest, self).test_imul()
u = (0, 1)
u2 = u
u *= 3
self.assertTrue(u is not u2)
def test_tupleresizebug(self):
# Check that a specific bug in _PyTuple_Resize() is squashed.
def f():
for i in range(1000):
yield i
self.assertEqual(list(tuple(f())), range(1000))
def test_hash(self):
# See SF bug 942952: Weakness in tuple hash
# The hash should:
# be non-commutative
# should spread-out closely spaced values
# should not exhibit cancellation in tuples like (x,(x,y))
# should be distinct from element hashes: hash(x)!=hash((x,))
# This test exercises those cases.
# For a pure random hash and N=50, the expected number of occupied
# buckets when tossing 252,600 balls into 2**32 buckets
# is 252,592.6, or about 7.4 expected collisions. The
# standard deviation is 2.73. On a box with 64-bit hash
# codes, no collisions are expected. Here we accept no
# more than 15 collisions. Any worse and the hash function
# is sorely suspect.
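# Concrete instances of the properties above (illustrative):
#   hash((1, 2)) != hash((2, 1)) # non-commutative
#   hash(1) != hash((1,)) # distinct from element hashes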
N=50
base = range(N)
xp = [(i, j) for i in base for j in base]
inps = base + [(i, j) for i in base for j in xp] + \
[(i, j) for i in xp for j in base] + xp + zip(base)
collisions = len(inps) - len(set(map(hash, inps)))
self.assertTrue(collisions <= 15)
def test_repr(self):
l0 = tuple()
l2 = (0, 1, 2)
a0 = self.type2test(l0)
a2 = self.type2test(l2)
self.assertEqual(str(a0), repr(l0))
self.assertEqual(str(a2), repr(l2))
self.assertEqual(repr(a0), "()")
self.assertEqual(repr(a2), "(0, 1, 2)")
def _not_tracked(self, t):
# Nested tuples can take several collections to untrack
gc.collect()
gc.collect()
self.assertFalse(gc.is_tracked(t), t)
def _tracked(self, t):
self.assertTrue(gc.is_tracked(t), t)
gc.collect()
gc.collect()
self.assertTrue(gc.is_tracked(t), t)
@test_support.cpython_only
def test_track_literals(self):
# Test GC-optimization of tuple literals
x, y, z = 1.5, "a", []
self._not_tracked(())
self._not_tracked((1,))
self._not_tracked((1, 2))
self._not_tracked((1, 2, "a"))
self._not_tracked((1, 2, (None, True, False, ()), int))
self._not_tracked((object(),))
self._not_tracked(((1, x), y, (2, 3)))
# Tuples with mutable elements are always tracked, even if those
# elements are not tracked right now.
self._tracked(([],))
self._tracked(([1],))
self._tracked(({},))
self._tracked((set(),))
self._tracked((x, y, z))
def check_track_dynamic(self, tp, always_track):
x, y, z = 1.5, "a", []
check = self._tracked if always_track else self._not_tracked
check(tp())
check(tp([]))
check(tp(set()))
check(tp([1, x, y]))
check(tp(obj for obj in [1, x, y]))
check(tp(set([1, x, y])))
check(tp(tuple([obj]) for obj in [1, x, y]))
check(tuple(tp([obj]) for obj in [1, x, y]))
self._tracked(tp([z]))
self._tracked(tp([[x, y]]))
self._tracked(tp([{x: y}]))
self._tracked(tp(obj for obj in [x, y, z]))
self._tracked(tp(tuple([obj]) for obj in [x, y, z]))
self._tracked(tuple(tp([obj]) for obj in [x, y, z]))
@test_support.cpython_only
def test_track_dynamic(self):
# Test GC-optimization of dynamically constructed tuples.
self.check_track_dynamic(tuple, False)
@test_support.cpython_only
def test_track_subtypes(self):
# Tuple subtypes must always be tracked
class MyTuple(tuple):
pass
self.check_track_dynamic(MyTuple, True)
@test_support.cpython_only
def test_bug7466(self):
# Trying to untrack an unfinished tuple could crash Python
self._not_tracked(tuple(gc.collect() for i in range(101)))
def test_main():
test_support.run_unittest(TupleTest)
if __name__=="__main__":
test_main()
| gpl-3.0 |
lcy-seso/models | globally_normalized_reader/model.py | 4 | 14339 | #!/usr/bin/env python
#coding=utf-8
import paddle.v2 as paddle
from paddle.v2.layer import parse_network
import basic_modules
from config import ModelConfig
__all__ = ["GNR"]
def build_pretrained_embedding(name, data_type, emb_dim, emb_drop=0.):
"""create word a embedding layer which loads pre-trained embeddings.
Arguments:
- name: The name of the data layer which accepts one-hot input.
- data_type: PaddlePaddle's data type for data layer.
- emb_dim: The path to the data files.
"""
return paddle.layer.embedding(
input=paddle.layer.data(
name=name, type=data_type),
size=emb_dim,
param_attr=paddle.attr.Param(
name="GloveVectors", is_static=True),
layer_attr=paddle.attr.ExtraLayerAttribute(drop_rate=emb_drop), )
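# Minimal usage sketch (the vocabulary size, dimension and dropout rate
# below are made-up illustrations, not values defined in this module):
#   question_emb = build_pretrained_embedding(
#       "question",
#       paddle.data_type.integer_value_sequence(10000),
#       emb_dim=300,
#       emb_drop=0.1)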
def encode_question(input_embedding,
lstm_hidden_dim,
depth,
passage_indep_embedding_dim,
prefix=""):
"""build question encoding by using bidirectional LSTM.
Each question word is encoded by running a stack of bidirectional LSTMs over
the word embeddings in the question, producing hidden states. The hidden states are
used to compute a passage-independent question embedding.
The final question encoding is constructed by concatenating the final
hidden states of the forward and backward LSTMs and the passage-independent
embedding.
Arguments:
- input_embedding: The question word embeddings.
- lstm_hidden_dim: The dimension of bi-directional LSTM.
- depth: The depth of stacked bi-directional LSTM.
- passage_indep_embedding_dim: The dimension of passage-independent
embedding.
- prefix: A string which will be appended to the name of each layer
created in this function. Each layer in a network should
have a unique name. The prefix makes it possible to call this
function multiple times.
"""
# stacked bi-directional LSTM to process question embeddings.
lstm_final, lstm_outs = basic_modules.stacked_bidirectional_lstm(
input_embedding, lstm_hidden_dim, depth, 0., prefix)
# compute passage-independent embeddings.
candidates = paddle.layer.fc(input=lstm_outs,
bias_attr=False,
size=passage_indep_embedding_dim,
act=paddle.activation.Linear())
weights = paddle.layer.fc(input=lstm_outs,
size=1,
bias_attr=False,
act=paddle.activation.SequenceSoftmax())
weighted_candidates = paddle.layer.scaling(input=candidates, weight=weights)
passage_indep_embedding = paddle.layer.pooling(
input=weighted_candidates, pooling_type=paddle.pooling.Sum())
return paddle.layer.concat(
input=[lstm_final, passage_indep_embedding]), lstm_outs
def question_aligned_passage_embedding(question_lstm_outs, document_embeddings,
passage_aligned_embedding_dim):
"""create question aligned passage embedding.
Arguments:
- question_lstm_outs: The outputs of the LSTM that processes the
question word embeddings.
- document_embeddings: The document embeddings.
- passage_aligned_embedding_dim: The dimension of passage aligned
embedding.
"""
def outer_sentence_step(document_embeddings, question_lstm_outs,
passage_aligned_embedding_dim):
"""step function for PaddlePaddle's recurrent_group.
In this function, the original input document_embeddings are scattered
from nested sequence into sequence by recurrent_group in PaddlePaddle.
The step function iterates over each sentence in the document.
Arguments:
- document_embeddings: The word embeddings of the document.
- question_lstm_outs: The outputs of the LSTM that
processes the question word embeddings.
- passage_aligned_embedding_dim: The dimension of passage aligned
embedding.
"""
def inner_word_step(word_embedding, question_lstm_outs,
question_outs_proj, passage_aligned_embedding_dim):
"""
In this recurrent_group, sentence embedding has been scattered into
word embeddings. The step function iterates over each word in one
sentence in the document.
Arguments:
- word_embedding: The word embeddings of documents.
- question_lstm_outs: The outputs of the LSTM that
processes the question word embeddings.
- question_outs_proj: The projection of question_lstm_outs
into a new hidden space.
- passage_aligned_embedding_dim: The dimension of passage
aligned embedding.
"""
doc_word_expand = paddle.layer.expand(
input=word_embedding,
expand_as=question_lstm_outs,
expand_level=paddle.layer.ExpandLevel.FROM_NO_SEQUENCE)
weights = paddle.layer.fc(
input=[question_lstm_outs, doc_word_expand],
size=1,
bias_attr=False,
act=paddle.activation.SequenceSoftmax())
weighted_candidates = paddle.layer.scaling(
input=question_outs_proj, weight=weights)
return paddle.layer.pooling(
input=weighted_candidates, pooling_type=paddle.pooling.Sum())
question_outs_proj = paddle.layer.fc(input=question_lstm_outs,
bias_attr=False,
size=passage_aligned_embedding_dim)
return paddle.layer.recurrent_group(
input=[
paddle.layer.SubsequenceInput(document_embeddings),
paddle.layer.StaticInput(question_lstm_outs),
paddle.layer.StaticInput(question_outs_proj),
passage_aligned_embedding_dim,
],
step=inner_word_step,
name="iter_over_word")
return paddle.layer.recurrent_group(
input=[
paddle.layer.SubsequenceInput(document_embeddings),
paddle.layer.StaticInput(question_lstm_outs),
passage_aligned_embedding_dim
],
step=outer_sentence_step,
name="iter_over_sen")
def encode_documents(input_embedding, same_as_question, question_vector,
question_lstm_outs, passage_indep_embedding_dim, prefix):
"""Build the final question-aware document embeddings.
Each word in the document is represented as concatenation of its word
vector, the question vector, boolean features indicating if a word appers
in the question or is repeated, and a question aligned embedding.
Arguments:
- input_embedding: The word embeddings of the document.
- same_as_question: The boolean features indicating if a word appears
in the question or is repeated.
- question_vector: The final question encoding.
- question_lstm_outs: The outputs of the LSTM that processes the
question word embeddings.
- passage_indep_embedding_dim: The dimension of passage independent
embedding.
- prefix: The prefix which will be appended to the name of each layer in
this function.
"""
question_expanded = paddle.layer.expand(
input=question_vector,
expand_as=input_embedding,
expand_level=paddle.layer.ExpandLevel.FROM_NO_SEQUENCE)
question_aligned_embedding = question_aligned_passage_embedding(
question_lstm_outs, input_embedding, passage_indep_embedding_dim)
return paddle.layer.concat(input=[
input_embedding, question_expanded, same_as_question,
question_aligned_embedding
])
def search_answer(doc_lstm_outs, sentence_idx, start_idx, end_idx, config,
is_infer):
"""Search the answer from the document.
The search process for this layer begins with searching a target sequence
from a nested sequence by using paddle.layer.kmax_seq_score and
paddle.layer.sub_nested_seq_layer. In the first search step, top beam size
sequences with highest scores, indices of these top k sequences in the
original nested sequence, and the ground truth (also called gold)
altogether (a triple) make up the first beam.
Then, start and end positions are searched. In these searches, the top k
positions with the highest scores are selected, and then the sub-sequences
starting from the selected start positions up to the ends of the sentences
are taken for the next search step by using paddle.layer.seq_slice.
Finally, the layer paddle.layer.cross_entropy_over_beam takes all the beam
expansions which contain several candidate targets found along the
three-step search. cross_entropy_over_beam calculates cross entropy over
the expanded beams, with all the candidates in the beam as the normalization
factor.
Note that, if gold falls off the beam at search step t, then the cost is
calculated over the beam at step t.
Arguments:
- doc_lstm_outs: The output of LSTM that process each document words.
- sentence_idx: Ground-truth indicating sentence index of the answer
in the document.
- start_idx: Ground-truth indicating start span index of the answer
in the sentence.
- end_idx: Ground-truth indicating end span index of the answer
in the sentence.
- is_infer: The boolean parameter indicating inferring or training.
"""
last_state_of_sentence = paddle.layer.last_seq(
input=doc_lstm_outs, agg_level=paddle.layer.AggregateLevel.TO_SEQUENCE)
sentence_scores = paddle.layer.fc(input=last_state_of_sentence,
size=1,
bias_attr=False,
act=paddle.activation.Linear())
topk_sentence_ids = paddle.layer.kmax_seq_score(
input=sentence_scores, beam_size=config.beam_size)
topk_sen = paddle.layer.sub_nested_seq(
input=doc_lstm_outs, selected_indices=topk_sentence_ids)
# expand beam to search start positions on selected sentences
start_pos_scores = paddle.layer.fc(
input=topk_sen,
size=1,
layer_attr=paddle.attr.ExtraLayerAttribute(
error_clipping_threshold=5.0),
bias_attr=False,
act=paddle.activation.Linear())
topk_start_pos_ids = paddle.layer.kmax_seq_score(
input=start_pos_scores, beam_size=config.beam_size)
topk_start_spans = paddle.layer.seq_slice(
input=topk_sen, starts=topk_start_pos_ids, ends=None)
# expand beam to search end positions on selected start spans
_, end_span_embedding = basic_modules.stacked_bidirectional_lstm(
topk_start_spans, config.lstm_hidden_dim, config.lstm_depth,
config.lstm_hidden_droprate, "__end_span_embeddings__")
end_pos_scores = paddle.layer.fc(input=end_span_embedding,
size=1,
bias_attr=False,
act=paddle.activation.Linear())
topk_end_pos_ids = paddle.layer.kmax_seq_score(
input=end_pos_scores, beam_size=config.beam_size)
if is_infer:
return [
sentence_scores, topk_sentence_ids, start_pos_scores,
topk_start_pos_ids, end_pos_scores, topk_end_pos_ids
]
else:
return paddle.layer.cross_entropy_over_beam(input=[
paddle.layer.BeamInput(sentence_scores, topk_sentence_ids,
sentence_idx),
paddle.layer.BeamInput(start_pos_scores, topk_start_pos_ids,
start_idx),
paddle.layer.BeamInput(end_pos_scores, topk_end_pos_ids, end_idx)
])
def GNR(config, is_infer=False):
"""Build the globally normalized reader model.
Arguments:
- config: The model configuration.
- is_infer: The boolean parameter indicating inferring or training.
"""
# encode question words
question_embeddings = build_pretrained_embedding(
"question",
paddle.data_type.integer_value_sequence(config.vocab_size),
config.embedding_dim, config.embedding_droprate)
question_vector, question_lstm_outs = encode_question(
question_embeddings, config.lstm_hidden_dim, config.lstm_depth,
config.passage_indep_embedding_dim, "__ques")
# encode document words
document_embeddings = build_pretrained_embedding(
"documents",
paddle.data_type.integer_value_sub_sequence(config.vocab_size),
config.embedding_dim, config.embedding_droprate)
same_as_question = paddle.layer.data(
name="same_as_question",
type=paddle.data_type.dense_vector_sub_sequence(1))
document_words_encoding = encode_documents(
document_embeddings, same_as_question, question_vector,
question_lstm_outs, config.passage_indep_embedding_dim, "__doc")
doc_lstm_outs = basic_modules.stacked_bidirectional_lstm_by_nested_seq(
document_words_encoding, config.lstm_depth, config.lstm_hidden_dim,
"__doc_lstm")
# search the answer.
sentence_idx = paddle.layer.data(
name="sen_idx", type=paddle.data_type.integer_value(1))
start_idx = paddle.layer.data(
name="start_idx", type=paddle.data_type.integer_value(1))
end_idx = paddle.layer.data(
name="end_idx", type=paddle.data_type.integer_value(1))
return search_answer(doc_lstm_outs, sentence_idx, start_idx, end_idx,
config, is_infer)
if __name__ == "__main__":
print(parse_network(GNR(ModelConfig)))
| apache-2.0 |
lostdj/Jaklin-OpenJFX | modules/web/src/main/native/Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py | 2 | 9887 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
import random
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.bot import irc_command
from webkitpy.tool.bot.queueengine import TerminateQueue
from webkitpy.tool.bot.sheriff import Sheriff
from webkitpy.tool.bot.ircbot import IRCBot
from webkitpy.tool.bot.ircbot import Eliza
from webkitpy.tool.bot.sheriff_unittest import MockSheriffBot
from webkitpy.tool.mocktool import MockTool
def run(message):
tool = MockTool()
tool.ensure_irc_connected(None)
bot = IRCBot("sheriffbot", tool, Sheriff(tool, MockSheriffBot()), irc_command.commands)
bot._message_queue.post(["mock_nick", message])
bot.process_pending_messages()
class IRCBotTest(unittest.TestCase):
def test_eliza(self):
eliza = Eliza()
eliza.execute("tom", "hi", None, None)
eliza.execute("tom", "bye", None, None)
def test_parse_command_and_args(self):
tool = MockTool()
bot = IRCBot("sheriffbot", tool, Sheriff(tool, MockSheriffBot()), irc_command.commands)
self.assertEqual(bot._parse_command_and_args(""), (Eliza, [""]))
self.assertEqual(bot._parse_command_and_args(" "), (Eliza, [""]))
self.assertEqual(bot._parse_command_and_args(" hi "), (irc_command.Hi, []))
self.assertEqual(bot._parse_command_and_args(" hi there "), (irc_command.Hi, ["there"]))
def test_exception_during_command(self):
tool = MockTool()
tool.ensure_irc_connected(None)
bot = IRCBot("sheriffbot", tool, Sheriff(tool, MockSheriffBot()), irc_command.commands)
class CommandWithException(object):
def execute(self, nick, args, tool, sheriff):
raise Exception("mock_exception")
bot._parse_command_and_args = lambda request: (CommandWithException, [])
expected_logs = 'MOCK: irc.post: Exception executing command: mock_exception\n'
OutputCapture().assert_outputs(self, bot.process_message, args=["mock_nick", "ignored message"], expected_logs=expected_logs)
class CommandWithException(object):
def execute(self, nick, args, tool, sheriff):
raise KeyboardInterrupt()
bot._parse_command_and_args = lambda request: (CommandWithException, [])
# KeyboardInterrupt and SystemExit are not subclasses of Exception and thus correctly will not be caught.
OutputCapture().assert_outputs(self, bot.process_message, args=["mock_nick", "ignored message"], expected_exception=KeyboardInterrupt)
def test_hi(self):
random.seed(23324)
expected_logs = 'MOCK: irc.post: "Only you can prevent forest fires." -- Smokey the Bear\n'
OutputCapture().assert_outputs(self, run, args=["hi"], expected_logs=expected_logs)
def test_help(self):
expected_logs = 'MOCK: irc.post: mock_nick: Available commands: create-bug, help, hi, ping, restart, rollout, whois, yt?\nMOCK: irc.post: mock_nick: Type "mock-sheriff-bot: help COMMAND" for help on my individual commands.\n'
OutputCapture().assert_outputs(self, run, args=["help"], expected_logs=expected_logs)
expected_logs = 'MOCK: irc.post: mock_nick: Usage: hi\nMOCK: irc.post: mock_nick: Responds with hi.\nMOCK: irc.post: mock_nick: Aliases: hello\n'
OutputCapture().assert_outputs(self, run, args=["help hi"], expected_logs=expected_logs)
OutputCapture().assert_outputs(self, run, args=["help hello"], expected_logs=expected_logs)
def test_restart(self):
expected_logs = "MOCK: irc.post: Restarting...\n"
OutputCapture().assert_outputs(self, run, args=["restart"], expected_logs=expected_logs, expected_exception=TerminateQueue)
def test_rollout(self):
expected_logs = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
OutputCapture().assert_outputs(self, run, args=["rollout 21654 This patch broke the world"], expected_logs=expected_logs)
def test_revert(self):
expected_logs = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
OutputCapture().assert_outputs(self, run, args=["revert 21654 This patch broke the world"], expected_logs=expected_logs)
def test_multi_rollout(self):
expected_logs = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654, http://trac.webkit.org/changeset/21655, and http://trac.webkit.org/changeset/21656 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
OutputCapture().assert_outputs(self, run, args=["rollout 21654 21655 21656 This 21654 patch broke the world"], expected_logs=expected_logs)
def test_rollout_with_r_in_svn_revision(self):
expected_logs = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
OutputCapture().assert_outputs(self, run, args=["rollout r21654 This patch broke the world"], expected_logs=expected_logs)
def test_multi_rollout_with_r_in_svn_revision(self):
expected_logs = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654, http://trac.webkit.org/changeset/21655, and http://trac.webkit.org/changeset/21656 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
OutputCapture().assert_outputs(self, run, args=["rollout r21654 21655 r21656 This r21654 patch broke the world"], expected_logs=expected_logs)
def test_rollout_bananas(self):
expected_logs = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
OutputCapture().assert_outputs(self, run, args=["rollout bananas"], expected_logs=expected_logs)
def test_rollout_invalidate_revision(self):
# When folks pass junk arguments, we should just spit the usage back at them.
expected_logs = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
OutputCapture().assert_outputs(self, run,
args=["rollout --component=Tools 21654"],
expected_logs=expected_logs)
def test_rollout_invalidate_reason(self):
# FIXME: I'm slightly confused as to why this doesn't return the USAGE message.
expected_logs = """MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654 ...
MOCK: irc.post: mock_nick, abarth, darin, eseidel: Failed to create rollout patch:
MOCK: irc.post: The rollout reason may not begin with - (\"-bad (Requested by mock_nick on #webkit).\").
"""
OutputCapture().assert_outputs(self, run,
args=["rollout 21654 -bad"],
expected_logs=expected_logs)
def test_multi_rollout_invalidate_reason(self):
expected_logs = """MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654, http://trac.webkit.org/changeset/21655, and http://trac.webkit.org/changeset/21656 ...
MOCK: irc.post: mock_nick, abarth, darin, eseidel: Failed to create rollout patch:
MOCK: irc.post: The rollout reason may not begin with - (\"-bad (Requested by mock_nick on #webkit).\").
"""
OutputCapture().assert_outputs(self, run,
args=["rollout "
"21654 21655 r21656 -bad"],
expected_logs=expected_logs)
def test_rollout_no_reason(self):
expected_logs = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
OutputCapture().assert_outputs(self, run, args=["rollout 21654"], expected_logs=expected_logs)
def test_multi_rollout_no_reason(self):
expected_logs = "MOCK: irc.post: mock_nick: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON\n"
OutputCapture().assert_outputs(self, run, args=["rollout 21654 21655 r21656"], expected_logs=expected_logs)
| gpl-2.0 |
bud4/samba | third_party/waf/wafadmin/Runner.py | 32 | 5555 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2008 (ita)
"Execute the tasks"
import os, sys, random, time, threading, traceback
try: from Queue import Queue
except ImportError: from queue import Queue
import Build, Utils, Logs, Options
from Logs import debug, error
from Constants import *
GAP = 15
run_old = threading.Thread.run
def run(*args, **kwargs):
try:
run_old(*args, **kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except:
sys.excepthook(*sys.exc_info())
threading.Thread.run = run
def process_task(tsk):
m = tsk.master
if m.stop:
m.out.put(tsk)
return
try:
tsk.generator.bld.printout(tsk.display())
if tsk.__class__.stat: ret = tsk.__class__.stat(tsk)
# actual call to task's run() function
else: ret = tsk.call_run()
except Exception, e:
tsk.err_msg = Utils.ex_stack()
tsk.hasrun = EXCEPTION
# TODO cleanup
m.error_handler(tsk)
m.out.put(tsk)
return
if ret:
tsk.err_code = ret
tsk.hasrun = CRASHED
else:
try:
tsk.post_run()
except Utils.WafError:
pass
except Exception:
tsk.err_msg = Utils.ex_stack()
tsk.hasrun = EXCEPTION
else:
tsk.hasrun = SUCCESS
if tsk.hasrun != SUCCESS:
m.error_handler(tsk)
m.out.put(tsk)
class TaskConsumer(threading.Thread):
ready = Queue(0)
consumers = []
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(1)
self.start()
def run(self):
try:
self.loop()
except:
pass
def loop(self):
while 1:
tsk = TaskConsumer.ready.get()
process_task(tsk)
class Parallel(object):
"""
keep the consumer threads busy, and avoid consuming cpu cycles
when no more tasks can be added (end of the build, etc)
"""
def __init__(self, bld, j=2):
# number of consumers
self.numjobs = j
self.manager = bld.task_manager
self.manager.current_group = 0
self.total = self.manager.total()
# tasks waiting to be processed - IMPORTANT
self.outstanding = []
self.maxjobs = MAXJOBS
# tasks that are awaiting for another task to complete
self.frozen = []
# tasks returned by the consumers
self.out = Queue(0)
self.count = 0 # tasks not in the producer area
self.processed = 1 # progress indicator
self.stop = False # error condition to stop the build
self.error = False # error flag
def get_next(self):
"override this method to schedule the tasks in a particular order"
if not self.outstanding:
return None
return self.outstanding.pop(0)
def postpone(self, tsk):
"override this method to schedule the tasks in a particular order"
# TODO consider using a deque instead
if random.randint(0, 1):
self.frozen.insert(0, tsk)
else:
self.frozen.append(tsk)
def refill_task_list(self):
"called to set the next group of tasks"
while self.count > self.numjobs + GAP or self.count >= self.maxjobs:
self.get_out()
while not self.outstanding:
if self.count:
self.get_out()
if self.frozen:
self.outstanding += self.frozen
self.frozen = []
elif not self.count:
(jobs, tmp) = self.manager.get_next_set()
if jobs != None: self.maxjobs = jobs
if tmp: self.outstanding += tmp
break
def get_out(self):
"the tasks that are put to execute are all collected using get_out"
ret = self.out.get()
self.manager.add_finished(ret)
if not self.stop and getattr(ret, 'more_tasks', None):
self.outstanding += ret.more_tasks
self.total += len(ret.more_tasks)
self.count -= 1
def error_handler(self, tsk):
"by default, errors make the build stop (not thread safe so be careful)"
if not Options.options.keep:
self.stop = True
self.error = True
def start(self):
"execute the tasks"
if TaskConsumer.consumers:
# the worker pool is usually loaded lazily (see below)
# in case it is re-used with a different value of numjobs:
while len(TaskConsumer.consumers) < self.numjobs:
TaskConsumer.consumers.append(TaskConsumer())
while not self.stop:
self.refill_task_list()
# consider the next task
tsk = self.get_next()
if not tsk:
if self.count:
# tasks may add new ones after they are run
continue
else:
# no tasks to run, no tasks running, time to exit
break
if tsk.hasrun:
# if the task is marked as "run", just skip it
self.processed += 1
self.manager.add_finished(tsk)
continue
try:
st = tsk.runnable_status()
except Exception, e:
self.processed += 1
if self.stop and not Options.options.keep:
tsk.hasrun = SKIPPED
self.manager.add_finished(tsk)
continue
self.error_handler(tsk)
self.manager.add_finished(tsk)
tsk.hasrun = EXCEPTION
tsk.err_msg = Utils.ex_stack()
continue
if st == ASK_LATER:
self.postpone(tsk)
elif st == SKIP_ME:
self.processed += 1
tsk.hasrun = SKIPPED
self.manager.add_finished(tsk)
else:
# run me: put the task in ready queue
tsk.position = (self.processed, self.total)
self.count += 1
tsk.master = self
self.processed += 1
if self.numjobs == 1:
process_task(tsk)
else:
TaskConsumer.ready.put(tsk)
# create the consumer threads only if there is something to consume
if not TaskConsumer.consumers:
TaskConsumer.consumers = [TaskConsumer() for i in xrange(self.numjobs)]
# self.count represents the tasks that have been made available to the consumer threads
# collect all the tasks after an error else the message may be incomplete
while self.error and self.count:
self.get_out()
#print loop
assert (self.count == 0 or self.stop)
| gpl-3.0 |
erinspace/osf.io | api/wb/serializers.py | 1 | 2711 | from django.db import IntegrityError
from rest_framework import serializers as ser
from rest_framework import exceptions
from website.files import exceptions as file_exceptions
from api.base.serializers import IDField, ShowIfVersion
class DestinationSerializer(ser.Serializer):
parent = ser.CharField(write_only=True)
target = ser.CharField(write_only=True)
name = ser.CharField(write_only=True, allow_blank=True, allow_null=True)
node = ShowIfVersion(
ser.CharField(write_only=True),
min_version='2.0', max_version='2.7'
)
class WaterbutlerMetadataSerializer(ser.Serializer):
source = ser.CharField(write_only=True)
destination = DestinationSerializer(write_only=True)
id = IDField(source='_id', read_only=True)
kind = ser.CharField(read_only=True)
name = ser.CharField(read_only=True, help_text='Display name used in the general user interface')
created = ser.CharField(read_only=True)
modified = ser.CharField(read_only=True)
path = ser.CharField(read_only=True)
checkout = ser.SerializerMethodField(read_only=True)
version = ser.IntegerField(help_text='Latest file version', read_only=True, source='current_version_number')
downloads = ser.SerializerMethodField()
sha256 = ser.SerializerMethodField()
md5 = ser.SerializerMethodField()
size = ser.SerializerMethodField()
def get_checkout(self, obj):
return obj.checkout._id if obj.checkout else None
def get_downloads(self, obj):
return obj.get_download_count()
def get_sha256(self, obj):
return obj.versions.first().metadata.get('sha256', None) if obj.versions.exists() else None
def get_md5(self, obj):
return obj.versions.first().metadata.get('md5', None) if obj.versions.exists() else None
def get_size(self, obj):
if obj.versions.exists():
self.size = obj.versions.first().size
return self.size
return None
def create(self, validated_data):
source = validated_data.pop('source')
destination = validated_data.pop('destination')
name = validated_data.pop('name')
try:
return self.context['view'].perform_file_action(source, destination, name)
except IntegrityError:
raise exceptions.ValidationError('File already exists with this name.')
except file_exceptions.FileNodeCheckedOutError:
raise exceptions.ValidationError('Cannot move file as it is checked out.')
except file_exceptions.FileNodeIsPrimaryFile:
raise exceptions.ValidationError('Cannot move file as it is the primary file of preprint.')
class Meta:
type_ = 'file_metadata'
| apache-2.0 |
amghost/myblog | node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/formatters/latex.py | 96 | 13931 | # -*- coding: utf-8 -*-
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
return text.replace('\\', '\x00'). \
replace('{', '\x01'). \
replace('}', '\x02'). \
replace('\x00', r'\%sZbs{}' % commandprefix). \
replace('\x01', r'\%sZob{}' % commandprefix). \
replace('\x02', r'\%sZcb{}' % commandprefix). \
replace('^', r'\%sZca{}' % commandprefix). \
replace('_', r'\%sZus{}' % commandprefix). \
replace('&', r'\%sZam{}' % commandprefix). \
replace('<', r'\%sZlt{}' % commandprefix). \
replace('>', r'\%sZgt{}' % commandprefix). \
replace('#', r'\%sZsh{}' % commandprefix). \
replace('%', r'\%sZpc{}' % commandprefix). \
replace('$', r'\%sZdl{}' % commandprefix). \
replace('-', r'\%sZhy{}' % commandprefix). \
replace("'", r'\%sZsq{}' % commandprefix). \
replace('"', r'\%sZdq{}' % commandprefix). \
replace('~', r'\%sZti{}' % commandprefix)
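# For illustration, with the default command prefix "PY":
#   escape_tex("50% of $x_2$", "PY")
#   == r"50\PYZpc{} of \PYZdl{}x\PYZus{}2\PYZdl{}"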
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
class LatexFormatter(Formatter):
r"""
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
standard packages.
Without the `full` option, code is formatted as one ``Verbatim``
environment, like this:
.. sourcecode:: latex
\begin{Verbatim}[commandchars=\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
\PY{k}{pass}
\end{Verbatim}
The special command used here (``\PY``) and all the other macros it needs
are output by the `get_style_defs` method.
With the `full` option, a complete LaTeX document is output, including
the command definitions in the preamble.
The `get_style_defs()` method of a `LatexFormatter` returns a string
containing ``\def`` commands defining the macros needed inside the
``Verbatim`` environments.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`docclass`
If the `full` option is enabled, this is the document class to use
(default: ``'article'``).
`preamble`
If the `full` option is enabled, this can be further preamble commands,
e.g. ``\usepackage`` (default: ``''``).
`linenos`
If set to ``True``, output line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`verboptions`
Additional options given to the Verbatim environment (see the *fancyvrb*
docs for possible values) (default: ``''``).
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
*New in Pygments 0.7.*
*New in Pygments 0.10:* the default is now ``'PY'`` instead of ``'C'``.
`texcomments`
If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
in comment tokens is not escaped so that LaTeX can render it (default:
``False``). *New in Pygments 1.2.*
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
``False``). *New in Pygments 1.2.*
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
filenames = ['*.tex']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.docclass = options.get('docclass', 'article')
self.preamble = options.get('preamble', '')
self.linenos = get_bool_opt(options, 'linenos', False)
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.verboptions = options.get('verboptions', '')
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
self._create_stylesheet()
def _create_stylesheet(self):
t2n = self.ttype2name = {Token: ''}
c2d = self.cmd2def = {}
cp = self.commandprefix
def rgbcolor(col):
if col:
return ','.join(['%.2f' %(int(col[i] + col[i + 1], 16) / 255.0)
for i in (0, 2, 4)])
else:
return '1,1,1'
for ttype, ndef in self.style:
name = _get_ttype_name(ttype)
cmndef = ''
if ndef['bold']:
cmndef += r'\let\$$@bf=\textbf'
if ndef['italic']:
cmndef += r'\let\$$@it=\textit'
if ndef['underline']:
cmndef += r'\let\$$@ul=\underline'
if ndef['roman']:
cmndef += r'\let\$$@ff=\textrm'
if ndef['sans']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['mono']:
cmndef += r'\let\$$@ff=\texttt'
if ndef['color']:
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
rgbcolor(ndef['color']))
if ndef['border']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
(rgbcolor(ndef['border']),
rgbcolor(ndef['bgcolor'])))
elif ndef['bgcolor']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\colorbox[rgb]{%s}{\strut ##1}}' %
rgbcolor(ndef['bgcolor']))
if cmndef == '':
continue
cmndef = cmndef.replace('$$', cp)
t2n[ttype] = name
c2d[name] = cmndef
def get_style_defs(self, arg=''):
"""
Return the command sequences needed to define the commands
used to format text in the verbatim environment. ``arg`` is ignored.
"""
cp = self.commandprefix
styles = []
for name, definition in self.cmd2def.items():
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
'styles': '\n'.join(styles)}
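# Hedged usage sketch (standard pygments entry points, shown only for
# orientation; nothing below is defined in this module):
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   fmt = LatexFormatter(linenos=True)
#   preamble_defs = fmt.get_style_defs() # goes into the LaTeX preamble
#   verbatim_body = highlight("print('hi')", PythonLexer(), fmt)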
def format_unencoded(self, tokensource, outfile):
# TODO: add support for background colors
t2n = self.ttype2name
cp = self.commandprefix
if self.full:
realoutfile = outfile
outfile = StringIO()
outfile.write(r'\begin{Verbatim}[commandchars=\\\{\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(',numbers=left' +
(start and ',firstnumber=%d' % start or '') +
(step and ',stepnumber=%d' % step or ''))
if self.mathescape or self.texcomments:
outfile.write(r',codes={\catcode`\$=3\catcode`\^=7\catcode`\_=8}')
if self.verboptions:
outfile.write(',' + self.verboptions)
outfile.write(']\n')
for ttype, value in tokensource:
if ttype in Token.Comment:
if self.texcomments:
# Try to guess comment starting lexeme and escape it ...
start = value[0:1]
for i in range(1, len(value)):
if start[0] != value[i]:
break
start += value[i]
value = value[len(start):]
start = escape_tex(start, self.commandprefix)
# ... but do not escape inside comment.
value = start + value
elif self.mathescape:
# Only escape parts not inside a math environment.
parts = value.split('$')
in_math = False
for i, part in enumerate(parts):
if not in_math:
parts[i] = escape_tex(part, self.commandprefix)
in_math = not in_math
value = '$'.join(parts)
else:
value = escape_tex(value, self.commandprefix)
else:
value = escape_tex(value, self.commandprefix)
styles = []
while ttype is not Token:
try:
styles.append(t2n[ttype])
except KeyError:
# not in current style
styles.append(_get_ttype_name(ttype))
ttype = ttype.parent
styleval = '+'.join(reversed(styles))
if styleval:
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
outfile.write('\n')
if spl[-1]:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
else:
outfile.write(value)
outfile.write('\\end{Verbatim}\n')
if self.full:
realoutfile.write(DOC_TEMPLATE %
dict(docclass = self.docclass,
preamble = self.preamble,
title = self.title,
encoding = self.encoding or 'latin1',
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
| mit |
ashvina/heron | heron/tools/tracker/src/python/handlers/runtimestatehandler.py | 4 | 4623 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' runtimestatehandler.py '''
import traceback
import tornado.gen
import tornado.web
from heron.common.src.python.utils.log import Log
from heron.proto import tmaster_pb2
from heron.tools.tracker.src.python.handlers import BaseHandler
# pylint: disable=attribute-defined-outside-init
class RuntimeStateHandler(BaseHandler):
"""
URL - /topologies/runtimestate
Parameters:
- cluster (required)
- environ (required)
- role - (optional) Role used to submit the topology.
- topology (required) name of the requested topology
The response JSON is a dictionary with all the
runtime information of a topology. Static properties
are available from /topologies/metadata.
Example JSON response:
{
has_tmaster_location: true,
stmgrs_reg_summary: {
registered_stmgrs: [
"stmgr-1",
"stmgr-2"
],
absent_stmgrs: [ ]
},
has_scheduler_location: true,
has_physical_plan: true
}
"""
def initialize(self, tracker):
""" initialize """
self.tracker = tracker
# pylint: disable=dangerous-default-value, no-self-use, unused-argument
@tornado.gen.coroutine
def getStmgrsRegSummary(self, tmaster, callback=None):
"""
Get a registration summary for the topology's stream managers from the Tmaster.
"""
if not tmaster or not tmaster.host or not tmaster.stats_port:
return
reg_request = tmaster_pb2.StmgrsRegistrationSummaryRequest()
request_str = reg_request.SerializeToString()
port = str(tmaster.stats_port)
host = tmaster.host
url = "http://{0}:{1}/stmgrsregistrationsummary".format(host, port)
request = tornado.httpclient.HTTPRequest(url,
body=request_str,
method='POST',
request_timeout=5)
Log.debug('Making HTTP call to fetch stmgrsregistrationsummary url: %s', url)
try:
client = tornado.httpclient.AsyncHTTPClient()
result = yield client.fetch(request)
Log.debug("HTTP call complete.")
except tornado.httpclient.HTTPError as e:
raise Exception(str(e))
# Check the response code - error if it is in 400s or 500s
responseCode = result.code
if responseCode >= 400:
message = "Error in getting exceptions from Tmaster, code: " + responseCode
Log.error(message)
raise tornado.gen.Return({
"message": message
})
# Parse the response from tmaster.
reg_response = tmaster_pb2.StmgrsRegistrationSummaryResponse()
reg_response.ParseFromString(result.body)
# Send response
ret = {}
for stmgr in reg_response.registered_stmgrs:
ret[stmgr] = True
for stmgr in reg_response.absent_stmgrs:
ret[stmgr] = False
raise tornado.gen.Return(ret)
@tornado.gen.coroutine
def get(self):
""" get method """
try:
cluster = self.get_argument_cluster()
role = self.get_argument_role()
environ = self.get_argument_environ()
topology_name = self.get_argument_topology()
topology_info = self.tracker.getTopologyInfo(topology_name, cluster, role, environ)
runtime_state = topology_info["runtime_state"]
runtime_state["topology_version"] = topology_info["metadata"]["release_version"]
topology = self.tracker.getTopologyByClusterRoleEnvironAndName(
cluster, role, environ, topology_name)
reg_summary = yield tornado.gen.Task(self.getStmgrsRegSummary, topology.tmaster)
for stmgr, reg in reg_summary.items():
runtime_state["stmgrs"].setdefault(stmgr, {})["is_registered"] = reg
self.write_success_response(runtime_state)
except Exception as e:
Log.debug(traceback.format_exc())
self.write_error_response(e)
| apache-2.0 |
JohnOmernik/pimeup | throne/throne.py | 1 | 11168 | #!/usr/bin/python
#Gateway
import time
import random
import sys
import cwiid
import json
import gevent
from collections import OrderedDict
import cStringIO
import alsaaudio
import wave
import requests
import os
import struct
import math
from dotstar import Adafruit_DotStar
import socket
WHATAMI = os.path.basename(__file__).replace(".py", "")
WHOAMI = socket.gethostname()
m = alsaaudio.Mixer('PCM')
current_volume = m.getvolume() # Get the current Volume
print("Cur Vol: %s " % current_volume)
m.setvolume(100) # Set the volume to 100%.
current_volume = m.getvolume() # Get the current Volume
print("Cur Vol: %s " % current_volume)
mesg = False
rpt_mode = 0
wiimote = None
connected = False
rumble = 0
numpixels = 264 # Number of LEDs in strip
# Here's how to control the strip from any two GPIO pins:
datapin = 23
clockpin = 24
fire_colors = [ "#001100", "#005500", "#00FF00", "#33FFFF", "#FFFFFF" ]
outtimes = {}
mydelays = [0.001]
#, 0.02, 0.03, 0.1, 0.15]
heat = []
for x in range(numpixels):
heat.append(30)
COOLING = 15
num_colors = 100
my_colors = []
colors_dict = OrderedDict()
allcolors = []
fireplacestarttime = 0
soundstarttime = 0
curplay = 66
lasthb = 0
hbinterval = 30
fireplace = True
fireplacestart = False
soundstart = False
soundplaying = False
#Setting color to: 0xFF0000 # Green
#Setting color to: 0xCC00CC # Bright Teal
#Setting color to: 0x66CC00 # Orange
#Setting color to: 0x33FFFF # Magenta
#Setting color to: 0xFF00 # Red
#Setting color to: 0x330099 # Lightish Blue
#Setting color to: 0xFFFF00 # YEllow
#Setting color to: 0xFF # Bright Blue
#Setting color to: 0xFF9900 # YEllower Gren
#Setting color to: 0x33 # Dark BLue
strip = Adafruit_DotStar(numpixels, datapin, clockpin)
strip.setBrightness(255)
strip.begin() # Initialize pins for output
def main():
global strip
global allcolors
global fire_colors
logevent("startup", "startup", "Just started and ready to run")
for x in range(len(fire_colors)):
if x == len(fire_colors) -1:
pass
else:
print("Adding gradient for %s (%s) to %s (%s) with %s colors" % (fire_colors[x], hex_to_RGB(fire_colors[x]), fire_colors[x+1], hex_to_RGB(fire_colors[x+1]), num_colors))
gtmp = linear_gradient(fire_colors[x], fire_colors[x+1], num_colors)
my_colors.append(gtmp['hex'])
colors_dict[fire_colors[x] + "_2_" + fire_colors[x+1]] = gtmp['hex']
for x in colors_dict:
for y in colors_dict[x]:
# print("Color: %s" % hex_to_RGB(y))
allcolors.append(y)
#Connect to address given on command-line, if present
print 'Put Wiimote in discoverable mode now (press 1+2)...'
global wiimote
global rpt_mode
global connected
global rumble
print("Trying Connection")
print ("Press 1+2")
while not connected:
try:
wiimote = cwiid.Wiimote()
print("Connected!")
connected = True
rumble ^= 1
wiimote.rumble = rumble
time.sleep(2)
rumble ^= 1
wiimote.rumble = rumble
logevent("wii", "connect", "Wii remote just synced up")
except:
print("Trying Again, please press 1+2")
time.sleep(2)
wiimote.mesg_callback = callback
print("For LED we enable Button")
rpt_mode ^= cwiid.RPT_BTN
# Enable the messages in callback
wiimote.enable(cwiid.FLAG_MESG_IFC);
wiimote.rpt_mode = rpt_mode
gevent.joinall([
gevent.spawn(normal),
gevent.spawn(FirePlace),
gevent.spawn(playSound),
])
def logevent(etype, edata, edesc):
global WHOAMI
global WHATAMI
curtime = int(time.time())
curts = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(curtime))
outrec = OrderedDict()
outrec['ts'] = curts
outrec['host'] = WHOAMI
outrec['script'] = WHATAMI
outrec['event_type'] = etype
outrec['event_data'] = edata
outrec['event_desc'] = edesc
sendlog(outrec, False)
outrec = None
def normal():
global strip
global lasthb
global hbinterval
global soundstart
global curplay
global fireplacestart
global fireplacestarttime
global soundstarttime
global heat
global outtimes
global soundplaying
try:
while True:
curtime = int(time.time())
if curtime - lasthb > hbinterval:
logevent("heartbeat", wiimote.state['battery'], "wii HB")
lasthb = curtime
gevent.sleep(0.001)
except KeyboardInterrupt:
print("Exiting")
setAllLEDS(strip, [0x000000])
strip.setBrightness(0)
strip.show()
sys.exit()
def playSound():
global soundstart
global fireplacestart
global soundplaying
sounds = [0, 0, 0]
channels = 2
rate = 44100
size = 1024
out_stream = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, 'default')
out_stream.setformat(alsaaudio.PCM_FORMAT_S16_LE)
out_stream.setchannels(channels)
out_stream.setrate(rate)
out_stream.setperiodsize(size)
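# The PCM device above is set up for 16-bit little-endian stereo at 44.1 kHz;
# `size` is reused below both as the ALSA period size and as the read-chunk size.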
soundreset = False
soundfiles = ['/home/pi/tool_mantra.wav']
memsound = {}
print("Loading Sound files to memory")
for sf in soundfiles:
f = open(sf, "rb")
sfdata = f.read()
f.close()
memsound[sf] = cStringIO.StringIO(sfdata)
while True:
if soundstart == True:
if soundreset == False:
curfile = random.choice(soundfiles)
memsound[curfile].seek(0)
soundreset = True
soundstart = False
soundplaying = True
fireplacestart = True
data = memsound[curfile].read(size)
while data:
out_stream.write(data)
data = memsound[curfile].read(size)
gevent.sleep(0.001)
soundreset = False
soundplaying = False
else:
soundplaying = False
gevent.sleep(0.001)
def FirePlace():
global numpixels
global COOLING
global strip
global allcolors
global heat
global fireplacestart
global fireplace
# Every cycle there will be some random cooling
# Consider adding a degree of random whether a pixel cools
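# Overview: heat[] holds a per-pixel "temperature" (0-255); each pass randomly
# cools some pixels, then maps every temperature onto the precomputed fire
# gradient (allcolors) and pushes the result to the strip.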
try:
while True:
#If we see start then reset all to 255
if fireplacestart == True:
for i in range(numpixels):
heat[i] = 255
fireplacestart = False
if fireplace == True:
for i in range(numpixels):
if random.randint(0, 255) < COOLING:
tval = heat[i] - random.randint(0, ((COOLING * 10) / numpixels) + 2)
heat[i] = tval
gevent.sleep(random.choice(mydelays))
# This is supposed to be a diffusing effect I think
# k = numpixels -3
# while k > 2:
# if random.randint(0, 255) * 2 < COOLING:
# tval = (heat[k-1] + heat[ k- 2 ] + heat[ k- 2] ) / 3
# heat[k] = tval
# k = k - 1
# gevent.sleep(random.choice(mydelays))
# Now, actually set the pixels based on a scaled representation of all pixels
for j in range(numpixels):
if heat[j] > 255:
heat[j] = 255
if heat[j] < 0:
heat[j] = 0
newcolor = int((heat[j] * len(allcolors)) / 256)
strip.setPixelColor(j, int(allcolors[newcolor].replace("#", ''), 16))
gevent.sleep(random.choice(mydelays))
strip.show()
gevent.sleep(random.choice(mydelays))
else:
gevent.sleep(0.001)
except KeyboardInterrupt:
print("")
print("exiting and shutting down strip")
setAllLEDS(strip, [0x000000])
sys.exit(0)
def sendlog(log, debug):
logurl = "http://hauntcontrol:5050/hauntlogs"
try:
r = requests.post(logurl, json=log)
if debug:
print("Posted to %s status code %s" % (logurl, r.status_code))
print(json.dumps(log))
except:
if debug:
print("Post to %s failed timed out?" % logurl)
print(json.dumps(log))
def setAllLEDS(strip, colorlist):
for x in range(numpixels):
strip.setPixelColor(x, colorlist[0])
strip.show()
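# rms() below computes the root-mean-square amplitude of a 16-bit PCM frame:
# unpack the raw bytes as signed shorts, normalize to [-1, 1], average the
# squares, take the square root, then scale by 10000 for convenience.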
def rms(frame):
SHORT_NORMALIZE = (1.0/32768.0)
CHUNK = 1024
swidth = 2
count = len(frame)/swidth
format = "%dh"%(count)
shorts = struct.unpack( format, frame )
sum_squares = 0.0
for sample in shorts:
n = sample * SHORT_NORMALIZE
sum_squares += n*n
rms = math.pow(sum_squares/count,0.5);
return rms * 10000
def color_dict(gradient):
''' Takes in a list of RGB sub-lists and returns a dictionary of
colors in RGB and hex form for use in a graphing function
defined later on '''
return {"hex":[RGB_to_hex(RGB) for RGB in gradient],
"r":[RGB[0] for RGB in gradient],
"g":[RGB[1] for RGB in gradient],
"b":[RGB[2] for RGB in gradient]}
def linear_gradient(start_hex, finish_hex="#FFFFFF", n=10):
''' returns a gradient list of (n) colors between
two hex colors. start_hex and finish_hex
should be the full six-digit color string,
including the number sign ("#FFFFFF") '''
# Starting and ending colors in RGB form
s = hex_to_RGB(start_hex)
f = hex_to_RGB(finish_hex)
# Initialize a list of the output colors with the starting color
RGB_list = [s]
# Calculate a color at each evenly spaced value of t from 1 to n
for t in range(1, n):
# Interpolate RGB vector for color at the current value of t
curr_vector = [ int(s[j] + (float(t)/(n-1))*(f[j]-s[j])) for j in range(3)]
# Add it to our list of output colors
RGB_list.append(curr_vector)
return color_dict(RGB_list)
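# Example (illustrative): linear_gradient("#000000", "#FFFFFF", 3)["hex"]
# returns ["#000000", "#7f7f7f", "#ffffff"].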
def handle_buttons(buttons):
global heat
global strip
global soundstart
global soundplaying
if (buttons & cwiid.BTN_A):
print("soundplaying in A: %s" % soundplaying)
if soundplaying == False:
soundstart = True
logevent("index_change", "reset", "Reset the index to start loop again")
gevent.sleep(0.001)
def hex_to_RGB(hex):
''' "#FFFFFF" -> [255,255,255] '''
# Pass 16 to the integer function for change of base
return [int(hex[i:i+2], 16) for i in range(1,6,2)]
def RGB_to_hex(RGB):
''' [255,255,255] -> "#FFFFFF" '''
# Components need to be integers for hex to make sense
RGB = [int(x) for x in RGB]
return "#"+"".join(["0{0:x}".format(v) if v < 16 else
"{0:x}".format(v) for v in RGB])
def callback(mesg_list, time):
for mesg in mesg_list:
if mesg[0] == cwiid.MESG_BTN:
handle_buttons(mesg[1])
else:
print 'Unknown Report'
if __name__ == "__main__":
main()
| apache-2.0 |
andresriancho/w3af-webui | src/w3af_webui/migrations/0006_auto__add_field_scan_show_report_time.py | 1 | 10565 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Scan.show_report_time'
db.add_column(u'scans', 'show_report_time', self.gf('django.db.models.fields.DateTimeField')(null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'Scan.show_report_time'
db.delete_column(u'scans', 'show_report_time')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'w3af_webui.profile': {
'Meta': {'object_name': 'Profile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang_ui': ('django.db.models.fields.CharField', [], {'default': "'ru'", 'max_length': '4'}),
'list_per_page': ('django.db.models.fields.PositiveIntegerField', [], {'default': '50'}),
'notification': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"})
},
'w3af_webui.profilestargets': {
'Meta': {'object_name': 'ProfilesTargets', 'db_table': "u'profiles_targets'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scan_profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.ScanProfile']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.Target']", 'blank': 'True'})
},
'w3af_webui.profilestasks': {
'Meta': {'object_name': 'ProfilesTasks', 'db_table': "u'profiles_tasks'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scan_profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.ScanProfile']"}),
'scan_task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.ScanTask']", 'blank': 'True'})
},
'w3af_webui.scan': {
'Meta': {'object_name': 'Scan', 'db_table': "u'scans'"},
'data': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True'}),
'finish': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1991, 1, 1, 0, 0)', 'null': 'True'}),
'pid': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'null': 'True'}),
'result_message': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'null': 'True'}),
'scan_task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.ScanTask']"}),
'show_report_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 5, 26, 19, 7, 15, 663038)'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'w3af_webui.scanprofile': {
'Meta': {'object_name': 'ScanProfile', 'db_table': "u'scan_profiles'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '240'}),
'short_comment': ('django.db.models.fields.CharField', [], {'max_length': '240', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'w3af_profile': ('django.db.models.fields.TextField', [], {'default': "'\\n'", 'blank': 'True'})
},
'w3af_webui.scantask': {
'Meta': {'object_name': 'ScanTask', 'db_table': "u'scan_tasks'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'cron': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'repeat_at': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'repeat_each': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'repeat_each_day': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'repeat_each_weekday': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.Target']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'w3af_webui.target': {
'Meta': {'object_name': 'Target', 'db_table': "u'targets'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_scan': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '240'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'w3af_webui.vulnerability': {
'Meta': {'object_name': 'Vulnerability', 'db_table': "u'vulnerabilities'"},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'http_transaction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'scan': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.Scan']"}),
'severity': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True'}),
'vuln_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['w3af_webui.VulnerabilityType']", 'null': 'True'})
},
'w3af_webui.vulnerabilitytype': {
'Meta': {'object_name': 'VulnerabilityType', 'db_table': "u'vulnerability_types'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'})
}
}
complete_apps = ['w3af_webui']
| gpl-2.0 |
Microsoft/ChakraCore | test/native-tests/test-python/helloWorld.py | 3 | 2388 | #-------------------------------------------------------------------------------------------------------
# Copyright (C) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
#-------------------------------------------------------------------------------------------------------
import sys
import os.path
from ctypes import *
if sys.platform == "darwin":
platform = "dylib"
else:
platform = "so"
build_type = sys.argv[1]
if len(sys.argv) > 2 and sys.argv[2] != None:
so_path = sys.argv[2]
else:
so_path = "../../../out/" + build_type + "/libChakraCore." + platform
if os.path.isfile(so_path):
chakraCore = CDLL(so_path)
else:
print platform + " file not found. It must be a static library"
sys.exit(0) # static build
script = create_string_buffer("(()=>{return \'Hello world!\';})()")
fileName = "sample.js"
runtime = c_void_p()
# Create Javascript Runtime.
chakraCore.JsCreateRuntime(0, 0, byref(runtime));
context = c_void_p()
# Create an execution context.
chakraCore.JsCreateContext(runtime, byref(context));
# Now set the current execution context.
chakraCore.JsSetCurrentContext(context);
fname = c_void_p();
# create JsValueRef from filename
chakraCore.JsCreateString(fileName, len(fileName), byref(fname));
scriptSource = c_void_p();
# Create ArrayBuffer from script source
chakraCore.JsCreateExternalArrayBuffer(script, len(script), 0, 0, byref(scriptSource));
jsResult = c_void_p();
# Run the script.
chakraCore.JsRun(scriptSource, 0, fname, 0, byref(jsResult));
# Convert script result to String in JavaScript; redundant if script returns a String
resultJSString = c_void_p()
chakraCore.JsConvertValueToString(jsResult, byref(resultJSString));
stringLength = c_size_t();
# Get buffer size needed for the result string
chakraCore.JsCopyString(resultJSString, 0, 0,byref(stringLength));
resultSTR = create_string_buffer(stringLength.value + 1); # buffer is big enough to store the result
# Get String from JsValueRef
chakraCore.JsCopyString(resultJSString, byref(resultSTR), stringLength.value, 0);
# Set `null-ending` to the end
resultSTRLastByte = (c_char * stringLength.value).from_address(addressof(resultSTR))
resultSTRLastByte = '\0';
print("Result from ChakraCore: ", resultSTR.value);
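# Expected output (illustrative; Python 2 prints the tuple form):
#   ('Result from ChakraCore: ', 'Hello world!')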
# Dispose runtime
chakraCore.JsDisposeRuntime(runtime);
| mit |
edxnercel/edx-platform | common/lib/xmodule/xmodule/exceptions.py | 171 | 1339 | class InvalidDefinitionError(Exception):
pass
class NotFoundError(Exception):
pass
class ProcessingError(Exception):
'''
An error occurred while processing a request to the XModule.
For example: if an exception occurs while checking a capa problem.
'''
pass
class InvalidVersionError(Exception):
"""
Tried to save an item with a location that a store cannot support (e.g., draft version
for a non-leaf node)
"""
def __init__(self, location):
super(InvalidVersionError, self).__init__()
self.location = location
class SerializationError(Exception):
"""
Thrown when a module cannot be exported to XML
"""
def __init__(self, location, msg):
super(SerializationError, self).__init__(msg)
self.location = location
class UndefinedContext(Exception):
"""
Tried to access an xmodule field which needs a different context (runtime) to have a value.
"""
pass
class HeartbeatFailure(Exception):
"""
Raised when heartbeat fails.
"""
def __unicode__(self, *args, **kwargs):
return self.message
def __init__(self, msg, service):
"""
In addition to a msg, provide the name of the service.
"""
self.service = service
super(HeartbeatFailure, self).__init__(msg)
| agpl-3.0 |
yograterol/django | tests/template_tests/filter_tests/test_length_is.py | 360 | 3204 | from django.template.defaultfilters import length_is
from django.test import SimpleTestCase
from ..utils import setup
class LengthIsTests(SimpleTestCase):
@setup({'length_is01': '{% if some_list|length_is:"4" %}Four{% endif %}'})
def test_length_is01(self):
output = self.engine.render_to_string('length_is01', {'some_list': ['4', None, True, {}]})
self.assertEqual(output, 'Four')
@setup({'length_is02': '{% if some_list|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
def test_length_is02(self):
output = self.engine.render_to_string('length_is02', {'some_list': ['4', None, True, {}, 17]})
self.assertEqual(output, 'Not Four')
@setup({'length_is03': '{% if mystring|length_is:"4" %}Four{% endif %}'})
def test_length_is03(self):
output = self.engine.render_to_string('length_is03', {'mystring': 'word'})
self.assertEqual(output, 'Four')
@setup({'length_is04': '{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
def test_length_is04(self):
output = self.engine.render_to_string('length_is04', {'mystring': 'Python'})
self.assertEqual(output, 'Not Four')
@setup({'length_is05': '{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
def test_length_is05(self):
output = self.engine.render_to_string('length_is05', {'mystring': ''})
self.assertEqual(output, 'Not Four')
@setup({'length_is06': '{% with var|length as my_length %}{{ my_length }}{% endwith %}'})
def test_length_is06(self):
output = self.engine.render_to_string('length_is06', {'var': 'django'})
self.assertEqual(output, '6')
# Boolean return value from length_is should not be coerced to a string
@setup({'length_is07': '{% if "X"|length_is:0 %}Length is 0{% else %}Length not 0{% endif %}'})
def test_length_is07(self):
output = self.engine.render_to_string('length_is07', {})
self.assertEqual(output, 'Length not 0')
@setup({'length_is08': '{% if "X"|length_is:1 %}Length is 1{% else %}Length not 1{% endif %}'})
def test_length_is08(self):
output = self.engine.render_to_string('length_is08', {})
self.assertEqual(output, 'Length is 1')
# Invalid uses that should fail silently.
@setup({'length_is09': '{{ var|length_is:"fish" }}'})
def test_length_is09(self):
output = self.engine.render_to_string('length_is09', {'var': 'django'})
self.assertEqual(output, '')
@setup({'length_is10': '{{ int|length_is:"1" }}'})
def test_length_is10(self):
output = self.engine.render_to_string('length_is10', {'int': 7})
self.assertEqual(output, '')
@setup({'length_is11': '{{ none|length_is:"1" }}'})
def test_length_is11(self):
output = self.engine.render_to_string('length_is11', {'none': None})
self.assertEqual(output, '')
class FunctionTests(SimpleTestCase):
def test_empty_list(self):
self.assertEqual(length_is([], 0), True)
self.assertEqual(length_is([], 1), False)
def test_string(self):
self.assertEqual(length_is('a', 1), True)
self.assertEqual(length_is('a', 10), False)
| bsd-3-clause |
MebiusHKU/flask-web | flask/lib/python2.7/site-packages/sqlalchemy/sql/annotation.py | 60 | 6136 | # sql/annotation.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The :class:`.Annotated` class and related routines; creates hash-equivalent
copies of SQL constructs which contain context-specific markers and
associations.
"""
from .. import util
from . import operators
class Annotated(object):
"""clones a ClauseElement and applies an 'annotations' dictionary.
Unlike regular clones, this clone also mimics __hash__() and
__cmp__() of the original element so that it takes its place
in hashed collections.
A reference to the original element is maintained, for the important
reason of keeping its hash value current. When GC'ed, the
hash value may be reused, causing conflicts.
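For example (illustrative): for a ColumnElement ``col``,
``col._annotate({'key': 'value'})`` returns an Annotated copy that hashes
equal to ``col``, so it can stand in for it in sets and dict keys.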
"""
def __new__(cls, *args):
if not args:
# clone constructor
return object.__new__(cls)
else:
element, values = args
# pull appropriate subclass from registry of annotated
# classes
try:
cls = annotated_classes[element.__class__]
except KeyError:
cls = _new_annotation_type(element.__class__, cls)
return object.__new__(cls)
def __init__(self, element, values):
self.__dict__ = element.__dict__.copy()
self.__element = element
self._annotations = values
self._hash = hash(element)
def _annotate(self, values):
_values = self._annotations.copy()
_values.update(values)
return self._with_annotations(_values)
def _with_annotations(self, values):
clone = self.__class__.__new__(self.__class__)
clone.__dict__ = self.__dict__.copy()
clone._annotations = values
return clone
def _deannotate(self, values=None, clone=True):
if values is None:
return self.__element
else:
_values = self._annotations.copy()
for v in values:
_values.pop(v, None)
return self._with_annotations(_values)
def _compiler_dispatch(self, visitor, **kw):
return self.__element.__class__._compiler_dispatch(
self, visitor, **kw)
@property
def _constructor(self):
return self.__element._constructor
def _clone(self):
clone = self.__element._clone()
if clone is self.__element:
# detect immutable, don't change anything
return self
else:
# update the clone with any changes that have occurred
# to this object's __dict__.
clone.__dict__.update(self.__dict__)
return self.__class__(clone, self._annotations)
def __hash__(self):
return self._hash
def __eq__(self, other):
if isinstance(self.__element, operators.ColumnOperators):
return self.__element.__class__.__eq__(self, other)
else:
return hash(other) == hash(self)
# hard-generate Annotated subclasses. this technique
# is used instead of on-the-fly types (i.e. type.__new__())
# so that the resulting objects are pickleable.
annotated_classes = {}
def _deep_annotate(element, annotations, exclude=None):
"""Deep copy the given ClauseElement, annotating each element
with the given annotations dictionary.
Elements within the exclude collection will be cloned but not annotated.
"""
def clone(elem):
if exclude and \
hasattr(elem, 'proxy_set') and \
elem.proxy_set.intersection(exclude):
newelem = elem._clone()
elif annotations != elem._annotations:
newelem = elem._annotate(annotations)
else:
newelem = elem
newelem._copy_internals(clone=clone)
return newelem
if element is not None:
element = clone(element)
return element
def _deep_deannotate(element, values=None):
"""Deep copy the given element, removing annotations."""
cloned = util.column_dict()
def clone(elem):
# if a values dict is given,
# the elem must be cloned each time it appears,
# as there may be different annotations in source
# elements that are remaining. if totally
# removing all annotations, can assume the same
# slate...
if values or elem not in cloned:
newelem = elem._deannotate(values=values, clone=True)
newelem._copy_internals(clone=clone)
if not values:
cloned[elem] = newelem
return newelem
else:
return cloned[elem]
if element is not None:
element = clone(element)
return element
def _shallow_annotate(element, annotations):
"""Annotate the given ClauseElement and copy its internals so that
internal objects refer to the new annotated object.
Basically used to apply a "dont traverse" annotation to a
selectable, without digging throughout the whole
structure wasting time.
"""
element = element._annotate(annotations)
element._copy_internals()
return element
def _new_annotation_type(cls, base_cls):
if issubclass(cls, Annotated):
return cls
elif cls in annotated_classes:
return annotated_classes[cls]
for super_ in cls.__mro__:
# check if an Annotated subclass more specific than
# the given base_cls is already registered, such
# as AnnotatedColumnElement.
if super_ in annotated_classes:
base_cls = annotated_classes[super_]
break
annotated_classes[cls] = anno_cls = type(
"Annotated%s" % cls.__name__,
(base_cls, cls), {})
globals()["Annotated%s" % cls.__name__] = anno_cls
return anno_cls
def _prepare_annotations(target_hierarchy, base_cls):
stack = [target_hierarchy]
while stack:
cls = stack.pop()
stack.extend(cls.__subclasses__())
_new_annotation_type(cls, base_cls)
| bsd-3-clause |
jakevdp/networkx | examples/algorithms/davis_club.py | 44 | 1064 | #!/usr/bin/env python
"""
Davis Southern Club Women
Shows how to make unipartite projections of the graph and compute the
properties of those graphs.
These data were collected by Davis et al. in the 1930s.
They represent observed attendance at 14 social events by 18 Southern women.
The graph is bipartite (clubs, women).
"""
import networkx as nx
import networkx.algorithms.bipartite as bipartite
G = nx.davis_southern_women_graph()
women = G.graph['top']
clubs = G.graph['bottom']
print("Biadjacency matrix")
print(bipartite.biadjacency_matrix(G,women,clubs))
# project bipartite graph onto women nodes
W = bipartite.projected_graph(G, women)
print('')
print("#Friends, Member")
for w in women:
print('%d %s' % (W.degree(w),w))
# project bipartite graph onto women nodes keeping number of co-occurence
# the degree computed is weighted and counts the total number of shared contacts
W = bipartite.weighted_projected_graph(G, women)
print('')
print("#Friend meetings, Member")
for w in women:
print('%d %s' % (W.degree(w,weight='weight'),w))
| bsd-3-clause |
MadDogTechnology/kops | vendor/github.com/ugorji/go/codec/test.py | 1516 | 4019 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
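# Illustrative invocations (paths/ports are hypothetical; see doMain below):
#   ./test.py testdata /tmp/golden
#   ./test.py rpc-server 9090 10
#   ./test.py rpc-client-go-service 9090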
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464.0,
6464646464.0,
False,
True,
u"null",
None,
u"someday",
1328176922000002000,
u"",
-2206187877999998000,
u"bytestring",
270,
u"none",
-2013855847999995777,
#-6795364578871345152,
]
l1 = [
{ "true": True,
"false": False },
{ "true": u"True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": u"1234567890" },
{ True: "true", 138: False, "false": 200 }
]
l = []
l.extend(l0)
l.append(l0)
l.append(1)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
# packer = msgpack.Packer()
serialized = msgpack.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
f.write(serialized)
f.close()
serialized = cbor.dumps(l[i])
f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
| apache-2.0 |
KenkoGeek/2book | tobook/tobook/settings.py | 1 | 3747 | """
Django settings for tobook project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qaa1vsyq9*z-d350cjb@k8&4()*3t)%6_bj-vz4=tq1hp=0hh3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'material.admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'material.theme.cyan',
'material',
'places',
'object2book',
'booking',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tobook.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tobook.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'to-book',
'USER': 'tobook',
'PASSWORD': '123456',
'HOST': '172.17.0.2',
'PORT': '',
}
}
# Email smtp configurations
"""Declare environment variables first to set these"""
EMAIL_HOST = os.environ.get('SMTP_HOST')
EMAIL_PORT = os.environ.get('SMTP_PORT')
EMAIL_HOST_USER = os.environ.get('SMTP_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('SMTP_HOST_PASSWD')
FROM_EMAIL = os.environ.get('SMTP_FROM_ADDR')
EMAIL_USE_TLS = True
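# e.g. (illustrative values): export SMTP_HOST=smtp.example.com SMTP_PORT=587 \
#      SMTP_HOST_USER=mailer SMTP_HOST_PASSWD=secret SMTP_FROM_ADDR=noreply@example.com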
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
PLACES_MAPS_API_KEY='AIzaSyAVDsYNHfwpeiumJO30Kghw0RjMGwMObT8'
MAP_WIDGET_HEIGHT=480
MAP_OPTIONS={}
MARKER_OPTIONS={}
| mit |
abhijeet9920/python_project | develop/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/euckrfreq.py | 3121 | 45978 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials include literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
EUCKR_TABLE_SIZE = 2352
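# Each entry below maps a character (by its computed order) to a frequency rank;
# lower ranks mean more common characters, and these ranks are what the
# distribution-ratio figures above are computed from.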
# Char to FreqOrder table ,
EUCKRCharToFreqOrder = ( \
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
#Everything below is of no interest for detection purpose
2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,
2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,
2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,
2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704,
2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,
2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734,
2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,
2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,
2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,
2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793,
2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,
2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,
2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,
2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,
1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869,
2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883,
2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,
2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,
2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331,
2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945,
2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,
2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,
2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,
2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008,
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021,
3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,
3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052,
3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,
3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080,
3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,
3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110,
3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124,
3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,
3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,
3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,
3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,
3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,
3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,
3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,
3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,
3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,
3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279,
3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,
3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,
3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,
3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,
3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,
3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389,
3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405,
3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338,
3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432,
3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446,
3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191,
3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471,
3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,
1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499,
1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513,
3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525,
3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,
3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,
3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,
3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587,
3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,
3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,
3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632,
3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,
3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663,
3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,
3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,
3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583,
1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722,
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,
3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753,
3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767,
3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782,
3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796,
3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810,
3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591,
1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836,
3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851,
3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866,
3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880,
3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895,
1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905,
3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,
3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934,
3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603,
3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,
3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978,
3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993,
3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,
4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024,
4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,
1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,
4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069,
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083,
4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,
4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113,
4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610,
4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142,
4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,
4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,
4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,
4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205,
4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220,
4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234,
4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249,
4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,
4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279,
4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294,
4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,
4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,
4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341,
4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,
4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371,
4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,
4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,
4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418,
4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432,
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446,
4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461,
4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,
4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491,
4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,
4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623,
4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536,
4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551,
4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,
4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581,
4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627,
4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,
4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,
4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,
4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657,
4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,
4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687,
1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700,
4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715,
4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,
4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633,
4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758,
4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773,
4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,
4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817,
4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,
4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,
4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,
4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,
4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893,
4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,
4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923,
4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938,
4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,
4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,
4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645,
4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,
5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078,
5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,
1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042,
5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056,
5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,
5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,
5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,
5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118,
1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132,
5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,
5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,
5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,
5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206,
1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218,
5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,
5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249,
5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262,
5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,
5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,
5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308,
5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323,
5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338,
5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,
5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,
5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,
5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400,
5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415,
5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,
5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445,
5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,
5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,
5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491,
5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,
5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,
5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554,
5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,
1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,
5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600,
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615,
5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,
5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,
5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,
1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673,
5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,
5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703,
5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716,
5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729,
5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744,
1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758,
5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773,
1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786,
5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801,
5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,
5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,
5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,
5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862,
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876,
5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889,
5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,
5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687,
5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,
5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963,
5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,
5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,
5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,
6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,
6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,
6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,
6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,
6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086,
6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,
6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,
6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133,
6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147,
6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,
6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,
6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194,
6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,
6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225,
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,
6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256,
6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024
6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,
6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699,
6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,
6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,
6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347,
6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,
6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,
6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,
6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,
6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425,
6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440,
6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,
6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,
6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,
6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266,
6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,
6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,
6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,
1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,
6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,
6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,
6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,
6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644,
1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,
6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,
1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,
6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,
6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,
6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,
1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748,
6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763,
6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,
6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,
6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711,
6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,
6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840,
6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,
6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,
6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,
6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903,
6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918,
6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,
6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,
6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981,
6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996,
6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,
7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,
7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,
7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,
7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,
7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,
7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,
7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,
7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,
7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,
7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,
7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,
7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,
7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216,
7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,
7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,
7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,
7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,
7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,
7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,
7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,
7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,
7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,
7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,
7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407,
7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,
7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,
7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,
7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,
7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,
7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,
7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,
7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,
7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,
7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,
7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,
7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,
7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,
7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,
7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,
7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,
7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,
7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,
7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,
7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,
7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,
7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,
7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807,
7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,
7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,
7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,
7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,
7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,
7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,
7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,
7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,
8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,
8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,
8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,
8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,
8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,
8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,
8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,
8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,
8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,
8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,
8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,
8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,
8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,
8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,
8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,
8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,
8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,
8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,
8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,
8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,
8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,
8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,
8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,
8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,
8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,
8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,
8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,
8736,8737,8738,8739,8740,8741)
# flake8: noqa
| mit |
ekasitk/sahara | sahara/conductor/api.py | 1 | 17877 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles all requests to the conductor service."""
from oslo_config import cfg
from oslo_log import log as logging
from sahara.conductor import manager
from sahara.conductor import resource as r
conductor_opts = [
cfg.BoolOpt('use_local',
default=True,
help='Perform sahara-conductor operations locally.'),
]
conductor_group = cfg.OptGroup(name='conductor',
title='Conductor Options')
CONF = cfg.CONF
CONF.register_group(conductor_group)
CONF.register_opts(conductor_opts, conductor_group)
LOG = logging.getLogger(__name__)
def _get_id(obj):
"""Return object id.
    Allows passing either an object or an object's ID as a parameter when
dealing with relationships.
"""
try:
return obj.id
except AttributeError:
return obj
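# Illustrative usage (not part of the original module): thanks to _get_id, the
# conductor API accepts either a resource object or its bare id interchangeably.
# With a hypothetical context ``ctx`` and ClusterResource ``cluster``:
#
#     api = LocalApi()
#     api.cluster_get(ctx, cluster)       # pass the resource object itself
#     api.cluster_get(ctx, cluster.id)    # or just its id string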
class LocalApi(object):
"""A local version of the conductor API.
It does database updates locally instead of via RPC.
"""
def __init__(self):
self._manager = manager.ConductorManager()
# Cluster ops
@r.wrap(r.ClusterResource)
def cluster_get(self, context, cluster, show_progress=False):
"""Return the cluster or None if it does not exist."""
return self._manager.cluster_get(
context, _get_id(cluster), show_progress)
@r.wrap(r.ClusterResource)
def cluster_get_all(self, context, **kwargs):
"""Get all clusters filtered by **kwargs.
e.g. cluster_get_all(plugin_name='vanilla', hadoop_version='1.1')
"""
return self._manager.cluster_get_all(context, **kwargs)
@r.wrap(r.ClusterResource)
def cluster_create(self, context, values):
"""Create a cluster from the values dictionary.
:returns: the created cluster.
"""
return self._manager.cluster_create(context, values)
@r.wrap(r.ClusterResource)
def cluster_update(self, context, cluster, values):
"""Update the cluster with the given values dictionary.
:returns: the updated cluster.
"""
return self._manager.cluster_update(context, _get_id(cluster),
values)
def cluster_destroy(self, context, cluster):
"""Destroy the cluster or raise if it does not exist.
:returns: None.
"""
self._manager.cluster_destroy(context, _get_id(cluster))
# Node Group ops
def node_group_add(self, context, cluster, values):
"""Create a node group from the values dictionary.
:returns: ID of the created node group.
"""
return self._manager.node_group_add(context, _get_id(cluster), values)
def node_group_update(self, context, node_group, values):
"""Update the node group with the given values dictionary.
:returns: None.
"""
self._manager.node_group_update(context, _get_id(node_group), values)
def node_group_remove(self, context, node_group):
"""Destroy the node group or raise if it does not exist.
:returns: None.
"""
self._manager.node_group_remove(context, _get_id(node_group))
# Instance ops
def instance_add(self, context, node_group, values):
"""Create an instance from the values dictionary.
:returns: ID of the created instance.
"""
return self._manager.instance_add(context, _get_id(node_group), values)
def instance_update(self, context, instance, values):
"""Update the instance with the given values dictionary.
:returns: None.
"""
self._manager.instance_update(context, _get_id(instance), values)
def instance_remove(self, context, instance):
"""Destroy the instance or raise if it does not exist.
:returns: None.
"""
self._manager.instance_remove(context, _get_id(instance))
# Volumes ops
def append_volume(self, context, instance, volume_id):
"""Append volume_id to instance."""
self._manager.append_volume(context, _get_id(instance), volume_id)
def remove_volume(self, context, instance, volume_id):
"""Remove volume_id in instance."""
self._manager.remove_volume(context, _get_id(instance), volume_id)
# Cluster Template ops
@r.wrap(r.ClusterTemplateResource)
def cluster_template_get(self, context, cluster_template):
"""Return the cluster template or None if it does not exist."""
return self._manager.cluster_template_get(context,
_get_id(cluster_template))
@r.wrap(r.ClusterTemplateResource)
def cluster_template_get_all(self, context, **kwargs):
"""Get all cluster templates filtered by **kwargs.
e.g. cluster_template_get_all(plugin_name='vanilla',
hadoop_version='1.1')
"""
return self._manager.cluster_template_get_all(context, **kwargs)
@r.wrap(r.ClusterTemplateResource)
def cluster_template_create(self, context, values):
"""Create a cluster template from the values dictionary.
:returns: the created cluster template
"""
return self._manager.cluster_template_create(context, values)
def cluster_template_destroy(self, context, cluster_template,
ignore_default=False):
"""Destroy the cluster template or raise if it does not exist.
:returns: None
"""
self._manager.cluster_template_destroy(context,
_get_id(cluster_template),
ignore_default)
@r.wrap(r.ClusterTemplateResource)
def cluster_template_update(self, context, id, cluster_template,
ignore_default=False):
"""Update the cluster template or raise if it does not exist.
:returns: the updated cluster template
"""
return self._manager.cluster_template_update(context,
id,
cluster_template,
ignore_default)
# Node Group Template ops
@r.wrap(r.NodeGroupTemplateResource)
def node_group_template_get(self, context, node_group_template):
"""Return the node group template or None if it does not exist."""
return self._manager.node_group_template_get(
context, _get_id(node_group_template))
@r.wrap(r.NodeGroupTemplateResource)
def node_group_template_get_all(self, context, **kwargs):
"""Get all node group templates filtered by **kwargs.
e.g. node_group_template_get_all(plugin_name='vanilla',
hadoop_version='1.1')
"""
return self._manager.node_group_template_get_all(context, **kwargs)
@r.wrap(r.NodeGroupTemplateResource)
def node_group_template_create(self, context, values):
"""Create a node group template from the values dictionary.
:returns: the created node group template
"""
return self._manager.node_group_template_create(context, values)
def node_group_template_destroy(self, context, node_group_template,
ignore_default=False):
"""Destroy the node group template or raise if it does not exist.
:returns: None
"""
self._manager.node_group_template_destroy(context,
_get_id(node_group_template),
ignore_default)
@r.wrap(r.NodeGroupTemplateResource)
def node_group_template_update(self, context, id, values,
ignore_default=False):
"""Update a node group template from the values dictionary.
:returns: the updated node group template
"""
return self._manager.node_group_template_update(context, id, values,
ignore_default)
# Data Source ops
@r.wrap(r.DataSource)
def data_source_get(self, context, data_source):
"""Return the Data Source or None if it does not exist."""
return self._manager.data_source_get(context, _get_id(data_source))
@r.wrap(r.DataSource)
def data_source_get_all(self, context, **kwargs):
"""Get all Data Sources filtered by **kwargs.
e.g. data_source_get_all(name='myfile', type='swift')
"""
return self._manager.data_source_get_all(context, **kwargs)
def data_source_count(self, context, **kwargs):
"""Count Data Sources filtered by **kwargs.
Uses sqlalchemy "in_" clause for any tuple values
Uses sqlalchemy "like" clause for any string values containing %
"""
return self._manager.data_source_count(context, **kwargs)
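    # Illustrative examples (not in the original module) of the filters described
    # above, with a hypothetical context ``ctx``:
    #
    #     api.data_source_count(ctx, type=('swift', 'hdfs'))   # "in_" for tuple values
    #     api.data_source_count(ctx, name='my%')                # "like" for strings with %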
@r.wrap(r.DataSource)
def data_source_create(self, context, values):
"""Create a Data Source from the values dictionary."""
return self._manager.data_source_create(context, values)
def data_source_destroy(self, context, data_source):
"""Destroy the Data Source or raise if it does not exist."""
self._manager.data_source_destroy(context, _get_id(data_source))
@r.wrap(r.DataSource)
def data_source_update(self, context, id, values):
"""Update an existing Data Source"""
return self._manager.data_source_update(context, id, values)
# JobExecution ops
@r.wrap(r.JobExecution)
def job_execution_get(self, context, job_execution):
"""Return the JobExecution or None if it does not exist."""
return self._manager.job_execution_get(context,
_get_id(job_execution))
@r.wrap(r.JobExecution)
def job_execution_get_all(self, context, **kwargs):
"""Get all JobExecutions filtered by **kwargs.
kwargs key values may be the names of fields in a JobExecution
plus the following special values with the indicated meaning:
'cluster.name' -- name of the Cluster referenced by the JobExecution
'job.name' -- name of the Job referenced by the JobExecution
'status' -- JobExecution['info']['status']
e.g. job_execution_get_all(cluster_id=12, input_id=123)
job_execution_get_all(**{'cluster.name': 'test',
'job.name': 'wordcount'})
"""
return self._manager.job_execution_get_all(context, **kwargs)
def job_execution_count(self, context, **kwargs):
"""Count number of JobExecutions filtered by **kwargs.
e.g. job_execution_count(cluster_id=12, input_id=123)
"""
return self._manager.job_execution_count(context, **kwargs)
@r.wrap(r.JobExecution)
def job_execution_create(self, context, values):
"""Create a JobExecution from the values dictionary."""
return self._manager.job_execution_create(context, values)
@r.wrap(r.JobExecution)
def job_execution_update(self, context, job_execution, values):
"""Update the JobExecution or raise if it does not exist."""
return self._manager.job_execution_update(context,
_get_id(job_execution),
values)
def job_execution_destroy(self, context, job_execution):
"""Destroy the JobExecution or raise if it does not exist."""
self._manager.job_execution_destroy(context, _get_id(job_execution))
# Job ops
@r.wrap(r.Job)
def job_get(self, context, job):
"""Return the Job or None if it does not exist."""
return self._manager.job_get(context, _get_id(job))
@r.wrap(r.Job)
def job_get_all(self, context, **kwargs):
"""Get all Jobs filtered by **kwargs.
e.g. job_get_all(name='myjob', type='MapReduce')
"""
return self._manager.job_get_all(context, **kwargs)
@r.wrap(r.Job)
def job_create(self, context, values):
"""Create a Job from the values dictionary."""
return self._manager.job_create(context, values)
def job_update(self, context, job, values):
"""Update the Job or raise if it does not exist."""
return self._manager.job_update(context, _get_id(job),
values)
def job_destroy(self, context, job):
"""Destroy the Job or raise if it does not exist."""
self._manager.job_destroy(context, _get_id(job))
def job_main_name(self, context, job):
"""Return the name of the first main JobBinary or None.
At present the 'mains' element is expected to contain a single element.
In the future if 'mains' contains more than one element we will need
a scheme or convention for retrieving a name from the list of binaries.
:param job: This is expected to be a Job object
"""
if job.mains:
binary = self.job_binary_get(context, job.mains[0])
if binary is not None:
return binary["name"]
return None
def job_lib_names(self, context, job):
"""Return the name of all job lib binaries or an empty list.
:param job: This is expected to be a Job object
"""
lib_ids = job.libs or []
binaries = (self.job_binary_get(context, lib_id) for lib_id in lib_ids)
return [binary["name"] for binary in binaries if binary is not None]
# JobBinary ops
@r.wrap(r.JobBinary)
def job_binary_get_all(self, context, **kwargs):
"""Get all JobBinarys filtered by **kwargs.
e.g. job_binary_get_all(name='wordcount.jar')
"""
return self._manager.job_binary_get_all(context, **kwargs)
@r.wrap(r.JobBinary)
def job_binary_get(self, context, job_binary):
"""Return the JobBinary or None if it does not exist."""
return self._manager.job_binary_get(context, _get_id(job_binary))
@r.wrap(r.JobBinary)
def job_binary_create(self, context, values):
"""Create a JobBinary from the values dictionary."""
return self._manager.job_binary_create(context, values)
def job_binary_destroy(self, context, job_binary):
"""Destroy the JobBinary or raise if it does not exist."""
self._manager.job_binary_destroy(context, _get_id(job_binary))
@r.wrap(r.JobBinary)
def job_binary_update(self, context, id, values):
"""Update a JobBinary from the values dictionary."""
return self._manager.job_binary_update(context, id, values)
# JobBinaryInternal ops
@r.wrap(r.JobBinaryInternal)
def job_binary_internal_get_all(self, context, **kwargs):
"""Get all JobBinaryInternals filtered by **kwargs.
        e.g. job_binary_internal_get_all(name='wordcount.jar')
"""
return self._manager.job_binary_internal_get_all(context, **kwargs)
@r.wrap(r.JobBinaryInternal)
def job_binary_internal_get(self, context, job_binary_internal):
"""Return the JobBinaryInternal or None if it does not exist."""
return self._manager.job_binary_internal_get(
context,
_get_id(job_binary_internal))
@r.wrap(r.JobBinaryInternal)
def job_binary_internal_create(self, context, values):
"""Create a JobBinaryInternal from the values dictionary."""
return self._manager.job_binary_internal_create(context, values)
def job_binary_internal_destroy(self, context, job_binary_internal_id):
"""Destroy the JobBinaryInternal or raise if it does not exist."""
self._manager.job_binary_internal_destroy(
context,
_get_id(job_binary_internal_id))
def job_binary_internal_get_raw_data(self, context,
job_binary_internal_id):
"""Return the binary data field from a JobBinaryInternal."""
return self._manager.job_binary_internal_get_raw_data(
context,
job_binary_internal_id)
# Events ops
def cluster_provision_step_add(self, context, cluster_id, values):
"""Create a provisioning step assigned to cluster from values dict."""
return self._manager.cluster_provision_step_add(
context, cluster_id, values)
def cluster_provision_step_update(self, context, provision_step):
"""Update the cluster provisioning step."""
return self._manager.cluster_provision_step_update(
context, provision_step)
def cluster_provision_progress_update(self, context, cluster_id):
"""Return cluster with provision progress updated field."""
return self._manager.cluster_provision_progress_update(
context, cluster_id)
def cluster_event_add(self, context, provision_step, values):
"""Assign new event to the specified provision step."""
return self._manager.cluster_event_add(
context, provision_step, values)
class RemoteApi(LocalApi):
"""Conductor API that does updates via RPC to the ConductorManager."""
    # TODO(slukjanov): it should override _manager and only the necessary functions
| apache-2.0 |
sdague/home-assistant | homeassistant/components/canary/config_flow.py | 9 | 3968 | """Config flow for Canary."""
import logging
from typing import Any, Dict, Optional
from canary.api import Api
from requests import ConnectTimeout, HTTPError
import voluptuous as vol
from homeassistant.config_entries import CONN_CLASS_CLOUD_POLL, ConfigFlow, OptionsFlow
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import CONF_FFMPEG_ARGUMENTS, DEFAULT_FFMPEG_ARGUMENTS, DEFAULT_TIMEOUT
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
def validate_input(hass: HomeAssistantType, data: dict) -> bool:
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
# constructor does login call
Api(
data[CONF_USERNAME],
data[CONF_PASSWORD],
data.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
)
return True
class CanaryConfigFlow(ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Canary."""
VERSION = 1
CONNECTION_CLASS = CONN_CLASS_CLOUD_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return CanaryOptionsFlowHandler(config_entry)
async def async_step_import(
self, user_input: Optional[ConfigType] = None
) -> Dict[str, Any]:
"""Handle a flow initiated by configuration file."""
return await self.async_step_user(user_input)
async def async_step_user(
self, user_input: Optional[ConfigType] = None
) -> Dict[str, Any]:
"""Handle a flow initiated by the user."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
errors = {}
default_username = ""
if user_input is not None:
if CONF_TIMEOUT not in user_input:
user_input[CONF_TIMEOUT] = DEFAULT_TIMEOUT
default_username = user_input[CONF_USERNAME]
try:
await self.hass.async_add_executor_job(
validate_input, self.hass, user_input
)
except (ConnectTimeout, HTTPError):
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
return self.async_abort(reason="unknown")
else:
return self.async_create_entry(
title=user_input[CONF_USERNAME],
data=user_input,
)
data_schema = {
vol.Required(CONF_USERNAME, default=default_username): str,
vol.Required(CONF_PASSWORD): str,
}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(data_schema),
errors=errors or {},
)
class CanaryOptionsFlowHandler(OptionsFlow):
"""Handle Canary client options."""
def __init__(self, config_entry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input: Optional[ConfigType] = None):
"""Manage Canary options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
options = {
vol.Optional(
CONF_FFMPEG_ARGUMENTS,
default=self.config_entry.options.get(
CONF_FFMPEG_ARGUMENTS, DEFAULT_FFMPEG_ARGUMENTS
),
): str,
vol.Optional(
CONF_TIMEOUT,
default=self.config_entry.options.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
): int,
}
return self.async_show_form(step_id="init", data_schema=vol.Schema(options))
| apache-2.0 |
marcosfede/algorithms | adventofcode/2018/21/d21.py | 1 | 2031 |
def addr(r, a, b):
return r[a]+r[b]
def addi(r, a, b):
return r[a]+b
def mulr(r, a, b):
return r[a]*r[b]
def muli(r, a, b):
return r[a] * b
def banr(r, a, b):
return r[a] & r[b]
def bani(r, a, b):
return r[a] & b
def borr(r, a, b):
return r[a] | r[b]
def bori(r, a, b):
return r[a] | b
def setr(r, a, b):
return r[a]
def seti(r, a, b):
return a
def gtir(r, a, b):
return 1 if a > r[b] else 0
def gtri(r, a, b):
return 1 if r[a] > b else 0
def gtrr(r, a, b):
return 1 if r[a] > r[b] else 0
def eqir(r, a, b):
return 1 if a == r[b] else 0
def eqri(r, a, b):
return 1 if r[a] == b else 0
def eqrr(r, a, b):
return 1 if r[a] == r[b] else 0
def apply_opcode(opcode, registers, a, b, c):
newregisters = registers[:]
newregisters[c] = opcode(registers, a, b)
return newregisters
opcodes = [addr, addi, mulr, muli, banr, bani, borr,
           bori, setr, seti, gtir, gtri, gtrr, eqir, eqri, eqrr]
opcodes_by_name = {op.__name__: op for op in opcodes}
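# Illustrative check (not part of the original solution): the table above maps each
# opcode's mnemonic to its implementation, and apply_opcode returns a fresh register
# file. For example, "seti 5 0 1" stores the literal 5 into register 1:
assert apply_opcode(opcodes_by_name['seti'], [0, 0, 0, 0, 0, 0], 5, 0, 1) == [0, 5, 0, 0, 0, 0]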
program = []
with open('./21/input.txt') as f:
pregister = int(f.readline()[4])
for line in f:
op, a, b, c = line.split(' ')
program.append((op, int(a), int(b), int(c)))
# p1
pointer = 0
registers = [0, 0, 0, 0, 0, 0]
seen = set()
while 0 <= pointer < len(program):
registers[pregister] = pointer
op, a, b, c = program[pointer]
registers = apply_opcode(opcodes_by_name[op], registers, a, b, c)
pointer = registers[pregister]
pointer += 1
    if pointer == 28:
        if registers[5] in seen:
            # part 2: the last new value of r[5] seen before the sequence repeats
            print(last)
            break
        seen.add(registers[5])
        last = registers[5]  # remember the most recent new value
        print(registers[5])  # part 1 is the first value printed here
print(registers[0])
###
# Inspecting the assembly shows that register 0 is only read by the instruction at
# line 28, which implements the termination condition: the program halts when
# r[0] == r[5].
# p1 is the value of r[5] the first time line 28 is reached;
# p2 is the last new value of r[5] before the sequence of values starts to cycle.
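# A possible generalization (sketch only, not part of the original solution): instead
# of hard-coding instruction 28, the halting check could be located by scanning the
# parsed program for the single comparison against register 0, e.g.
#
#     check_ip, watched = next((i, b if a == 0 else a)
#                              for i, (op, a, b, c) in enumerate(program)
#                              if op == 'eqrr' and 0 in (a, b))
#
# after which the loop above would test `pointer == check_ip` and track
# `registers[watched]` instead of register 5.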
| gpl-3.0 |
espadrine/opera | chromium/src/third_party/python_26/Lib/site-packages/win32/Demos/rastest.py | 17 | 4789 | # rastest.py - test/demonstrate the win32ras module.
# Much of the code here contributed by Jethro Wright.
import sys
import string
import os
import win32ras
# Build a little dictionary of RAS states to decent strings.
# eg win32ras.RASCS_OpenPort -> "OpenPort"
stateMap = {}
for name, val in win32ras.__dict__.items():
if name[:6]=="RASCS_":
stateMap[val] = name[6:]
# Use a lock so the callback can tell the main thread when it is finished.
import win32event
callbackEvent = win32event.CreateEvent(None, 0, 0, None)
def Callback( hras, msg, state, error, exterror):
# print "Callback called with ", hras, msg, state, error, exterror
stateName = stateMap.get(state, "Unknown state?")
print "Status is %s (%04lx), error code is %d" % (stateName, state, error)
finished = state in [win32ras.RASCS_Connected]
if finished:
win32event.SetEvent(callbackEvent)
if error != 0 or int( state ) == win32ras.RASCS_Disconnected:
# we know for sure this is a good place to hangup....
print "Detected call failure: %s" % win32ras.GetErrorString( error )
HangUp( hras )
win32event.SetEvent(callbackEvent)
def ShowConnections():
print "All phone-book entries:"
for (name,) in win32ras.EnumEntries():
print " ", name
print "Current Connections:"
for con in win32ras.EnumConnections():
print " ", con
def EditEntry(entryName):
try:
win32ras.EditPhonebookEntry(0,None,entryName)
except win32ras.error, (rc, function, msg):
print "Can not edit/find the RAS entry -", msg
def HangUp( hras ):
# trap potential, irrelevant errors from win32ras....
try:
win32ras.HangUp( hras )
except:
print "Tried to hang up gracefully on error, but didn't work...."
return None
def Connect(entryName, bUseCallback):
if bUseCallback:
theCallback = Callback
win32event.ResetEvent(callbackEvent)
else:
theCallback = None
# in order to *use* the username/password of a particular dun entry, one must
# explicitly get those params under win95....
try:
dp, b = win32ras.GetEntryDialParams( None, entryName )
except:
print "Couldn't find DUN entry: %s" % entryName
else:
hras, rc = win32ras.Dial(None, None, (entryName, "", "", dp[ 3 ], dp[ 4 ], ""),theCallback)
# hras, rc = win32ras.Dial(None, None, (entryName, ),theCallback)
# print hras, rc
        if not bUseCallback and rc != 0:
print "Could not dial the RAS connection:", win32ras.GetErrorString(rc)
hras = HangUp( hras )
# don't wait here if there's no need to....
elif bUseCallback and win32event.WaitForSingleObject(callbackEvent, 60000)!=win32event.WAIT_OBJECT_0:
print "Gave up waiting for the process to complete!"
        # sdk docs state one must explicitly hang up, even if there's an error....
try:
cs = win32ras.GetConnectStatus( hras )
except:
# on error, attempt a hang up anyway....
hras = HangUp( hras )
else:
if int( cs[ 0 ] ) == win32ras.RASCS_Disconnected:
hras = HangUp( hras )
return hras, rc
def Disconnect( rasEntry ):
# Need to find the entry
name = string.lower( rasEntry )
for hcon, entryName, devName, devType in win32ras.EnumConnections():
if string.lower( entryName ) == name:
win32ras.HangUp( hcon )
print "Disconnected from", rasEntry
break
else:
print "Could not find an open connection to", entryName
usage = """
Usage: %s [-s] [-l] [-c connection] [-d connection] [-e entry]
-l : List phone-book entries and current connections.
-s : Show status while connecting/disconnecting (uses callbacks)
-c : Connect to the specified phonebook name.
-d : Disconnect from the specified phonebook name.
-e : Edit the specified phonebook entry.
"""
def main():
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], "slc:d:e:")
except getopt.error, why:
print why
print usage % (os.path.basename(sys.argv[0],))
return
bCallback = 0
if args or not opts:
print usage % (os.path.basename(sys.argv[0],))
return
for opt, val in opts:
if opt=="-s":
bCallback = 1
if opt=="-l":
ShowConnections()
if opt=="-c":
hras, rc = Connect(val, bCallback)
if hras != None:
print "hras: 0x%8lx, rc: 0x%04x" % ( hras, rc )
if opt=="-d":
Disconnect(val)
if opt=="-e":
EditEntry(val)
if __name__=='__main__':
main()
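# Example invocations (illustrative only; entry names depend on the local RAS phonebook):
#
#   python rastest.py -l                 # list phone-book entries and current connections
#   python rastest.py -s -c "My ISP"     # dial the "My ISP" entry, showing status callbacks
#   python rastest.py -d "My ISP"        # hang up the connection to "My ISP"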
| bsd-3-clause |
MER-GROUP/intellij-community | python/lib/Lib/site-packages/django/contrib/messages/tests/user_messages.py | 241 | 2619 | from django import http
from django.contrib.auth.models import User
from django.contrib.messages.storage.user_messages import UserMessagesStorage,\
LegacyFallbackStorage
from django.contrib.messages.tests.base import skipUnlessAuthIsInstalled
from django.contrib.messages.tests.cookie import set_cookie_data
from django.contrib.messages.tests.fallback import FallbackTest
from django.test import TestCase
class UserMessagesTest(TestCase):
def setUp(self):
self.user = User.objects.create(username='tester')
def test_add(self):
storage = UserMessagesStorage(http.HttpRequest())
self.assertRaises(NotImplementedError, storage.add, 'Test message 1')
def test_get_anonymous(self):
# Ensure that the storage still works if no user is attached to the
# request.
storage = UserMessagesStorage(http.HttpRequest())
self.assertEqual(len(storage), 0)
def test_get(self):
storage = UserMessagesStorage(http.HttpRequest())
storage.request.user = self.user
self.user.message_set.create(message='test message')
self.assertEqual(len(storage), 1)
self.assertEqual(list(storage)[0].message, 'test message')
UserMessagesTest = skipUnlessAuthIsInstalled(UserMessagesTest)
class LegacyFallbackTest(FallbackTest, TestCase):
storage_class = LegacyFallbackStorage
def setUp(self):
super(LegacyFallbackTest, self).setUp()
self.user = User.objects.create(username='tester')
def get_request(self, *args, **kwargs):
request = super(LegacyFallbackTest, self).get_request(*args, **kwargs)
request.user = self.user
return request
def test_get_legacy_only(self):
request = self.get_request()
storage = self.storage_class(request)
self.user.message_set.create(message='user message')
# Test that the message actually contains what we expect.
self.assertEqual(len(storage), 1)
self.assertEqual(list(storage)[0].message, 'user message')
def test_get_legacy(self):
request = self.get_request()
storage = self.storage_class(request)
cookie_storage = self.get_cookie_storage(storage)
self.user.message_set.create(message='user message')
set_cookie_data(cookie_storage, ['cookie'])
# Test that the message actually contains what we expect.
self.assertEqual(len(storage), 2)
self.assertEqual(list(storage)[0].message, 'user message')
self.assertEqual(list(storage)[1], 'cookie')
LegacyFallbackTest = skipUnlessAuthIsInstalled(LegacyFallbackTest)
| apache-2.0 |
imtapps/django-imt-fork | tests/regressiontests/string_lookup/models.py | 113 | 1457 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Foo(models.Model):
name = models.CharField(max_length=50)
friend = models.CharField(max_length=50, blank=True)
def __str__(self):
return "Foo %s" % self.name
@python_2_unicode_compatible
class Bar(models.Model):
name = models.CharField(max_length=50)
normal = models.ForeignKey(Foo, related_name='normal_foo')
fwd = models.ForeignKey("Whiz")
back = models.ForeignKey("Foo")
def __str__(self):
        return "Bar %s" % self.name
@python_2_unicode_compatible
class Whiz(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "Whiz %s" % self.name
@python_2_unicode_compatible
class Child(models.Model):
parent = models.OneToOneField('Base')
name = models.CharField(max_length=50)
def __str__(self):
return "Child %s" % self.name
@python_2_unicode_compatible
class Base(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "Base %s" % self.name
@python_2_unicode_compatible
class Article(models.Model):
name = models.CharField(max_length=50)
text = models.TextField()
submitted_from = models.IPAddressField(blank=True, null=True)
def __str__(self):
return "Article %s" % self.name
| bsd-3-clause |
Phoenix1369/site | judge/models/interface.py | 1 | 3940 | import re
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mptt.fields import TreeForeignKey
from mptt.models import MPTTModel
from judge.models.problem import Problem
from judge.models.profile import Profile
__all__ = ['MiscConfig', 'validate_regex', 'NavigationBar', 'BlogPost', 'Solution']
class MiscConfig(models.Model):
key = models.CharField(max_length=30, db_index=True)
value = models.TextField(blank=True)
def __unicode__(self):
return self.key
class Meta:
verbose_name = _('configuration item')
verbose_name_plural = _('miscellaneous configuration')
def validate_regex(regex):
try:
re.compile(regex, re.VERBOSE)
except re.error as e:
raise ValidationError('Invalid regex: %s' % e.message)
class NavigationBar(MPTTModel):
class Meta:
verbose_name = _('navigation item')
verbose_name_plural = _('navigation bar')
class MPTTMeta:
order_insertion_by = ['order']
order = models.PositiveIntegerField(db_index=True, verbose_name=_('order'))
key = models.CharField(max_length=10, unique=True, verbose_name=_('identifier'))
label = models.CharField(max_length=20, verbose_name=_('label'))
path = models.CharField(max_length=255, verbose_name=_('link path'))
regex = models.TextField(verbose_name=_('highlight regex'), validators=[validate_regex])
parent = TreeForeignKey('self', verbose_name=_('parent item'), null=True, blank=True, related_name='children')
def __unicode__(self):
return self.label
@property
def pattern(self, cache={}):
        # A cache with a bad policy is an alias for a memory leak
# Thankfully, there will never be too many regexes to cache.
if self.regex in cache:
return cache[self.regex]
else:
pattern = cache[self.regex] = re.compile(self.regex, re.VERBOSE)
return pattern
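    # Illustrative note (not part of the original model): because ``cache={}`` is bound
    # once at function definition time, every NavigationBar instance shares the same
    # dict, so a given regex source string is compiled at most once per process, e.g.
    #
    #     nav = NavigationBar(regex=r'^/problems/')
    #     nav.pattern   # compiles and caches re.compile(r'^/problems/', re.VERBOSE)
    #     nav.pattern   # later accesses reuse the cached pattern object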
class BlogPost(models.Model):
title = models.CharField(verbose_name=_('post title'), max_length=100)
authors = models.ManyToManyField(Profile, verbose_name=_('authors'), blank=True)
slug = models.SlugField(verbose_name=_('slug'))
visible = models.BooleanField(verbose_name=_('public visibility'), default=False)
sticky = models.BooleanField(verbose_name=_('sticky'), default=False)
publish_on = models.DateTimeField(verbose_name=_('publish after'))
content = models.TextField(verbose_name=_('post content'))
summary = models.TextField(verbose_name=_('post summary'), blank=True)
og_image = models.CharField(verbose_name=_('openGraph image'), default='', max_length=150, blank=True)
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('blog_post', args=(self.id, self.slug))
class Meta:
permissions = (
('see_hidden_post', 'See hidden posts'),
)
verbose_name = _('blog post')
verbose_name_plural = _('blog posts')
class Solution(models.Model):
url = models.CharField('URL', max_length=100, db_index=True, blank=True)
title = models.CharField(max_length=200)
is_public = models.BooleanField(default=False)
publish_on = models.DateTimeField()
content = models.TextField()
authors = models.ManyToManyField(Profile, blank=True)
problem = models.ForeignKey(Problem, on_delete=models.SET_NULL, verbose_name=_('associated problem'),
null=True, blank=True)
def get_absolute_url(self):
return reverse('solution', args=[self.url])
def __unicode__(self):
return self.title
class Meta:
permissions = (
('see_private_solution', 'See hidden solutions'),
)
verbose_name = _('solution')
verbose_name_plural = _('solutions')
| agpl-3.0 |
bccp/nbodykit | nbodykit/tutorials/halos.py | 2 | 4020 | from nbodykit.source.catalog import HaloCatalog, HDFCatalog
from nbodykit import CurrentMPIComm, transform
from nbodykit.cosmology import Cosmology
class DemoHaloCatalog(HaloCatalog):
"""
Create a demo catalog of halos using one of the built-in :mod:`halotools`
catalogs.
.. note::
The first request for a particular catalog will download the data
and cache it in the ``~/.astropy/cache/halotools`` directory.
Parameters
----------
simname : string
Nickname of the simulation. Currently supported simulations are
Bolshoi (simname = ``bolshoi``), Consuelo (simname = ``consuelo``),
MultiDark (simname = ``multidark``), and Bolshoi-Planck (simname = ``bolplanck``).
halo_finder : string
Nickname of the halo-finder, e.g. ``rockstar`` or ``bdm``.
redshift : float
Redshift of the requested snapshot.
Must match one of the available snapshots within ``dz_tol=0.1``,
or a prompt will be issued providing the nearest
available snapshots to choose from.
Examples
--------
>>> from nbodykit.tutorials import DemoHaloCatalog
>>> halos = DemoHaloCatalog('bolshoi', 'rockstar', 0.5)
>>> print(halos.columns)
"""
@CurrentMPIComm.enable
def __init__(self, simname, halo_finder, redshift, comm=None):
from halotools.sim_manager import CachedHaloCatalog, DownloadManager
from halotools.sim_manager.supported_sims import supported_sim_dict
# do some setup
self.comm = comm
meta_cols = ['Lbox', 'redshift', 'particle_mass']
# try to automatically load from the Halotools cache
exception = None
if self.comm.rank == 0:
kws = {'simname':simname, 'halo_finder':halo_finder, 'redshift':redshift}
try:
cached_halos = CachedHaloCatalog(dz_tol=0.1, **kws)
fname = cached_halos.fname # the filename to load
meta = {k:getattr(cached_halos, k) for k in meta_cols}
except Exception as e:
# try to download on the root rank
try:
# download
dl = DownloadManager()
dl.download_processed_halo_table(dz_tol=0.1, **kws)
# access the cached halo catalog and get fname attribute
# NOTE: this does not read the data
cached_halos = CachedHaloCatalog(dz_tol=0.1, **kws)
fname = cached_halos.fname
meta = {k:getattr(cached_halos, k) for k in meta_cols}
except Exception as e:
exception = e
else:
fname = None
meta = None
# re-raise a download error on all ranks if it occurred
exception = self.comm.bcast(exception, root=0)
if exception is not None:
raise exception
# broadcast the file we are loading
fname = self.comm.bcast(fname, root=0)
meta = self.comm.bcast(meta, root=0)
# initialize an HDF catalog and add Position/Velocity
cat = HDFCatalog(fname, comm=comm)
cat['Position'] = transform.StackColumns(cat['halo_x'], cat['halo_y'], cat['halo_z'])
cat['Velocity'] = transform.StackColumns(cat['halo_vx'], cat['halo_vy'], cat['halo_vz'])
# get the cosmology from Halotools
cosmo = supported_sim_dict[simname]().cosmology # this is astropy cosmology
cosmo = Cosmology.from_astropy(cosmo)
# initialize the HaloCatalog
HaloCatalog.__init__(self, cat, cosmo, meta['redshift'], mdef='vir', mass='halo_mvir')
# add some meta-data
# NOTE: all Halotools catalogs have these attributes
self.attrs['BoxSize'] = meta['Lbox']
self.attrs['redshift'] = meta['redshift']
self.attrs['particle_mass'] = meta['particle_mass']
# save the cosmology
self.cosmo = cosmo
self.attrs['cosmo'] = dict(self.cosmo)
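# Hedged sketch (illustrative only, not part of nbodykit): the rank-0
# "load locally or download, then broadcast" pattern used in
# ``DemoHaloCatalog.__init__`` above, isolated into a hypothetical helper.
# Assumes ``comm`` is an mpi4py-style communicator with ``rank`` and ``bcast``.
def _load_on_root_and_broadcast(comm, loader):
    """Run ``loader()`` on rank 0 only; share the result, or re-raise its error on every rank."""
    result, error = None, None
    if comm.rank == 0:
        try:
            result = loader()
        except Exception as e:
            error = e
    # every rank learns whether rank 0 failed, so they all raise together
    error = comm.bcast(error, root=0)
    if error is not None:
        raise error
    return comm.bcast(result, root=0)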
| gpl-3.0 |
stef1927/cassandra-dtest | thrift_test.py | 5 | 148047 | import re
import struct
import time
import uuid
import pytest
import logging
import codecs
from thrift.protocol import TBinaryProtocol
from thrift.Thrift import TApplicationException
from thrift.transport import TSocket, TTransport
from tools.assertions import assert_length_equal
from tools.misc import ImmutableMapping
from dtest_setup_overrides import DTestSetupOverrides
from dtest import Tester
from thrift_bindings.thrift010 import Cassandra
from thrift_bindings.thrift010.Cassandra import (CfDef, Column, ColumnDef,
ColumnOrSuperColumn, ColumnParent,
ColumnPath, ColumnSlice,
ConsistencyLevel, CounterColumn,
Deletion, IndexExpression,
IndexOperator, IndexType,
InvalidRequestException, KeyRange,
KeySlice, KsDef, MultiSliceRequest,
Mutation, NotFoundException,
SlicePredicate, SliceRange,
SuperColumn)
from tools.assertions import (assert_all, assert_none, assert_one)
MAX_TTL = 20 * 365 * 24 * 60 * 60 # 20 years in seconds
since = pytest.mark.since
logger = logging.getLogger(__name__)
utf8encoder = codecs.getencoder('utf-8')
def utf8encode(str):
return utf8encoder(str)[0]
def get_thrift_client(host='127.0.0.1', port=9160):
socket = TSocket.TSocket(host, port)
transport = TTransport.TFramedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Cassandra.Client(protocol)
client.transport = transport
return client
client = None
pid_fname = "system_test.pid"
def pid():
return int(open(pid_fname).read())
@since('2.0', max_version='4')
class TestThrift(Tester):
@pytest.fixture(scope='function', autouse=True)
def fixture_dtest_setup_overrides(self, dtest_config):
dtest_setup_overrides = DTestSetupOverrides()
"""
@jira_ticket CASSANDRA-7653
"""
dtest_setup_overrides.cluster_options = ImmutableMapping(
{'partitioner': 'org.apache.cassandra.dht.ByteOrderedPartitioner',
'start_rpc': 'true'})
return dtest_setup_overrides
@pytest.fixture(scope='function', autouse=True)
def fixture_set_cluster_settings(self, fixture_dtest_setup):
fixture_dtest_setup.cluster.populate(1)
node1, = fixture_dtest_setup.cluster.nodelist()
# If vnodes are not used, we must set our own initial_token,
# because ccm will not set a hex token for ByteOrderedPartitioner
# automatically. It does not matter what token we set as we only
# ever use one node.
if not fixture_dtest_setup.dtest_config.use_vnodes:
node1.set_configuration_options(values={'initial_token': 'abcd'})
# CASSANDRA-14092 - prevent max ttl tests from failing
fixture_dtest_setup.cluster.start(jvm_args=['-Dcassandra.expiration_date_overflow_policy=CAP',
'-Dcassandra.expiration_overflow_warning_interval_minutes=0'],
wait_for_binary_proto=True)
fixture_dtest_setup.cluster.nodelist()[0].watch_log_for("Listening for thrift clients") # Wait for the thrift port to open
time.sleep(0.1)
# this is ugly, but the whole test module is written against a global client
global client
client = get_thrift_client()
client.transport.open()
self.define_schema()
yield client
client.transport.close()
def define_schema(self):
keyspace1 = Cassandra.KsDef('Keyspace1', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'},
cf_defs=[
Cassandra.CfDef('Keyspace1', 'Standard1'),
Cassandra.CfDef('Keyspace1', 'Standard2'),
Cassandra.CfDef('Keyspace1', 'Standard3', column_metadata=[Cassandra.ColumnDef(utf8encode('c1'), 'AsciiType'), Cassandra.ColumnDef(utf8encode('c2'), 'AsciiType')]),
Cassandra.CfDef('Keyspace1', 'Standard4', column_metadata=[Cassandra.ColumnDef(utf8encode('c1'), 'AsciiType')]),
Cassandra.CfDef('Keyspace1', 'StandardLong1', comparator_type='LongType'),
Cassandra.CfDef('Keyspace1', 'StandardInteger1', comparator_type='IntegerType'),
Cassandra.CfDef('Keyspace1', 'StandardComposite', comparator_type='CompositeType(AsciiType, AsciiType)'),
Cassandra.CfDef('Keyspace1', 'Super1', column_type='Super', subcomparator_type='LongType'),
Cassandra.CfDef('Keyspace1', 'Super2', column_type='Super', subcomparator_type='LongType'),
Cassandra.CfDef('Keyspace1', 'Super3', column_type='Super', comparator_type='LongType', subcomparator_type='UTF8Type'),
Cassandra.CfDef('Keyspace1', 'Counter1', default_validation_class='CounterColumnType'),
Cassandra.CfDef('Keyspace1', 'SuperCounter1', column_type='Super', default_validation_class='CounterColumnType'),
Cassandra.CfDef('Keyspace1', 'Indexed1', column_metadata=[Cassandra.ColumnDef(utf8encode('birthdate'), 'LongType', Cassandra.IndexType.KEYS, 'birthdate_index')]),
Cassandra.CfDef('Keyspace1', 'Indexed2', comparator_type='TimeUUIDType', column_metadata=[Cassandra.ColumnDef(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, 'LongType', Cassandra.IndexType.KEYS)]),
Cassandra.CfDef('Keyspace1', 'Indexed3', comparator_type='TimeUUIDType', column_metadata=[Cassandra.ColumnDef(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, 'UTF8Type', Cassandra.IndexType.KEYS)]),
Cassandra.CfDef('Keyspace1', 'Indexed4', column_metadata=[Cassandra.ColumnDef(utf8encode('a'), 'LongType', Cassandra.IndexType.KEYS, 'a_index'), Cassandra.ColumnDef(utf8encode('z'), 'UTF8Type')]),
Cassandra.CfDef('Keyspace1', 'Expiring', default_time_to_live=2),
Cassandra.CfDef('Keyspace1', 'ExpiringMaxTTL', default_time_to_live=MAX_TTL)
])
keyspace2 = Cassandra.KsDef('Keyspace2', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'},
cf_defs=[
Cassandra.CfDef('Keyspace2', 'Standard1'),
Cassandra.CfDef('Keyspace2', 'Standard3'),
Cassandra.CfDef('Keyspace2', 'Super3', column_type='Super', subcomparator_type='BytesType'),
Cassandra.CfDef('Keyspace2', 'Super4', column_type='Super', subcomparator_type='TimeUUIDType'), ])
for ks in [keyspace1, keyspace2]:
client.system_add_keyspace(ks)
def i64(n):
return _i64(n)
def i32(n):
return _i32(n)
def i16(n):
return _i16(n)
def composite(item1, item2=None, eoc=b'\x00'):
if isinstance(item1, str):
item1 = utf8encode(item1)
if isinstance(item2, str):
item2 = utf8encode(item2)
if isinstance(eoc, str):
eoc = utf8encode(eoc)
packed = _i16(len(item1)) + item1 + eoc
if item2 is not None:
packed += _i16(len(item2)) + item2
packed += eoc
return packed
def _i64(n):
return struct.pack('>q', n) # big endian = network order
def _i32(n):
return struct.pack('>i', n) # big endian = network order
def _i16(n):
return struct.pack('>h', n) # big endian = network order
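# Hedged illustration (helper added for clarity, never called by the tests):
# the packers above emit fixed-width big-endian bytes, and composite() emits
# each component length-prefixed and followed by an end-of-component byte.
def _packing_examples():
    assert _i16(1) == b'\x00\x01'
    assert _i32(1) == b'\x00\x00\x00\x01'
    assert _i64(4) == b'\x00\x00\x00\x00\x00\x00\x00\x04'
    assert composite('a', 'b') == b'\x00\x01a\x00' + b'\x00\x01b\x00'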
_SIMPLE_COLUMNS = [Column(utf8encode('c1'), utf8encode('value1'), 0),
Column(utf8encode('c2'), utf8encode('value2'), 0)]
_SUPER_COLUMNS = [SuperColumn(name=utf8encode('sc1'), columns=[Column(_i64(4), utf8encode('value4'), 0)]),
SuperColumn(name=utf8encode('sc2'), columns=[Column(_i64(5), utf8encode('value5'), 0),
Column(_i64(6), utf8encode('value6'), 0)])]
def _assert_column(column_family, key, column, value, ts=0):
if isinstance(key, str):
key = utf8encode(key)
if isinstance(value, str):
value = utf8encode(value)
try:
assert client.get(key, ColumnPath(column_family, column=column), ConsistencyLevel.ONE).column == Column(column, value, ts)
except NotFoundException:
raise Exception('expected %s:%s:%s:%s, but was not present' % (column_family, key, column, value))
def _assert_columnpath_exists(key, column_path):
if isinstance(key, str):
key = utf8encode(key)
try:
assert client.get(key, column_path, ConsistencyLevel.ONE)
except NotFoundException:
raise Exception('expected %s with %s but was not present.' % (key, column_path))
def _assert_no_columnpath(key, column_path):
if isinstance(key, str):
key = utf8encode(key)
try:
client.get(key, column_path, ConsistencyLevel.ONE)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def _insert_simple():
return _insert_multi([utf8encode('key1')])
def _insert_multi(keys):
CL = ConsistencyLevel.ONE
for key in keys:
if isinstance(key, str):
key = utf8encode(key)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('c1'), utf8encode('value1'), 0), CL)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('c2'), utf8encode('value2'), 0), CL)
def _insert_batch():
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS],
'Standard2': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]}
client.batch_mutate({utf8encode('key1'): cfmap}, ConsistencyLevel.ONE)
def _big_slice(key, column_parent):
if isinstance(key, str):
key = utf8encode(key)
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000))
return client.get_slice(key, column_parent, p, ConsistencyLevel.ONE)
def _big_multislice(keys, column_parent):
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000))
return client.multiget_slice(keys, column_parent, p, ConsistencyLevel.ONE)
def _verify_batch():
_verify_simple()
L = [result.column
for result in _big_slice(utf8encode('key1'), ColumnParent('Standard2'))]
assert L == _SIMPLE_COLUMNS, L
def _verify_simple():
assert client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('c1')), ConsistencyLevel.ONE).column == Column(utf8encode('c1'), utf8encode('value1'), 0)
L = [result.column
for result in _big_slice(utf8encode('key1'), ColumnParent('Standard1'))]
assert L == _SIMPLE_COLUMNS, L
def _insert_super(key='key1'):
if isinstance(key, str):
key = utf8encode(key)
client.insert(key, ColumnParent('Super1', utf8encode('sc1')), Column(_i64(4), utf8encode('value4'), 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', utf8encode('sc2')), Column(_i64(5), utf8encode('value5'), 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', utf8encode('sc2')), Column(_i64(6), utf8encode('value6'), 0), ConsistencyLevel.ONE)
def _insert_range():
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c1'), utf8encode('value1'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c2'), utf8encode('value2'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c3'), utf8encode('value3'), 0), ConsistencyLevel.ONE)
def _verify_range():
p = SlicePredicate(slice_range=SliceRange(utf8encode('c1'), utf8encode('c2'), False, 1000))
result = client.get_slice(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == utf8encode('c1')
assert result[1].column.name == utf8encode('c2')
p = SlicePredicate(slice_range=SliceRange(utf8encode('c3'), utf8encode('c2'), True, 1000))
result = client.get_slice(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == utf8encode('c3')
assert result[1].column.name == utf8encode('c2')
p = SlicePredicate(slice_range=SliceRange(utf8encode('a'), utf8encode('z'), False, 1000))
result = client.get_slice(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange(utf8encode('a'), utf8encode('z'), False, 2))
result = client.get_slice(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _set_keyspace(keyspace):
client.set_keyspace(keyspace)
def _insert_super_range():
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc1')), Column(_i64(4), utf8encode('value4'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(5), utf8encode('value5'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(6), utf8encode('value6'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc3')), Column(_i64(7), utf8encode('value7'), 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_super_range():
p = SlicePredicate(slice_range=SliceRange(utf8encode('sc2'), utf8encode('sc3'), False, 2))
result = client.get_slice(utf8encode('key1'), ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == utf8encode('sc2')
assert result[1].super_column.name == utf8encode('sc3')
p = SlicePredicate(slice_range=SliceRange(utf8encode('sc3'), utf8encode('sc2'), True, 2))
result = client.get_slice(utf8encode('key1'), ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == utf8encode('sc3')
assert result[1].super_column.name == utf8encode('sc2')
def _verify_super(supercf='Super1', key='key1'):
if isinstance(key, str):
key = utf8encode(key)
assert client.get(key, ColumnPath(supercf, utf8encode('sc1'), _i64(4)), ConsistencyLevel.ONE).column == Column(_i64(4), utf8encode('value4'), 0)
slice = [result.super_column
for result in _big_slice(key, ColumnParent('Super1'))]
assert slice == _SUPER_COLUMNS, slice
def _expect_exception(fn, type_):
try:
r = fn()
except type_ as t:
return t
else:
raise Exception('expected %s; got %s' % (type_.__name__, r))
def _expect_missing(fn):
_expect_exception(fn, NotFoundException)
def get_range_slice(client, parent, predicate, start, end, count, cl, row_filter=None):
kr = KeyRange(start, end, count=count, row_filter=row_filter)
return client.get_range_slices(parent, predicate, kr, cl)
def _insert_six_columns(key='abc'):
if isinstance(key, str):
key = utf8encode(key)
CL = ConsistencyLevel.ONE
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('a'), utf8encode('1'), 0), CL)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('b'), utf8encode('2'), 0), CL)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('c'), utf8encode('3'), 0), CL)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('d'), utf8encode('4'), 0), CL)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('e'), utf8encode('5'), 0), CL)
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('f'), utf8encode('6'), 0), CL)
def _big_multi_slice(key='abc'):
if isinstance(key, str):
key = utf8encode(key)
c1 = ColumnSlice()
c1.start = utf8encode('a')
c1.finish = utf8encode('c')
c2 = ColumnSlice()
c2.start = utf8encode('e')
c2.finish = utf8encode('f')
m = MultiSliceRequest()
m.key = key
m.column_parent = ColumnParent('Standard1')
m.column_slices = [c1, c2]
m.reversed = False
m.count = 10
m.consistency_level = ConsistencyLevel.ONE
return client.get_multi_slice(m)
_MULTI_SLICE_COLUMNS = [Column(utf8encode('a'), utf8encode('1'), 0), Column(utf8encode('b'), utf8encode('2'), 0), Column(utf8encode('c'), utf8encode('3'), 0), Column(utf8encode('e'), utf8encode('5'), 0), Column(utf8encode('f'), utf8encode('6'), 0)]
@since('2.0', max_version='4')
class TestMutations(TestThrift):
def truncate_all(self, *table_names):
for table in table_names:
client.truncate(table)
def test_insert(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_simple()
_verify_simple()
def test_empty_slice(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard2', 'Super1')
assert _big_slice(utf8encode('key1'), ColumnParent('Standard2')) == []
assert _big_slice(utf8encode('key1'), ColumnParent('Super1')) == []
def test_cas(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Standard3', 'Standard4')
def cas(expected, updates, column_family):
return client.cas(utf8encode('key1'), column_family, expected, updates, ConsistencyLevel.SERIAL, ConsistencyLevel.QUORUM)
def test_cas_operations(first_columns, second_columns, column_family):
# partition should be empty, so cas expecting any existing values should fail
cas_result = cas(first_columns, first_columns, column_family)
assert not cas_result.success
assert len(cas_result.current_values) == 0, cas_result
# cas of empty columns -> first_columns should succeed
# and the reading back from the table should match first_columns
assert cas([], first_columns, column_family).success
result = [cosc.column for cosc in _big_slice(utf8encode('key1'), ColumnParent(column_family))]
# CAS will use its own timestamp, so we can't just compare result == _SIMPLE_COLUMNS
assert dict((c.name, c.value) for c in result) == dict((ex.name, ex.value) for ex in first_columns)
# now that the partition has been updated, repeating the
# operation which expects it to be empty should not succeed
cas_result = cas([], first_columns, column_family)
assert not cas_result.success
# When we CAS for non-existence, current_values is the first live column of the row
assert dict((c.name, c.value) for c in cas_result.current_values) == {first_columns[0].name: first_columns[0].value}, cas_result
# CL.SERIAL for reads
assert client.get(utf8encode('key1'), ColumnPath(column_family, column=first_columns[0].name), ConsistencyLevel.SERIAL).column.value == first_columns[0].value
# cas first_columns -> second_columns should succeed
assert cas(first_columns, second_columns, column_family).success
# as before, an operation with an incorrect expectation should fail
cas_result = cas(first_columns, second_columns, column_family)
assert not cas_result.success
updated_columns = [Column(utf8encode('c1'), utf8encode('value101'), 1),
Column(utf8encode('c2'), utf8encode('value102'), 1)]
logger.debug("Testing CAS operations on dynamic cf")
test_cas_operations(_SIMPLE_COLUMNS, updated_columns, 'Standard1')
logger.debug("Testing CAS operations on static cf")
test_cas_operations(_SIMPLE_COLUMNS, updated_columns, 'Standard3')
logger.debug("Testing CAS on mixed static/dynamic cf")
test_cas_operations(_SIMPLE_COLUMNS, updated_columns, 'Standard4')
def test_missing_super(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc1'), _i64(1)), ConsistencyLevel.ONE))
_insert_super()
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc1'), _i64(1)), ConsistencyLevel.ONE))
def test_count(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Standard2', 'Super1')
_insert_simple()
_insert_super()
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000))
assert client.get_count(utf8encode('key1'), ColumnParent('Standard2'), p, ConsistencyLevel.ONE) == 0
assert client.get_count(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 2
assert client.get_count(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), p, ConsistencyLevel.ONE) == 2
assert client.get_count(utf8encode('key1'), ColumnParent('Super1'), p, ConsistencyLevel.ONE) == 2
# Let's make that a little more interesting
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c3'), utf8encode('value3'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c4'), utf8encode('value4'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c5'), utf8encode('value5'), 0), ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange(utf8encode('c2'), utf8encode('c4'), False, 1000))
assert client.get_count(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 3
def test_count_paging(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_simple()
# Exercise paging
column_parent = ColumnParent('Standard1')
# Paging for small columns starts at 1024 columns
columns_to_insert = [Column(utf8encode('c%d' % (i,)), utf8encode('value%d' % (i,)), 0) for i in range(3, 1026)]
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in columns_to_insert]}
client.batch_mutate({utf8encode('key1') : cfmap}, ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 2000))
assert client.get_count(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE) == 1025
# Ensure that the count limit isn't clobbered
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 10))
assert client.get_count(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 10
# test that get_count() works correctly with 'count' settings around page size (CASSANDRA-4833)
def test_count_around_page_size(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
def slice_predicate(count):
return SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, count))
key = utf8encode('key1')
parent = ColumnParent('Standard1')
cl = ConsistencyLevel.ONE
for i in range(0, 3050):
client.insert(key, parent, Column(utf8encode(str(i)), utf8encode(''), 0), cl)
# same as page size
assert client.get_count(key, parent, slice_predicate(1024), cl) == 1024
# 1 above page size
assert client.get_count(key, parent, slice_predicate(1025), cl) == 1025
# above number of columns
assert client.get_count(key, parent, slice_predicate(4000), cl) == 3050
# same as number of columns
assert client.get_count(key, parent, slice_predicate(3050), cl) == 3050
# 1 above number of columns
assert client.get_count(key, parent, slice_predicate(3051), cl) == 3050
def test_super_insert(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_insert_super()
_verify_super()
def test_super_get(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_insert_super()
result = client.get(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc2')), ConsistencyLevel.ONE).super_column
assert result == _SUPER_COLUMNS[1], result
def test_super_subcolumn_limit(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_insert_super()
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1))
column_parent = ColumnParent('Super1', utf8encode('sc2'))
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(5), utf8encode('value5'), 0)], slice
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), True, 1))
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(6), utf8encode('value6'), 0)], slice
def test_long_order(self):
_set_keyspace('Keyspace1')
self.truncate_all('StandardLong1')
def long_xrange(start, stop, step):
i = start
while i < stop:
yield i
i += step
L = []
for i in long_xrange(0, 104294967296, 429496729):
name = _i64(i)
client.insert(utf8encode('key1'), ColumnParent('StandardLong1'), Column(name, utf8encode('v'), 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice(utf8encode('key1'), ColumnParent('StandardLong1'))]
assert slice == L, slice
def test_integer_order(self):
_set_keyspace('Keyspace1')
self.truncate_all('StandardInteger1')
def long_xrange(start, stop, step):
i = start
while i >= stop:
yield i
i -= step
L = []
for i in long_xrange(104294967296, 0, 429496729):
name = _i64(i)
client.insert(utf8encode('key1'), ColumnParent('StandardInteger1'), Column(name, utf8encode('v'), 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice(utf8encode('key1'), ColumnParent('StandardInteger1'))]
L.sort()
assert slice == L, slice
def test_time_uuid(self):
_set_keyspace('Keyspace2')
self.truncate_all('Super4')
import uuid
L = []
# 100 isn't enough to fail reliably if the comparator is borked
for i in range(500):
L.append(uuid.uuid1())
client.insert(utf8encode('key1'), ColumnParent('Super4', utf8encode('sc1')), Column(L[-1].bytes, utf8encode('value%s' % i), i), ConsistencyLevel.ONE)
slice = _big_slice(utf8encode('key1'), ColumnParent('Super4', utf8encode('sc1')))
assert len(slice) == 500, len(slice)
for i in range(500):
u = slice[i].column
assert u.value == utf8encode('value%s' % i)
assert u.name == L[i].bytes
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), True, 1))
column_parent = ColumnParent('Super4', utf8encode('sc1'))
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[-1].bytes, utf8encode('value499'), 499)], slice
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), L[2].bytes, False, 1000))
column_parent = ColumnParent('Super4', utf8encode('sc1'))
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[0].bytes, utf8encode('value0'), 0),
Column(L[1].bytes, utf8encode('value1'), 1),
Column(L[2].bytes, utf8encode('value2'), 2)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, utf8encode(''), True, 1000))
column_parent = ColumnParent('Super4', utf8encode('sc1'))
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, utf8encode('value2'), 2),
Column(L[1].bytes, utf8encode('value1'), 1),
Column(L[0].bytes, utf8encode('value0'), 0)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, utf8encode(''), False, 1))
column_parent = ColumnParent('Super4', utf8encode('sc1'))
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, utf8encode('value2'), 2)], slice
def test_long_remove(self):
_set_keyspace('Keyspace1')
self.truncate_all('StandardLong1')
column_parent = ColumnParent('StandardLong1')
sp = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1))
for i in range(10):
parent = ColumnParent('StandardLong1')
client.insert(utf8encode('key1'), parent, Column(_i64(i), utf8encode('value1'), 10 * i), ConsistencyLevel.ONE)
client.remove(utf8encode('key1'), ColumnPath('StandardLong1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice(utf8encode('key1'), column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert(utf8encode('key1'), parent, Column(_i64(i), utf8encode('value2'), 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), utf8encode('value2'), 10 * i + 2)], (slice, i)
def test_integer_remove(self):
_set_keyspace('Keyspace1')
self.truncate_all('StandardInteger1')
column_parent = ColumnParent('StandardInteger1')
sp = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1))
for i in range(10):
parent = ColumnParent('StandardInteger1')
client.insert(utf8encode('key1'), parent, Column(_i64(i), utf8encode('value1'), 10 * i), ConsistencyLevel.ONE)
client.remove(utf8encode('key1'), ColumnPath('StandardInteger1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice(utf8encode('key1'), column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert(utf8encode('key1'), parent, Column(_i64(i), utf8encode('value2'), 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice(utf8encode('key1'), column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), utf8encode('value2'), 10 * i + 2)], (slice, i)
def test_batch_insert(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Standard2')
_insert_batch()
_verify_batch()
def test_batch_mutate_standard_columns(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Standard2')
column_families = ['Standard1', 'Standard2']
keys = [utf8encode('key_%d' % i) for i in range(27, 32)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, utf8encode('c1'), utf8encode('value1'))
def test_batch_mutate_remove_standard_columns(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Standard2')
column_families = ['Standard1', 'Standard2']
keys = [utf8encode('key_%d' % i) for i in range(11, 21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20, predicate=SlicePredicate(column_names=[c.name]))) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_standard_row(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Standard2')
column_families = ['Standard1', 'Standard2']
keys = [utf8encode('key_%d' % i) for i in range(11, 21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20))]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_super_columns_with_standard_under(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1', 'Super2')
column_families = ['Super1', 'Super2']
keys = [utf8encode('key_%d' % i) for i in range(11, 21)]
_insert_super()
mutations = []
for sc in _SUPER_COLUMNS:
names = []
for c in sc.columns:
names.append(c.name)
mutations.append(Mutation(deletion=Deletion(20, super_column=c.name, predicate=SlicePredicate(column_names=names))))
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, super_column=sc.name, column=c.name))
def test_batch_mutate_remove_super_columns_with_none_given_underneath(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
keys = [utf8encode('key_%d' % i) for i in range(17, 21)]
for key in keys:
_insert_super(key)
mutations = []
for sc in _SUPER_COLUMNS:
mutations.append(Mutation(deletion=Deletion(20,
super_column=sc.name)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
def test_batch_mutate_remove_super_columns_entire_row(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
keys = [utf8encode('key_%d' % i) for i in range(17, 21)]
for key in keys:
_insert_super(key)
mutations = []
mutations.append(Mutation(deletion=Deletion(20)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
# known failure: see CASSANDRA-10046
def test_batch_mutate_remove_slice_standard(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
columns = [Column(utf8encode('c1'), utf8encode('value1'), 0),
Column(utf8encode('c2'), utf8encode('value2'), 0),
Column(utf8encode('c3'), utf8encode('value3'), 0),
Column(utf8encode('c4'), utf8encode('value4'), 0),
Column(utf8encode('c5'), utf8encode('value5'), 0)]
for column in columns:
client.insert(utf8encode('key'), ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
d = Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(start=utf8encode('c2'), finish=utf8encode('c4'))))
client.batch_mutate({utf8encode('key'): {'Standard1': [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
_assert_columnpath_exists(utf8encode('key'), ColumnPath('Standard1', column=utf8encode('c1')))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Standard1', column=utf8encode('c2')))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Standard1', column=utf8encode('c3')))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Standard1', column=utf8encode('c4')))
_assert_columnpath_exists(utf8encode('key'), ColumnPath('Standard1', column=utf8encode('c5')))
# known failure: see CASSANDRA-10046
def test_batch_mutate_remove_slice_of_entire_supercolumns(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
columns = [SuperColumn(name=utf8encode('sc1'), columns=[Column(_i64(1), utf8encode('value1'), 0)]),
SuperColumn(name=utf8encode('sc2'),
columns=[Column(_i64(2), utf8encode('value2') , 0), Column(_i64(3), utf8encode('value3') , 0)]),
SuperColumn(name=utf8encode('sc3'), columns=[Column(_i64(4), utf8encode('value4'), 0)]),
SuperColumn(name=utf8encode('sc4'),
columns=[Column(_i64(5), utf8encode('value5') , 0), Column(_i64(6), utf8encode('value6') , 0)]),
SuperColumn(name=utf8encode('sc5'), columns=[Column(_i64(7), utf8encode('value7'), 0)])]
for column in columns:
for subcolumn in column.columns:
client.insert(utf8encode('key'), ColumnParent('Super1', column.name), subcolumn, ConsistencyLevel.ONE)
d = Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(start=utf8encode('sc2') , finish=utf8encode('sc4') )))
client.batch_mutate({utf8encode('key'): {'Super1': [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
_assert_columnpath_exists(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc1'), column=_i64(1)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc2'), column=_i64(2)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc2'), column=_i64(3)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc3'), column=_i64(4)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc4'), column=_i64(5)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc4'), column=_i64(6)))
_assert_columnpath_exists(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc5'), column=_i64(7)))
@since('1.0', '2.2')
@pytest.mark.skip(reason="Runs but fails and looks like it actually should fail since 8099?")
def test_batch_mutate_remove_slice_part_of_supercolumns(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
columns = [Column(_i64(1), utf8encode('value1'), 0),
Column(_i64(2), utf8encode('value2'), 0),
Column(_i64(3), utf8encode('value3'), 0),
Column(_i64(4), utf8encode('value4'), 0),
Column(_i64(5), utf8encode('value5'), 0)]
for column in columns:
client.insert(utf8encode('key'), ColumnParent('Super1', utf8encode('sc1')), column, ConsistencyLevel.ONE)
r = SliceRange(start=_i64(2), finish=_i64(4))
d = Deletion(1, super_column=utf8encode('sc1') , predicate=SlicePredicate(slice_range=r))
client.batch_mutate({utf8encode('key'): {'Super1' : [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
_assert_columnpath_exists(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc1'), column=_i64(1)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc1'), column=_i64(2)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc1'), column=_i64(3)))
_assert_no_columnpath(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc1'), column=_i64(4)))
_assert_columnpath_exists(utf8encode('key'), ColumnPath('Super1', super_column=utf8encode('sc1'), column=_i64(5)))
def test_batch_mutate_insertions_and_deletions(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1', 'Super2')
first_insert = SuperColumn(utf8encode("sc1"),
columns=[Column(_i64(20), utf8encode('value20'), 3),
Column(_i64(21), utf8encode('value21'), 3)])
second_insert = SuperColumn(utf8encode("sc1"),
columns=[Column(_i64(20), utf8encode('value20'), 3),
Column(_i64(21), utf8encode('value21'), 3)])
first_deletion = {'super_column': utf8encode("sc1"),
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
second_deletion = {'super_column': utf8encode("sc2"),
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
keys = [utf8encode('key_30'), utf8encode('key_31')]
for key in keys:
sc = SuperColumn(utf8encode('sc1'), [Column(_i64(22), utf8encode('value22'), 0),
Column(_i64(23), utf8encode('value23'), 0)])
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=sc))]}
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
sc2 = SuperColumn(utf8encode('sc2'), [Column(_i64(22), utf8encode('value22'), 0),
Column(_i64(23), utf8encode('value23'), 0)])
cfmap2 = {'Super2': [Mutation(ColumnOrSuperColumn(super_column=sc2))]}
client.batch_mutate({key: cfmap2}, ConsistencyLevel.ONE)
cfmap3 = {
'Super1': [Mutation(ColumnOrSuperColumn(super_column=first_insert)),
Mutation(deletion=Deletion(3, **first_deletion))],
'Super2': [Mutation(deletion=Deletion(2, **second_deletion)),
Mutation(ColumnOrSuperColumn(super_column=second_insert))]
}
keyed_mutations = dict((key, cfmap3) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for key in keys:
for c in [_i64(22), _i64(23)]:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=utf8encode('sc1'), column=c))
_assert_no_columnpath(key, ColumnPath('Super2', super_column=utf8encode('sc2'), column=c))
for c in [_i64(20), _i64(21)]:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=utf8encode('sc1'), column=c))
_assert_columnpath_exists(key, ColumnPath('Super2', super_column=utf8encode('sc1'), column=c))
def test_bad_system_calls(self):
def duplicate_index_names():
_set_keyspace('Keyspace1')
cd1 = ColumnDef(utf8encode('foo'), 'BytesType', IndexType.KEYS, 'i')
cd2 = ColumnDef(utf8encode('bar'), 'BytesType', IndexType.KEYS, 'i')
cf = CfDef('Keyspace1', 'BadCF', column_metadata=[cd1, cd2])
client.system_add_column_family(cf)
_expect_exception(duplicate_index_names, InvalidRequestException)
def test_bad_batch_calls(self):
# mutate_does_not_accept_cosc_and_deletion_in_same_mutation
def too_full():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column(utf8encode("foo"), utf8encode('bar'), 0))
dele = Deletion(2, predicate=SlicePredicate(column_names=[utf8encode('baz')]))
client.batch_mutate({utf8encode('key_34'): {'Standard1': [Mutation(col, dele)]}},
ConsistencyLevel.ONE)
_expect_exception(too_full, InvalidRequestException)
# test_batch_mutate_does_not_accept_cosc_on_undefined_cf:
def bad_cf():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column(utf8encode("foo"), utf8encode('bar'), 0))
client.batch_mutate({utf8encode('key_36'): {'Undefined': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# test_batch_mutate_does_not_accept_deletion_on_undefined_cf
def bad_cf_2():
_set_keyspace('Keyspace1')
d = Deletion(2, predicate=SlicePredicate(column_names=[utf8encode('baz')]))
client.batch_mutate({utf8encode('key_37'): {'Undefined': [Mutation(deletion=d)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf_2, InvalidRequestException)
# a column value that does not match the declared validator
def send_string_instead_of_long():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column(utf8encode('birthdate'), utf8encode('bar'), 0))
client.batch_mutate({utf8encode('key_38'): {'Indexed1': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(send_string_instead_of_long, InvalidRequestException)
def test_column_name_lengths(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_expect_exception(lambda: client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode(''), utf8encode('value'), 0), ConsistencyLevel.ONE), InvalidRequestException)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 1), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 127), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 128), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 129), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 255), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 256), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * 257), utf8encode('value'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * (2 ** 16 - 1)), utf8encode('value'), 0), ConsistencyLevel.ONE)
_expect_exception(lambda: client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('x' * (2 ** 16)), utf8encode('value'), 0), ConsistencyLevel.ONE), InvalidRequestException)
def test_bad_calls(self):
_set_keyspace('Keyspace1')
# missing arguments
_expect_exception(lambda: client.insert(None, None, None, None), TApplicationException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.insert(utf8encode('key1'), ColumnParent('Standard1', utf8encode('x')), Column(utf8encode('y'), utf8encode('value'), 0), ConsistencyLevel.ONE), InvalidRequestException)
# no supercolumn in a super CF
_expect_exception(lambda: client.insert(utf8encode('key1'), ColumnParent('Super1'), Column(utf8encode('y'), utf8encode('value'), 0), ConsistencyLevel.ONE), InvalidRequestException)
# column but no supercolumn in remove
_expect_exception(lambda: client.remove(utf8encode('key1'), ColumnPath('Super1', column=utf8encode('x')), 0, ConsistencyLevel.ONE), InvalidRequestException)
# super column in non-super CF
_expect_exception(lambda: client.remove(utf8encode('key1'), ColumnPath('Standard1', utf8encode('y'), utf8encode('x')), 0, ConsistencyLevel.ONE), InvalidRequestException)
# key too long
_expect_exception(lambda: client.get(utf8encode('x' * 2 ** 16), ColumnPath('Standard1', column=utf8encode('c1')), ConsistencyLevel.ONE), InvalidRequestException)
# empty key
_expect_exception(lambda: client.get(utf8encode(''), ColumnPath('Standard1', column=utf8encode('c1')), ConsistencyLevel.ONE), InvalidRequestException)
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS]}
_expect_exception(lambda: client.batch_mutate({utf8encode(''): cfmap}, ConsistencyLevel.ONE), InvalidRequestException)
# empty column name
_expect_exception(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('')), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify column name
_expect_exception(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1'), ConsistencyLevel.ONE), InvalidRequestException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1', utf8encode('x'), utf8encode('y')), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify supercolumn name
_expect_exception(lambda: client.get(utf8encode('key1'), ColumnPath('Super1'), ConsistencyLevel.ONE), InvalidRequestException)
# invalid CF
_expect_exception(lambda: get_range_slice(client, ColumnParent('S'), SlicePredicate(column_names=[utf8encode(''), utf8encode('')]), utf8encode(''), utf8encode(''), 5, ConsistencyLevel.ONE), InvalidRequestException)
# 'x' is not a valid Long
_expect_exception(lambda: client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc1')), Column(utf8encode('x'), utf8encode('value'), 0), ConsistencyLevel.ONE), InvalidRequestException)
# start is not a valid Long
p = SlicePredicate(slice_range=SliceRange(utf8encode('x'), utf8encode(''), False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start is not a valid Long, supercolumn version
p = SlicePredicate(slice_range=SliceRange(utf8encode('x'), utf8encode(''), False, 1))
column_parent = ColumnParent('Super1', utf8encode('sc1'))
_expect_exception(lambda: client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, supercolumn version
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('Super1', utf8encode('sc1'))
_expect_exception(lambda: client.get_slice(utf8encode('key1'), column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, key version
_expect_exception(lambda: get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('')]), utf8encode('z'), utf8encode('a'), 1, ConsistencyLevel.ONE), InvalidRequestException)
# ttl must be greater than or equal to zero
column = Column(utf8encode('cttl1'), utf8encode('value1'), 0, -1)
_expect_exception(lambda: client.insert(utf8encode('key1'), ColumnParent('Standard1'), column, ConsistencyLevel.ONE),
InvalidRequestException)
# don't allow super_column in Deletion for standard Column
deletion = Deletion(1, utf8encode('supercolumn'), None)
mutation = Mutation(deletion=deletion)
mutations = {utf8encode('key'): {'Standard1': [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM),
InvalidRequestException)
# 'x' is not a valid long
deletion = Deletion(1, utf8encode('x'), None)
mutation = Mutation(deletion=deletion)
mutations = {utf8encode('key'): {'Super3': [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM), InvalidRequestException)
# counters don't support ANY
_expect_exception(lambda: client.add(utf8encode('key1'), ColumnParent('Counter1', utf8encode('x')), CounterColumn(utf8encode('y'), 1), ConsistencyLevel.ANY), InvalidRequestException)
def test_batch_insert_super(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1', 'Super2')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({utf8encode('key1'): cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_cf_remove_column(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_simple()
client.remove(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('c1')), 1, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('c1')), ConsistencyLevel.ONE))
assert client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('c2')), ConsistencyLevel.ONE).column \
== Column(utf8encode('c2'), utf8encode('value2'), 0)
assert _big_slice(utf8encode('key1'), ColumnParent('Standard1')) \
== [ColumnOrSuperColumn(column=Column(utf8encode('c2'), utf8encode('value2'), 0))]
# New insert, make sure it shows up post-remove:
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c3'), utf8encode('value3'), 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice(utf8encode('key1'), ColumnParent('Standard1'))]
assert columns == [Column(utf8encode('c2'), utf8encode('value2'), 0), Column(utf8encode('c3'), utf8encode('value3'), 0)], columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c1'), utf8encode('value1'), 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice(utf8encode('key1'), ColumnParent('Standard1'))]
assert columns == [Column(utf8encode('c2'), utf8encode('value2'), 0), Column(utf8encode('c3'), utf8encode('value3'), 0)], columns
# Next, w/ a newer timestamp; it should come back:
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c1'), utf8encode('value1'), 2), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice(utf8encode('key1'), ColumnParent('Standard1'))]
assert columns == [Column(utf8encode('c1'), utf8encode('value1'), 2), Column(utf8encode('c2'), utf8encode('value2'), 0), Column(utf8encode('c3'), utf8encode('value3'), 0)], columns
def test_cf_remove(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Super1')
_insert_simple()
_insert_super()
# Remove the key1:Standard1 cf; verify super is unaffected
client.remove(utf8encode('key1'), ColumnPath('Standard1'), 3, ConsistencyLevel.ONE)
assert _big_slice(utf8encode('key1'), ColumnParent('Standard1')) == []
_verify_super()
# Test resurrection. First, re-insert a value w/ older timestamp,
# and make sure it stays removed:
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c1'), utf8encode('value1'), 0), ConsistencyLevel.ONE)
assert _big_slice(utf8encode('key1'), ColumnParent('Standard1')) == []
# Next, w/ a newer timestamp; it should come back:
client.insert(utf8encode('key1'), ColumnParent('Standard1'), Column(utf8encode('c1'), utf8encode('value1'), 4), ConsistencyLevel.ONE)
result = _big_slice(utf8encode('key1'), ColumnParent('Standard1'))
assert result == [ColumnOrSuperColumn(column=Column(utf8encode('c1'), utf8encode('value1'), 4))], result
# check removing the entire super cf, too.
client.remove(utf8encode('key1'), ColumnPath('Super1'), 3, ConsistencyLevel.ONE)
assert _big_slice(utf8encode('key1'), ColumnParent('Super1')) == []
assert _big_slice(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc1'))) == []
def test_super_cf_remove_and_range_slice(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
client.insert(utf8encode('key3'), ColumnParent('Super1', utf8encode('sc1')), Column(_i64(1), utf8encode('v1'), 0), ConsistencyLevel.ONE)
client.remove(utf8encode('key3'), ColumnPath('Super1', utf8encode('sc1')), 5, ConsistencyLevel.ONE)
rows = {}
for row in get_range_slice(client, ColumnParent('Super1'), SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000)), utf8encode(''), utf8encode(''), 1000, ConsistencyLevel.ONE):
scs = [cosc.super_column for cosc in row.columns]
rows[row.key] = scs
assert rows == {utf8encode('key3'): []}, rows
def test_super_cf_remove_column(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Super1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc2'), _i64(5)), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc2'), _i64(5)), ConsistencyLevel.ONE))
super_columns = [result.super_column for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
assert super_columns == [SuperColumn(name=utf8encode('sc1'), columns=[Column(_i64(4), utf8encode('value4'), 0)]),
SuperColumn(name=utf8encode('sc2'), columns=[Column(_i64(6), utf8encode('value6'), 0)])]
_verify_simple()
# New insert, make sure it shows up post-remove:
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(7), utf8encode('value7'), 0), ConsistencyLevel.ONE)
super_columns_expected = [SuperColumn(name=utf8encode('sc1'),
columns=[Column(_i64(4), utf8encode('value4'), 0)]),
SuperColumn(name=utf8encode('sc2'),
columns=[Column(_i64(6), utf8encode('value6'), 0), Column(_i64(7), utf8encode('value7'), 0)])]
super_columns = [result.super_column for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(5), utf8encode('value5'), 0), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(5), utf8encode('value5'), 6), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name=utf8encode('sc1'), columns=[Column(_i64(4), utf8encode('value4'), 0)]),
SuperColumn(name=utf8encode('sc2'), columns=[Column(_i64(5), utf8encode('value5'), 6),
Column(_i64(6), utf8encode('value6'), 0),
Column(_i64(7), utf8encode('value7'), 0)])]
assert super_columns == super_columns_expected, super_columns
# shouldn't be able to specify a column w/o a super column for remove
cp = ColumnPath(column_family='Super1', column=utf8encode('sc2'))
e = _expect_exception(lambda: client.remove(utf8encode('key1'), cp, 5, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("column cannot be specified without") >= 0
def test_super_cf_remove_supercolumn(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Super1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc2')), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Super1', utf8encode('sc2'), _i64(5)), ConsistencyLevel.ONE))
super_columns = _big_slice(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')))
assert super_columns == [], super_columns
super_columns_expected = [SuperColumn(name=utf8encode('sc1'), columns=[Column(_i64(4), utf8encode('value4'), 0)])]
super_columns = [result.super_column
for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
_verify_simple()
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(5), utf8encode('value5'), 1), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(5), utf8encode('value5'), 6), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice(utf8encode('key1'), ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name=utf8encode('sc1'), columns=[Column(_i64(4), utf8encode('value4'), 0)]),
SuperColumn(name=utf8encode('sc2'), columns=[Column(_i64(5), utf8encode('value5'), 6)])]
assert super_columns == super_columns_expected, super_columns
# check slicing at the subcolumn level too
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000))
columns = [result.column
for result in client.get_slice(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), p, ConsistencyLevel.ONE)]
assert columns == [Column(_i64(5), utf8encode('value5'), 6)], columns
def test_super_cf_resurrect_subcolumn(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
key = utf8encode('vijay')
client.insert(key, ColumnParent('Super1', utf8encode('sc1')), Column(_i64(4), utf8encode('value4'), 0), ConsistencyLevel.ONE)
client.remove(key, ColumnPath('Super1', utf8encode('sc1')), 1, ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', utf8encode('sc1')), Column(_i64(4), utf8encode('value4'), 2), ConsistencyLevel.ONE)
result = client.get(key, ColumnPath('Super1', utf8encode('sc1')), ConsistencyLevel.ONE)
assert result.super_column.columns is not None, result.super_column
def test_empty_range(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Super1')
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c1')]), utf8encode(''), utf8encode(''), 1000, ConsistencyLevel.ONE) == []
_insert_simple()
assert get_range_slice(client, ColumnParent('Super1'), SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c1')]), utf8encode(''), utf8encode(''), 1000, ConsistencyLevel.ONE) == []
@since('2.1', max_version='4')
def test_super_cql_read_compatibility(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_insert_super(utf8encode("key1"))
_insert_super(utf8encode("key2"))
node1 = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
session.execute('USE "Keyspace1"')
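        # super column data written via Thrift should be readable through CQL as (key, column1, column2, value) rows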
assert_all(session, "SELECT * FROM \"Super1\"",
[[utf8encode("key1"), utf8encode("sc1"), 4, utf8encode("value4")],
[utf8encode("key1"), utf8encode("sc2"), 5, utf8encode("value5")],
[utf8encode("key1"), utf8encode("sc2"), 6, utf8encode("value6")],
[utf8encode("key2"), utf8encode("sc1"), 4, utf8encode("value4")],
[utf8encode("key2"), utf8encode("sc2"), 5, utf8encode("value5")],
[utf8encode("key2"), utf8encode("sc2"), 6, utf8encode("value6")]])
assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1')",
[[utf8encode("key1"), utf8encode("sc1"), 4, utf8encode("value4")],
[utf8encode("key1"), utf8encode("sc2"), 5, utf8encode("value5")],
[utf8encode("key1"), utf8encode("sc2"), 6, utf8encode("value6")]])
assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1') AND column1=textAsBlob('sc2')",
[[utf8encode("key1"), utf8encode("sc2"), 5, utf8encode("value5")],
[utf8encode("key1"), utf8encode("sc2"), 6, utf8encode("value6")]])
assert_all(session, "SELECT * FROM \"Super1\" WHERE key=textAsBlob('key1') AND column1=textAsBlob('sc2') AND column2 = 5",
[[utf8encode("key1"), utf8encode("sc2"), 5, utf8encode("value5")]])
assert_all(session, "SELECT * FROM \"Super1\" WHERE key = textAsBlob('key1') AND column1 = textAsBlob('sc2')",
[[utf8encode("key1"), utf8encode("sc2"), 5, utf8encode("value5")],
[utf8encode("key1"), utf8encode("sc2"), 6, utf8encode("value6")]])
assert_all(session, "SELECT column2, value FROM \"Super1\" WHERE key = textAsBlob('key1') AND column1 = textAsBlob('sc2')",
[[5, utf8encode("value5")],
[6, utf8encode("value6")]])
@since('2.1', max_version='4')
def test_super_cql_write_compatibility(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
node1 = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
session.execute('USE "Keyspace1"')
query = "INSERT INTO \"Super1\" (key, column1, column2, value) VALUES (textAsBlob(%s), textAsBlob(%s), %s, textAsBlob(%s)) USING TIMESTAMP 1234"
session.execute(query, ("key1", "sc1", 4, "value4"))
session.execute(query, ("key1", "sc2", 5, "value5"))
session.execute(query, ("key1", "sc2", 6, "value6"))
session.execute(query, ("key2", "sc1", 4, "value4"))
session.execute(query, ("key2", "sc2", 5, "value5"))
session.execute(query, ("key2", "sc2", 6, "value6"))
p = SlicePredicate(slice_range=SliceRange(utf8encode('sc1'), utf8encode('sc2'), False, 2))
result = client.get_slice(utf8encode('key1'), ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert_length_equal(result, 2)
assert result[0].super_column.name == utf8encode('sc1')
        assert result[0].super_column.columns == [Column(_i64(4), utf8encode('value4'), 1234)], result[0].super_column.columns
assert result[1].super_column.name == utf8encode('sc2')
        assert result[1].super_column.columns == [Column(_i64(5), utf8encode('value5'), 1234), Column(_i64(6), utf8encode('value6'), 1234)], result[1].super_column.columns
def test_range_with_remove(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c1')]), utf8encode('key1'), utf8encode(''), 1000, ConsistencyLevel.ONE)[0].key == utf8encode('key1')
client.remove(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('c1')), 1, ConsistencyLevel.ONE)
client.remove(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('c2')), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c2')]), utf8encode(''), utf8encode(''), 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key=utf8encode('key1'))], actual
def test_range_with_remove_cf(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c1')]), utf8encode('key1'), utf8encode(''), 1000, ConsistencyLevel.ONE)[0].key == utf8encode('key1')
client.remove(utf8encode('key1'), ColumnPath('Standard1'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c1')]), utf8encode(''), utf8encode(''), 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key=utf8encode('key1'))], actual
def test_range_collation(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in range(100)]:
key = utf8encode(key)
client.insert(key, ColumnParent('Standard1'), Column(key, utf8encode('v'), 0), ConsistencyLevel.ONE)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('-a'), utf8encode('-a')]), utf8encode(''), utf8encode(''), 1000, ConsistencyLevel.ONE)
L = ['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '3', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '4', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '5', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '6', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '7', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '8', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '9', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', 'a', 'b']
assert len(slices) == len(L)
for key, ks in zip(L, slices):
key = utf8encode(key)
assert key == ks.key
def test_range_partial(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in range(100)]:
key = utf8encode(key)
client.insert(key, ColumnParent('Standard1'), Column(key, utf8encode('v'), 0), ConsistencyLevel.ONE)
def check_slices_against_keys(keyList, sliceList):
assert len(keyList) == len(sliceList), "%d vs %d" % (len(keyList), len(sliceList))
for key, ks in zip(keyList, sliceList):
key = utf8encode(key)
assert key == ks.key
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('-a'), utf8encode('-a')]), utf8encode('a'), utf8encode(''), 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['a', 'b'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('-a'), utf8encode('-a')]), utf8encode(''), utf8encode('15'), 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('-a'), utf8encode('-a')]), utf8encode('50'), utf8encode('51'), 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['50', '51'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=[utf8encode('-a'), utf8encode('-a')]), utf8encode('1'), utf8encode(''), 10, ConsistencyLevel.ONE)
check_slices_against_keys(['1', '10', '11', '12', '13', '14', '15', '16', '17', '18'], slices)
def test_get_slice_range(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_range()
_verify_range()
def test_get_slice_super_range(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_insert_super_range()
_verify_super_range()
def test_get_range_slices_tokens(self):
_set_keyspace('Keyspace2')
self.truncate_all('Super3')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
key = utf8encode(key)
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
cnameutf = utf8encode(cname)
client.insert(key, ColumnParent('Super3', utf8encode('sc1')), Column(cnameutf, utf8encode('v-' + cname), 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', utf8encode('sc1'))
predicate = SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')])
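        # a key range whose start and end tokens are equal wraps around the ring, so all five keys are returned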
range = KeyRange(start_token='55', end_token='55', count=100)
result = client.get_range_slices(cp, predicate, range, ConsistencyLevel.ONE)
assert len(result) == 5
assert result[0].columns[0].column.name == utf8encode('col1')
assert result[0].columns[1].column.name == utf8encode('col3')
def test_get_range_slice_super(self):
_set_keyspace('Keyspace2')
self.truncate_all('Super3')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
key = utf8encode(key)
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
cnameutf = utf8encode(cname)
client.insert(key, ColumnParent('Super3', utf8encode('sc1')), Column(cnameutf, utf8encode('v-' + cname), 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', utf8encode('sc1'))
result = get_range_slice(client, cp, SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')]), utf8encode('key2'), utf8encode('key4'), 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert result[0].columns[0].column.name == utf8encode('col1')
assert result[0].columns[1].column.name == utf8encode('col3')
cp = ColumnParent('Super3')
result = get_range_slice(client, cp, SlicePredicate(column_names=[utf8encode('sc1')]), utf8encode('key2'), utf8encode('key4'), 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert list(set(row.columns[0].super_column.name for row in result))[0] == utf8encode('sc1')
def test_get_range_slice(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
key = utf8encode(key)
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
cnameutf = utf8encode(cname)
client.insert(key, ColumnParent('Standard1'), Column(cnameutf, utf8encode('v-' + cname), 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
# test empty slice
result = get_range_slice(client, cp, SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')]), utf8encode('key6'), utf8encode(''), 1, ConsistencyLevel.ONE)
assert len(result) == 0
# test empty columns
result = get_range_slice(client, cp, SlicePredicate(column_names=[utf8encode('a')]), utf8encode('key2'), utf8encode(''), 1, ConsistencyLevel.ONE)
assert len(result) == 1
assert len(result[0].columns) == 0
# test column_names predicate
result = get_range_slice(client, cp, SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')]), utf8encode('key2'), utf8encode('key4'), 5, ConsistencyLevel.ONE)
assert len(result) == 3, result
assert result[0].columns[0].column.name == utf8encode('col1')
assert result[0].columns[1].column.name == utf8encode('col3')
# row limiting via count.
result = get_range_slice(client, cp, SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')]), utf8encode('key2'), utf8encode('key4'), 1, ConsistencyLevel.ONE)
assert len(result) == 1
# test column slice predicate
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start=utf8encode('col2'), finish=utf8encode('col4'), reversed=False, count=5)), utf8encode('key1'), utf8encode('key2'), 5, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].key == utf8encode('key1')
assert result[1].key == utf8encode('key2')
assert len(result[0].columns) == 3
assert result[0].columns[0].column.name == utf8encode('col2')
assert result[0].columns[2].column.name == utf8encode('col4')
# col limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start=utf8encode('col2'), finish=utf8encode('col4'), reversed=False, count=2)), utf8encode('key1'), utf8encode('key2'), 5, ConsistencyLevel.ONE)
assert len(result[0].columns) == 2
# and reversed
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start=utf8encode('col4'), finish=utf8encode('col2'), reversed=True, count=5)), utf8encode('key1'), utf8encode('key2'), 5, ConsistencyLevel.ONE)
assert result[0].columns[0].column.name == utf8encode('col4')
assert result[0].columns[2].column.name == utf8encode('col2')
# row limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start=utf8encode('col2'), finish=utf8encode('col4'), reversed=False, count=5)), utf8encode('key1'), utf8encode('key2'), 1, ConsistencyLevel.ONE)
assert len(result) == 1
# removed data
client.remove(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('col1')), 1, ConsistencyLevel.ONE)
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''))), utf8encode('key1'), utf8encode('key2'), 5, ConsistencyLevel.ONE)
assert len(result) == 2, result
assert result[0].columns[0].column.name == utf8encode('col2'), result[0].columns[0].column.name
assert result[1].columns[0].column.name == utf8encode('col1')
def test_wrapped_range_slices(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
def copp_token(key):
# I cheated and generated this from Java
return {'a': '00530000000100000001',
'b': '00540000000100000001',
'c': '00550000000100000001',
'd': '00560000000100000001',
'e': '00580000000100000001'}[key]
keylist = [utf8encode(key) for key in ['a', 'b', 'c', 'd', 'e']]
for key in keylist:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
cnameutf = utf8encode(cname)
client.insert(key, ColumnParent('Standard1'), Column(cnameutf, utf8encode('v-' + cname), 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
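        # ranges that start and end on the same token wrap the ring and should return every key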
result = client.get_range_slices(cp, SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')]), KeyRange(start_token=copp_token('e'), end_token=copp_token('e')), ConsistencyLevel.ONE)
assert [row.key for row in result] == keylist, [row.key for row in result]
result = client.get_range_slices(cp, SlicePredicate(column_names=[utf8encode('col1'), utf8encode('col3')]), KeyRange(start_token=copp_token('c'), end_token=copp_token('c')), ConsistencyLevel.ONE)
assert [row.key for row in result] == keylist, [row.key for row in result]
def test_get_slice_by_names(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1', 'Super1')
_insert_range()
p = SlicePredicate(column_names=[utf8encode('c1'), utf8encode('c2')])
result = client.get_slice(utf8encode('key1'), ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == utf8encode('c1')
assert result[1].column.name == utf8encode('c2')
_insert_super()
p = SlicePredicate(column_names=[_i64(4)])
result = client.get_slice(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc1')), p, ConsistencyLevel.ONE)
assert len(result) == 1
assert result[0].column.name == _i64(4)
def test_multiget_slice_with_compact_table(self):
"""Insert multiple keys in a compact table and retrieve them using the multiget_slice interface"""
_set_keyspace('Keyspace1')
# create
cd = ColumnDef(utf8encode('v'), 'AsciiType', None, None)
newcf = CfDef('Keyspace1', 'CompactColumnFamily', default_validation_class='AsciiType', column_metadata=[cd])
client.system_add_column_family(newcf)
CL = ConsistencyLevel.ONE
for i in range(0, 5):
client.insert(utf8encode('key' + str(i)), ColumnParent('CompactColumnFamily'), Column(utf8encode('v'), utf8encode('value' + str(i)), 0), CL)
time.sleep(0.1)
p = SlicePredicate(column_names=[utf8encode('v')])
rows = client.multiget_slice([utf8encode('key' + str(i)) for i in range(0, 5)], ColumnParent('CompactColumnFamily'), p, ConsistencyLevel.ONE)
for i in range(0, 5):
key = utf8encode('key' + str(i))
assert key in rows
assert len(rows[key]) == 1
assert rows[key][0].column.name == utf8encode('v')
assert rows[key][0].column.value == utf8encode('value' + str(i))
def test_multiget_slice(self):
"""Insert multiple keys and retrieve them using the multiget_slice interface"""
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
# Generate a list of 10 keys and insert them
num_keys = 10
keys = [utf8encode('key' + str(i)) for i in range(1, num_keys + 1)]
_insert_multi(keys)
# Retrieve all 10 key slices
rows = _big_multislice(keys, ColumnParent('Standard1'))
columns = [ColumnOrSuperColumn(c) for c in _SIMPLE_COLUMNS]
# Validate if the returned rows have the keys requested and if the ColumnOrSuperColumn is what was inserted
for key in keys:
assert key in rows
assert columns == rows[key]
def test_multi_count(self):
"""Insert multiple keys and count them using the multiget interface"""
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
        # Generate 10 keys containing 1 to 10 columns each and insert them
num_keys = 10
for i in range(1, num_keys + 1):
key = utf8encode('key' + str(i))
for j in range(1, i + 1):
client.insert(key, ColumnParent('Standard1'), Column(utf8encode('c' + str(j)), utf8encode('value' + str(j)), 0), ConsistencyLevel.ONE)
# Count columns in all 10 keys
keys = [utf8encode('key' + str(i)) for i in range(1, num_keys + 1)]
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000))
counts = client.multiget_count(keys, ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
# Check the returned counts
for i in range(1, num_keys + 1):
key = utf8encode('key' + str(i))
assert counts[key] == i
def test_batch_mutate_super_deletion(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
_insert_super('test')
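        # delete the 'sc1' super column for key 'test' via batch_mutate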
d = Deletion(1, predicate=SlicePredicate(column_names=[utf8encode('sc1')]))
cfmap = {'Super1': [Mutation(deletion=d)]}
client.batch_mutate({utf8encode('test'): cfmap}, ConsistencyLevel.ONE)
        _expect_missing(lambda: client.get(utf8encode('test'), ColumnPath('Super1', utf8encode('sc1')), ConsistencyLevel.ONE))
def test_super_reinsert(self):
_set_keyspace('Keyspace1')
self.truncate_all('Super1')
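        # insert subcolumns, delete the whole row at a later timestamp, then re-insert newer subcolumns;
        # slices should only see the re-inserted data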
for x in range(3):
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(x), utf8encode('value'), 1), ConsistencyLevel.ONE)
client.remove(utf8encode('key1'), ColumnPath('Super1'), 2, ConsistencyLevel.ONE)
for x in range(3):
client.insert(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), Column(_i64(x + 3), utf8encode('value'), 3), ConsistencyLevel.ONE)
for n in range(1, 4):
p = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, n))
slice = client.get_slice(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc2')), p, ConsistencyLevel.ONE)
assert len(slice) == n, "expected %s results; found %s" % (n, slice)
def test_describe_keyspace(self):
try:
client.system_drop_keyspace("ValidKsForUpdate")
except InvalidRequestException:
            pass  # The keyspace doesn't exist when this test is run in isolation.
kspaces = client.describe_keyspaces()
if self.cluster.version() >= '3.0':
assert len(kspaces) == 7, [x.name for x in kspaces] # ['Keyspace2', 'Keyspace1', 'system', 'system_traces', 'system_auth', 'system_distributed', 'system_schema']
elif self.cluster.version() >= '2.2':
assert len(kspaces) == 6, [x.name for x in kspaces] # ['Keyspace2', 'Keyspace1', 'system', 'system_traces', 'system_auth', 'system_distributed']
else:
assert len(kspaces) == 4, [x.name for x in kspaces] # ['Keyspace2', 'Keyspace1', 'system', 'system_traces']
sysks = client.describe_keyspace("system")
assert sysks in kspaces
ks1 = client.describe_keyspace("Keyspace1")
assert ks1.strategy_options['replication_factor'] == '1', ks1.strategy_options
for cf in ks1.cf_defs:
if cf.name == "Standard1":
cf0 = cf
break
assert cf0.comparator_type == "org.apache.cassandra.db.marshal.BytesType"
def test_describe(self):
assert client.describe_cluster_name() == 'test'
def test_describe_ring(self):
assert list(client.describe_ring('Keyspace1'))[0].endpoints == ['127.0.0.1']
def test_describe_token_map(self):
# test/conf/cassandra.yaml specifies org.apache.cassandra.dht.ByteOrderedPartitioner
# which uses BytesToken, so this just tests that the string representation of the token
# matches a regex pattern for BytesToken.toString().
ring = list(client.describe_token_map().items())
if not self.dtest_config.use_vnodes:
assert len(ring) == 1
else:
assert len(ring) == int(self.dtest_config.num_tokens)
token, node = ring[0]
if self.dtest_config.use_vnodes:
assert re.match("[0-9A-Fa-f]{32}", token)
assert node == '127.0.0.1'
def test_describe_partitioner(self):
# Make sure this just reads back the values from the config.
assert client.describe_partitioner() == "org.apache.cassandra.dht.ByteOrderedPartitioner"
def test_describe_snitch(self):
assert client.describe_snitch() == "org.apache.cassandra.locator.SimpleSnitch"
def test_invalid_ks_names(self):
def invalid_keyspace():
client.system_add_keyspace(KsDef('in-valid', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[]))
_expect_exception(invalid_keyspace, InvalidRequestException)
def test_invalid_strategy_class(self):
def add_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKs', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(add_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def update_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKsForUpdate', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[]))
client.system_update_keyspace(KsDef('ValidKsForUpdate', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(update_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def test_invalid_cf_names(self):
def invalid_cf():
_set_keyspace('Keyspace1')
newcf = CfDef('Keyspace1', 'in-valid')
client.system_add_column_family(newcf)
_expect_exception(invalid_cf, InvalidRequestException)
def invalid_cf_inside_new_ks():
cf = CfDef('ValidKsName_invalid_cf', 'in-valid')
_set_keyspace('system')
client.system_add_keyspace(KsDef('ValidKsName_invalid_cf', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[cf]))
_expect_exception(invalid_cf_inside_new_ks, InvalidRequestException)
def test_system_cf_recreate(self):
"ensures that keyspaces and column familes can be dropped and recreated in short order"
for x in range(2):
keyspace = 'test_cf_recreate'
cf_name = 'recreate_cf'
# create
newcf = CfDef(keyspace, cf_name)
newks = KsDef(keyspace, 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[newcf])
client.system_add_keyspace(newks)
_set_keyspace(keyspace)
# insert
client.insert(utf8encode('key0'), ColumnParent(cf_name), Column(utf8encode('colA'), utf8encode('colA-value'), 0), ConsistencyLevel.ONE)
col1 = client.get_slice(utf8encode('key0'), ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 100)), ConsistencyLevel.ONE)[0].column
assert col1.name == utf8encode('colA') and col1.value == utf8encode('colA-value')
# drop
client.system_drop_column_family(cf_name)
# recreate
client.system_add_column_family(newcf)
# query
cosc_list = client.get_slice(utf8encode('key0'), ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 100)), ConsistencyLevel.ONE)
# this was failing prior to CASSANDRA-1477.
assert len(cosc_list) == 0, 'cosc length test failed'
client.system_drop_keyspace(keyspace)
def test_system_keyspace_operations(self):
# create. note large RF, this is OK
keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.SimpleStrategy',
{'replication_factor': '10'},
cf_defs=[CfDef('CreateKeyspace', 'CreateKsCf')])
client.system_add_keyspace(keyspace)
newks = client.describe_keyspace('CreateKeyspace')
assert 'CreateKsCf' in [x.name for x in newks.cf_defs]
_set_keyspace('CreateKeyspace')
# modify valid
modified_keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.OldNetworkTopologyStrategy',
{'replication_factor': '1'},
cf_defs=[])
client.system_update_keyspace(modified_keyspace)
modks = client.describe_keyspace('CreateKeyspace')
assert modks.strategy_class == modified_keyspace.strategy_class
assert modks.strategy_options == modified_keyspace.strategy_options
# check strategy options are validated on modify
def modify_invalid_ks():
client.system_update_keyspace(KsDef('CreateKeyspace',
'org.apache.cassandra.locator.SimpleStrategy',
{},
cf_defs=[]))
_expect_exception(modify_invalid_ks, InvalidRequestException)
# drop
client.system_drop_keyspace('CreateKeyspace')
def get_second_ks():
client.describe_keyspace('CreateKeyspace')
_expect_exception(get_second_ks, NotFoundException)
# check strategy options are validated on creation
def create_invalid_ks():
client.system_add_keyspace(KsDef('InvalidKeyspace',
'org.apache.cassandra.locator.SimpleStrategy',
{},
cf_defs=[]))
_expect_exception(create_invalid_ks, InvalidRequestException)
def test_create_then_drop_ks(self):
keyspace = KsDef('AddThenDrop',
strategy_class='org.apache.cassandra.locator.SimpleStrategy',
strategy_options={'replication_factor': '1'},
cf_defs=[])
def test_existence():
client.describe_keyspace(keyspace.name)
_expect_exception(test_existence, NotFoundException)
client.set_keyspace('system')
client.system_add_keyspace(keyspace)
test_existence()
client.system_drop_keyspace(keyspace.name)
def test_column_validators(self):
# columndef validation for regular CF
ks = 'Keyspace1'
_set_keyspace(ks)
cd = ColumnDef(utf8encode('col'), 'LongType', None, None)
cf = CfDef('Keyspace1', 'ValidatorColumnFamily', column_metadata=[cd])
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
cp = ColumnParent('ValidatorColumnFamily')
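        # col0 is a valid 64-bit value for the LongType validator; col1 is not and should be rejected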
col0 = Column(utf8encode('col'), _i64(42), 0)
col1 = Column(utf8encode('col'), utf8encode("ceci n'est pas 64bit"), 0)
client.insert(utf8encode('key0'), cp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert(utf8encode('key1'), cp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef validation for super CF
scf = CfDef('Keyspace1', 'ValidatorSuperColumnFamily', column_type='Super', column_metadata=[cd])
client.system_add_column_family(scf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorSuperColumnFamily' in [x.name for x in ks_def.cf_defs]
scp = ColumnParent('ValidatorSuperColumnFamily', utf8encode('sc1'))
client.insert(utf8encode('key0'), scp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert(utf8encode('key1'), scp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef and cfdef default validation
cf = CfDef('Keyspace1', 'DefaultValidatorColumnFamily', column_metadata=[cd], default_validation_class='UTF8Type')
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'DefaultValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
dcp = ColumnParent('DefaultValidatorColumnFamily')
# inserting a longtype into column 'col' is valid at the columndef level
client.insert(utf8encode('key0'), dcp, col0, ConsistencyLevel.ONE)
# inserting a UTF8type into column 'col' fails at the columndef level
e = _expect_exception(lambda: client.insert(utf8encode('key1'), dcp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# insert a longtype into column 'fcol' should fail at the cfdef level
col2 = Column(utf8encode('fcol'), _i64(4224), 0)
e = _expect_exception(lambda: client.insert(utf8encode('key1'), dcp, col2, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# insert a UTF8type into column 'fcol' is valid at the cfdef level
col3 = Column(utf8encode('fcol'), utf8encode("Stringin' it up in the Stringtel Stringifornia"), 0)
client.insert(utf8encode('key0'), dcp, col3, ConsistencyLevel.ONE)
def test_system_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef(utf8encode('ValidationColumn'), 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' in [x.name for x in ks1.cf_defs]
cfid = [x.id for x in ks1.cf_defs if x.name == 'NewColumnFamily'][0]
# modify invalid
modified_cf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
modified_cf.id = cfid
def fail_invalid_field():
modified_cf.comparator_type = 'LongType'
client.system_update_column_family(modified_cf)
_expect_exception(fail_invalid_field, InvalidRequestException)
# modify valid
modified_cf.comparator_type = 'BytesType' # revert back to old value.
modified_cf.gc_grace_seconds = 1
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name == 'NewColumnFamily'][0]
assert server_cf
assert server_cf.gc_grace_seconds == 1
# drop
client.system_drop_column_family('NewColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
# Make a LongType CF and add a validator
newcf = CfDef('Keyspace1', 'NewLongColumnFamily', comparator_type='LongType')
client.system_add_column_family(newcf)
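        # with a LongType comparator, column names are 64-bit values, so the ColumnDef name must be packed with _i64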
three = _i64(3)
cd = ColumnDef(three, 'LongType', None, None)
ks1 = client.describe_keyspace('Keyspace1')
modified_cf = [x for x in ks1.cf_defs if x.name == 'NewLongColumnFamily'][0]
modified_cf.column_metadata = [cd]
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name == 'NewLongColumnFamily'][0]
assert server_cf.column_metadata[0].name == _i64(3), server_cf.column_metadata
def test_dynamic_indexes_creation_deletion(self):
_set_keyspace('Keyspace1')
cfdef = CfDef('Keyspace1', 'BlankCF')
client.system_add_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name == 'BlankCF'][0]
modified_cd = ColumnDef(utf8encode('birthdate'), 'BytesType', IndexType.KEYS, None)
modified_cf = CfDef('Keyspace1', 'BlankCF', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
# Add a second indexed CF ...
birthdate_coldef = ColumnDef(utf8encode('birthdate'), 'BytesType', IndexType.KEYS, None)
age_coldef = ColumnDef(utf8encode('age'), 'BytesType', IndexType.KEYS, 'age_index')
cfdef = CfDef('Keyspace1', 'BlankCF2', column_metadata=[birthdate_coldef, age_coldef])
client.system_add_column_family(cfdef)
# ... and update it to have a third index
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name == 'BlankCF2'][0]
name_coldef = ColumnDef(utf8encode('name'), 'BytesType', IndexType.KEYS, 'name_index')
cfdef.column_metadata.append(name_coldef)
client.system_update_column_family(cfdef)
# Now drop the indexes
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name == 'BlankCF2'][0]
birthdate_coldef = ColumnDef(utf8encode('birthdate'), 'BytesType', None, None)
age_coldef = ColumnDef(utf8encode('age'), 'BytesType', None, None)
name_coldef = ColumnDef(utf8encode('name'), 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef, age_coldef, name_coldef]
client.system_update_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name == 'BlankCF'][0]
birthdate_coldef = ColumnDef(utf8encode('birthdate'), 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef]
client.system_update_column_family(cfdef)
client.system_drop_column_family('BlankCF')
client.system_drop_column_family('BlankCF2')
def test_dynamic_indexes_with_system_update_cf(self):
_set_keyspace('Keyspace1')
cd = ColumnDef(utf8encode('birthdate'), 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'ToBeIndexed', default_validation_class='LongType', column_metadata=[cd])
client.system_add_column_family(newcf)
client.insert(utf8encode('key1'), ColumnParent('ToBeIndexed'), Column(utf8encode('birthdate'), _i64(1), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key2'), ColumnParent('ToBeIndexed'), Column(utf8encode('birthdate'), _i64(2), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key2'), ColumnParent('ToBeIndexed'), Column(utf8encode('b'), _i64(2), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key3'), ColumnParent('ToBeIndexed'), Column(utf8encode('birthdate'), _i64(3), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key3'), ColumnParent('ToBeIndexed'), Column(utf8encode('b'), _i64(3), 0), ConsistencyLevel.ONE)
# First without index
cp = ColumnParent('ToBeIndexed')
sp = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode('')))
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('birthdate'), IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == utf8encode('key1')
assert len(result[0].columns) == 1, result[0].columns
# add an index on 'birthdate'
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name == 'ToBeIndexed'][0]
modified_cd = ColumnDef(utf8encode('birthdate'), 'BytesType', IndexType.KEYS, 'bd_index')
modified_cf = CfDef('Keyspace1', 'ToBeIndexed', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name == 'ToBeIndexed'][0]
assert server_cf
assert server_cf.column_metadata[0].index_type == modified_cd.index_type
assert server_cf.column_metadata[0].index_name == modified_cd.index_name
# sleep a bit to give time for the index to build.
time.sleep(5)
# repeat query on one index expression
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == utf8encode('key1')
assert len(result[0].columns) == 1, result[0].columns
def test_system_super_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef(utf8encode('ValidationColumn'), 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewSuperColumnFamily', 'Super', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' in [x.name for x in ks1.cf_defs]
# drop
client.system_drop_column_family('NewSuperColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
def test_insert_ttl(self):
self._base_insert_ttl()
def test_insert_max_ttl(self):
self._base_insert_ttl(ttl=MAX_TTL, max_default_ttl=False)
def test_insert_max_default_ttl(self):
self._base_insert_ttl(ttl=None, max_default_ttl=True)
def _base_insert_ttl(self, ttl=5, max_default_ttl=False):
""" Test simple insertion of a column with max ttl """
_set_keyspace('Keyspace1')
cf = 'ExpiringMaxTTL' if max_default_ttl else 'Standard1'
logprefix = 'default ' if max_default_ttl else ''
self.truncate_all(cf)
node1 = self.cluster.nodelist()[0]
mark = node1.mark_log()
column = Column(utf8encode('cttl1'), utf8encode('value1'), 0, ttl)
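        # when max_default_ttl is set no explicit TTL is supplied, so the returned column is expected
        # to carry MAX_TTL (presumably applied from the table's default TTL)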
expected = Column(utf8encode('cttl1'), utf8encode('value1'), 0, MAX_TTL) if max_default_ttl else column
client.insert(utf8encode('key1'), ColumnParent(cf), column, ConsistencyLevel.ONE)
assert client.get(utf8encode('key1'), ColumnPath(cf, column=utf8encode('cttl1')), ConsistencyLevel.ONE).column == expected
if ttl and ttl < MAX_TTL:
assert not node1.grep_log("exceeds maximum supported expiration", from_mark=mark), "Should not print max expiration date exceeded warning"
else:
node1.watch_log_for("Request on table {}.{} with {}ttl of {} seconds exceeds maximum supported expiration"
.format('Keyspace1', cf, logprefix, MAX_TTL), timeout=10)
def test_simple_expiration(self):
""" Test that column ttled do expires """
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
column = Column(utf8encode('cttl3'), utf8encode('value1'), 0, 2)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
c = client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl3')), ConsistencyLevel.ONE).column
assert c == column
time.sleep(3)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl3')), ConsistencyLevel.ONE))
def test_expiration_with_default_ttl(self):
""" Test that column with default ttl do expires """
_set_keyspace('Keyspace1')
self.truncate_all('Expiring')
column = Column(utf8encode('cttl3'), utf8encode('value1'), 0)
client.insert(utf8encode('key1'), ColumnParent('Expiring'), column, ConsistencyLevel.ONE)
client.get(utf8encode('key1'), ColumnPath('Expiring', column=utf8encode('cttl3')), ConsistencyLevel.ONE).column
time.sleep(3)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Expiring', column=utf8encode('cttl3')), ConsistencyLevel.ONE))
@since('3.6', max_version='4')
def test_expiration_with_default_ttl_and_zero_ttl(self):
"""
Test that we can remove the default ttl by setting the ttl explicitly to zero
CASSANDRA-11207
"""
_set_keyspace('Keyspace1')
self.truncate_all('Expiring')
column = Column(utf8encode('cttl3'), utf8encode('value1'), 0, 0)
client.insert(utf8encode('key1'), ColumnParent('Expiring'), column, ConsistencyLevel.ONE)
c = client.get(utf8encode('key1'), ColumnPath('Expiring', column=utf8encode('cttl3')), ConsistencyLevel.ONE).column
assert Column(utf8encode('cttl3'), utf8encode('value1'), 0) == c
def test_simple_expiration_batch_mutate(self):
""" Test that column ttled do expires using batch_mutate """
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
column = Column(utf8encode('cttl4'), utf8encode('value1'), 0, 2)
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(column))]}
client.batch_mutate({utf8encode('key1'): cfmap}, ConsistencyLevel.ONE)
c = client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl4')), ConsistencyLevel.ONE).column
assert c == column
time.sleep(3)
_expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl4')), ConsistencyLevel.ONE))
def test_update_expiring(self):
""" Test that updating a column with ttl override the ttl """
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
column1 = Column(utf8encode('cttl4'), utf8encode('value1'), 0, 1)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), column1, ConsistencyLevel.ONE)
column2 = Column(utf8encode('cttl4'), utf8encode('value1'), 1)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), column2, ConsistencyLevel.ONE)
time.sleep(1.5)
assert client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl4')), ConsistencyLevel.ONE).column == column2
def test_remove_expiring(self):
""" Test removing a column with ttl """
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
column = Column(utf8encode('cttl5'), utf8encode('value1'), 0, 10)
client.insert(utf8encode('key1'), ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
client.remove(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl5')), 1, ConsistencyLevel.ONE)
        _expect_missing(lambda: client.get(utf8encode('key1'), ColumnPath('Standard1', column=utf8encode('cttl5')), ConsistencyLevel.ONE))
def test_describe_ring_on_invalid_keyspace(self):
def req():
client.describe_ring('system')
_expect_exception(req, InvalidRequestException)
def test_incr_decr_standard_add(self, request):
_set_keyspace('Keyspace1')
key = utf8encode(request.node.name)
d1 = 12
d2 = -21
d3 = 35
# insert positive and negative values and check the counts
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get(key, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get(key, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1 + d2)
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get(key, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1 + d2 + d3)
def test_incr_decr_super_add(self, request):
_set_keyspace('Keyspace1')
key = utf8encode(request.node.name)
d1 = -234
d2 = 52345
d3 = 3123
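        # add counter columns under super column 'sc1' and check the accumulated values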
client.add(key, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
client.add(key, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c2'), d2), ConsistencyLevel.ONE)
rv1 = client.get(key, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1')), ConsistencyLevel.ONE)
assert rv1.counter_super_column.columns[0].value == d1
assert rv1.counter_super_column.columns[1].value == d2
client.add(key, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d2), ConsistencyLevel.ONE)
rv2 = client.get(key, ColumnPath('SuperCounter1', utf8encode('sc1'), utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1 + d2)
client.add(key, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d3), ConsistencyLevel.ONE)
rv3 = client.get(key, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1 + d2 + d3)
def test_incr_standard_remove(self, request):
_set_keyspace('Keyspace1')
key1 = utf8encode(request.node.name + "_1")
key2 = utf8encode(request.node.name + "_2")
d1 = 124
# insert value and check it exists
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv1 = client.get(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
_assert_no_columnpath(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')))
# insert again and this time delete the whole row, check that it is gone
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv2 = client.get(key2, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter(key2, ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
_assert_no_columnpath(key2, ColumnPath(column_family='Counter1', column=utf8encode('c1')))
def test_incr_super_remove(self, request):
_set_keyspace('Keyspace1')
key1 = utf8encode(request.node.name + "_1")
key2 = utf8encode(request.node.name + "_2")
d1 = 52345
# insert value and check it exists
client.add(key1, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv1 = client.get(key1, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter(key1, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
_assert_no_columnpath(key1, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')))
# insert again and this time delete the whole row, check that it is gone
client.add(key2, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv2 = client.get(key2, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter(key2, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1')), ConsistencyLevel.ONE)
_assert_no_columnpath(key2, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')))
def test_incr_decr_standard_remove(self, request):
_set_keyspace('Keyspace1')
key1 = utf8encode(request.node.name + "_1")
key2 = utf8encode(request.node.name + "_2")
d1 = 124
# insert value and check it exists
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv1 = client.get(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
_assert_no_columnpath(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')))
# insert again and this time delete the whole row, check that it is gone
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv2 = client.get(key2, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter(key2, ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
_assert_no_columnpath(key2, ColumnPath(column_family='Counter1', column=utf8encode('c1')))
def test_incr_decr_super_remove(self, request):
_set_keyspace('Keyspace1')
key1 = utf8encode(request.node.name + "_1")
key2 = utf8encode(request.node.name + "_2")
d1 = 52345
# insert value and check it exists
client.add(key1, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv1 = client.get(key1, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter(key1, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
_assert_no_columnpath(key1, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')))
# insert again and this time delete the whole row, check that it is gone
client.add(key2, ColumnParent(column_family='SuperCounter1', super_column=utf8encode('sc1')), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
rv2 = client.get(key2, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter(key2, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1')), ConsistencyLevel.ONE)
_assert_no_columnpath(key2, ColumnPath(column_family='SuperCounter1', super_column=utf8encode('sc1'), column=utf8encode('c1')))
def test_incr_decr_standard_batch_add(self, request):
_set_keyspace('Keyspace1')
key = utf8encode(request.node.name)
d1 = 12
d2 = -21
update_map = {key: {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn(utf8encode('c1'), d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn(utf8encode('c1'), d2))),
]}}
# insert positive and negative values and check the counts
client.batch_mutate(update_map, ConsistencyLevel.ONE)
rv1 = client.get(key, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1 + d2
def test_incr_decr_standard_batch_remove(self, request):
_set_keyspace('Keyspace1')
key1 = utf8encode(request.node.name + "_1")
key2 = utf8encode(request.node.name + "_2")
d1 = 12
d2 = -21
# insert positive and negative values and check the counts
update_map = {key1: {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn(utf8encode('c1'), d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn(utf8encode('c1'), d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
rv1 = client.get(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1 + d2
# remove the previous column and check that it is gone
update_map = {key1: {'Counter1': [
Mutation(deletion=Deletion(predicate=SlicePredicate(column_names=[utf8encode('c1')]))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
_assert_no_columnpath(key1, ColumnPath(column_family='Counter1', column=utf8encode('c1')))
# insert again and this time delete the whole row, check that it is gone
update_map = {key2: {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn(utf8encode('c1'), d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn(utf8encode('c1'), d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
rv2 = client.get(key2, ColumnPath(column_family='Counter1', column=utf8encode('c1')), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1 + d2
update_map = {key2: {'Counter1': [
Mutation(deletion=Deletion()),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
_assert_no_columnpath(key2, ColumnPath(column_family='Counter1', column=utf8encode('c1')))
# known failure: see CASSANDRA-10046
def test_range_deletion(self):
""" Tests CASSANDRA-7990 """
_set_keyspace('Keyspace1')
self.truncate_all('StandardComposite')
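        # insert ten composite-named columns (i, i), apply a range tombstone over part of the name range
        # via batch_mutate, and verify the surviving columns below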
for i in range(10):
column_name = composite(str(i), str(i))
column = Column(column_name, utf8encode('value'), int(time.time() * 1000))
client.insert(utf8encode('key1'), ColumnParent('StandardComposite'), column, ConsistencyLevel.ONE)
delete_slice = SlicePredicate(slice_range=SliceRange(composite('3', eoc=b'\xff'), composite('6', b'\x01'), False, 100))
mutations = [Mutation(deletion=Deletion(int(time.time() * 1000), predicate=delete_slice))]
keyed_mutations = {utf8encode('key1'): {'StandardComposite': mutations}}
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
slice_predicate = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 100))
results = client.get_slice(utf8encode('key1'), ColumnParent('StandardComposite'), slice_predicate, ConsistencyLevel.ONE)
columns = [result.column.name for result in results]
assert columns == [composite('0', '0'), composite('1', '1'), composite('2', '2'),
composite('6', '6'), composite('7', '7'), composite('8', '8'), composite('9', '9')]
@pytest.mark.skip_version('3.9')
def test_range_deletion_eoc_0(self):
"""
This test confirms that a range tombstone with a final EOC of 0
        results in an exclusive deletion, except for cells that exactly match the tombstone bound.
@jira_ticket CASSANDRA-12423
"""
_set_keyspace('Keyspace1')
self.truncate_all('StandardComposite')
for i in range(10):
column_name = composite(str(i), str(i))
column = Column(column_name, utf8encode('value'), int(time.time() * 1000))
client.insert(utf8encode('key1'), ColumnParent('StandardComposite'), column, ConsistencyLevel.ONE)
# insert a partial cell name (just the first element of the composite)
column_name = composite('6', None, eoc=b'\x00')
column = Column(column_name, utf8encode('value'), int(time.time() * 1000))
client.insert(utf8encode('key1'), ColumnParent('StandardComposite'), column, ConsistencyLevel.ONE)
# sanity check the query
slice_predicate = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 100))
results = client.get_slice(utf8encode('key1'), ColumnParent('StandardComposite'), slice_predicate, ConsistencyLevel.ONE)
columns = [result.column.name for result in results]
assert columns == [composite('0', '0'), composite('1', '1'), composite('2', '2'), composite('3', '3'), composite('4', '4'), composite('5', '5'),
composite('6'),
composite('6', '6'),
composite('7', '7'), composite('8', '8'), composite('9', '9')]
# do a slice deletion with (6, ) as the end
delete_slice = SlicePredicate(slice_range=SliceRange(composite('3', eoc=b'\xff'), composite('6', b'\x00'), False, 100))
mutations = [Mutation(deletion=Deletion(int(time.time() * 1000), predicate=delete_slice))]
keyed_mutations = {utf8encode('key1'): {'StandardComposite': mutations}}
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
        # check the columns post-deletion: the partial cell (6, ) is gone because it exactly matches the tombstone bound, but (6, 6) remains
results = client.get_slice(utf8encode('key1'), ColumnParent('StandardComposite'), slice_predicate, ConsistencyLevel.ONE)
columns = [result.column.name for result in results]
assert columns == [composite('0', '0'), composite('1', '1'), composite('2', '2'),
composite('6', '6'),
composite('7', '7'), composite('8', '8'), composite('9', '9')]
# do another slice deletion, but make the end (6, 6) this time
delete_slice = SlicePredicate(slice_range=SliceRange(composite('3', eoc=b'\xff'), composite('6', '6', b'\x00'), False, 100))
mutations = [Mutation(deletion=Deletion(int(time.time() * 1000), predicate=delete_slice))]
keyed_mutations = {utf8encode('key1'): {'StandardComposite': mutations}}
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
# check the columns post-deletion, now (6, 6) is also gone
results = client.get_slice(utf8encode('key1'), ColumnParent('StandardComposite'), slice_predicate, ConsistencyLevel.ONE)
columns = [result.column.name for result in results]
assert columns == [composite('0', '0'), composite('1', '1'), composite('2', '2'),
composite('7', '7'), composite('8', '8'), composite('9', '9')]
def test_incr_decr_standard_slice(self, request):
_set_keyspace('Keyspace1')
key = utf8encode(request.node.name)
d1 = 12
d2 = -21
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c1'), d1), ConsistencyLevel.ONE)
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c2'), d1), ConsistencyLevel.ONE)
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c3'), d1), ConsistencyLevel.ONE)
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c3'), d2), ConsistencyLevel.ONE)
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c4'), d1), ConsistencyLevel.ONE)
client.add(key, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c5'), d1), ConsistencyLevel.ONE)
        # after inserting positive and negative values, check the counts
counters = client.get_slice(key, ColumnParent('Counter1'), SlicePredicate([utf8encode('c3'), utf8encode('c4')]), ConsistencyLevel.ONE)
assert counters[0].counter_column.value == d1 + d2
assert counters[1].counter_column.value == d1
def test_incr_decr_standard_multiget_slice(self, request):
_set_keyspace('Keyspace1')
key1 = utf8encode(request.node.name + "_1")
key2 = utf8encode(request.node.name + "_2")
d1 = 12
d2 = -21
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c2'), d1), ConsistencyLevel.ONE)
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c3'), d1), ConsistencyLevel.ONE)
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c3'), d2), ConsistencyLevel.ONE)
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c4'), d1), ConsistencyLevel.ONE)
client.add(key1, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c5'), d1), ConsistencyLevel.ONE)
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c2'), d1), ConsistencyLevel.ONE)
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c3'), d1), ConsistencyLevel.ONE)
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c3'), d2), ConsistencyLevel.ONE)
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c4'), d1), ConsistencyLevel.ONE)
client.add(key2, ColumnParent(column_family='Counter1'), CounterColumn(utf8encode('c5'), d1), ConsistencyLevel.ONE)
        # after inserting positive and negative values, check the counts
counters = client.multiget_slice([key1, key2], ColumnParent('Counter1'), SlicePredicate([utf8encode('c3'), utf8encode('c4')]), ConsistencyLevel.ONE)
assert counters[key1][0].counter_column.value == d1 + d2
assert counters[key1][1].counter_column.value == d1
assert counters[key2][0].counter_column.value == d1 + d2
assert counters[key2][1].counter_column.value == d1
def test_counter_get_slice_range(self, request):
_set_keyspace('Keyspace1')
key = utf8encode(request.node.name)
client.add(key, ColumnParent('Counter1'), CounterColumn(utf8encode('c1'), 1), ConsistencyLevel.ONE)
client.add(key, ColumnParent('Counter1'), CounterColumn(utf8encode('c2'), 2), ConsistencyLevel.ONE)
client.add(key, ColumnParent('Counter1'), CounterColumn(utf8encode('c3'), 3), ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange(utf8encode('c1'), utf8encode('c2'), False, 1000))
result = client.get_slice(key, ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == utf8encode('c1')
assert result[1].counter_column.name == utf8encode('c2')
p = SlicePredicate(slice_range=SliceRange(utf8encode('c3'), utf8encode('c2'), True, 1000))
result = client.get_slice(key, ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == utf8encode('c3')
assert result[1].counter_column.name == utf8encode('c2')
p = SlicePredicate(slice_range=SliceRange(utf8encode('a'), utf8encode('z'), False, 1000))
result = client.get_slice(key, ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange(utf8encode('a'), utf8encode('z'), False, 2))
result = client.get_slice(key, ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def test_counter_get_slice_super_range(self, request):
_set_keyspace('Keyspace1')
key = utf8encode(request.node.name)
client.add(key, ColumnParent('SuperCounter1', utf8encode('sc1')), CounterColumn(_i64(4), 4), ConsistencyLevel.ONE)
client.add(key, ColumnParent('SuperCounter1', utf8encode('sc2')), CounterColumn(_i64(5), 5), ConsistencyLevel.ONE)
client.add(key, ColumnParent('SuperCounter1', utf8encode('sc2')), CounterColumn(_i64(6), 6), ConsistencyLevel.ONE)
client.add(key, ColumnParent('SuperCounter1', utf8encode('sc3')), CounterColumn(_i64(7), 7), ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange(utf8encode('sc2'), utf8encode('sc3'), False, 2))
result = client.get_slice(key, ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == utf8encode('sc2')
assert result[1].counter_super_column.name == utf8encode('sc3')
p = SlicePredicate(slice_range=SliceRange(utf8encode('sc3'), utf8encode('sc2'), True, 2))
result = client.get_slice(key, ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == utf8encode('sc3')
assert result[1].counter_super_column.name == utf8encode('sc2')
def test_index_scan(self):
_set_keyspace('Keyspace1')
self.truncate_all('Indexed1')
client.insert(utf8encode('key1'), ColumnParent('Indexed1'), Column(utf8encode('birthdate'), _i64(1), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key2'), ColumnParent('Indexed1'), Column(utf8encode('birthdate'), _i64(2), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key2'), ColumnParent('Indexed1'), Column(utf8encode('b'), _i64(2), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key3'), ColumnParent('Indexed1'), Column(utf8encode('birthdate'), _i64(3), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key3'), ColumnParent('Indexed1'), Column(utf8encode('b'), _i64(3), 0), ConsistencyLevel.ONE)
# simple query on one index expression
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode('')))
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('birthdate'), IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == utf8encode('key1')
assert len(result[0].columns) == 1, result[0].columns
# without index
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('b'), IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
# but unindexed expression added to indexed one is ok
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('b'), IndexOperator.EQ, _i64(3)), IndexExpression(utf8encode('birthdate'), IndexOperator.EQ, _i64(3))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == utf8encode('key3')
assert len(result[0].columns) == 2, result[0].columns
def test_index_scan_uuid_names(self):
_set_keyspace('Keyspace1')
self.truncate_all('Indexed3')
sp = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode('')))
cp = ColumnParent('Indexed3') # timeuuid name, utf8 values
u = uuid.UUID('00000000-0000-1000-0000-000000000000').bytes
u2 = uuid.UUID('00000000-0000-1000-0000-000000000001').bytes
client.insert(utf8encode('key1'), ColumnParent('Indexed3'), Column(u, utf8encode('a'), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Indexed3'), Column(u2, utf8encode('b'), 0), ConsistencyLevel.ONE)
# name comparator + data validator of incompatible types -- see CASSANDRA-2347
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(u, IndexOperator.EQ, utf8encode('a')), IndexExpression(u2, IndexOperator.EQ, utf8encode('b'))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
cp = ColumnParent('Indexed2') # timeuuid name, long values
# name must be valid (TimeUUID)
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('foo'), IndexOperator.EQ, uuid.UUID('00000000-0000-1000-0000-000000000000').bytes)], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
# value must be valid (TimeUUID)
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, IndexOperator.EQ, utf8encode("foo"))], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
def test_index_scan_expiring(self):
""" Test that column ttled expires from KEYS index"""
_set_keyspace('Keyspace1')
self.truncate_all('Indexed1')
client.insert(utf8encode('key1'), ColumnParent('Indexed1'), Column(utf8encode('birthdate'), _i64(1), 0, 2), ConsistencyLevel.ONE)
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode('')))
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('birthdate'), IndexOperator.EQ, _i64(1))], 100)
# query before expiration
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
# wait for expiration and requery
time.sleep(3)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
def test_index_scan_indexed_column_outside_slice_predicate(self):
"""
Verify that performing an indexed read works when the indexed column
is not included in the slice predicate. Checks both cases where the
predicate contains a slice range or a set of column names, which
translate to slice and names queries server-side.
@jira_ticket CASSANDRA-11523
"""
_set_keyspace('Keyspace1')
self.truncate_all('Indexed4')
client.insert(utf8encode('key1'), ColumnParent('Indexed4'), Column(utf8encode('a'), _i64(1), 0), ConsistencyLevel.ONE)
client.insert(utf8encode('key1'), ColumnParent('Indexed4'), Column(utf8encode('z'), utf8encode('zzz'), 0), ConsistencyLevel.ONE)
cp = ColumnParent('Indexed4')
sp = SlicePredicate(slice_range=SliceRange(utf8encode('z'), utf8encode('z')))
key_range = KeyRange(utf8encode(''), utf8encode(''), None, None, [IndexExpression(utf8encode('a'), IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert len(result[0].columns) == 1, result[0].columns
assert result[0].columns[0].column.name == utf8encode('z')
sp = SlicePredicate(column_names=[utf8encode('z')])
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert len(result[0].columns) == 1, result[0].columns
assert result[0].columns[0].column.name == utf8encode('z')
def test_column_not_found_quorum(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
key = utf8encode('doesntexist')
column_path = ColumnPath(column_family="Standard1", column=utf8encode("idontexist"))
try:
client.get(key, column_path, ConsistencyLevel.QUORUM)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def test_get_range_slice_after_deletion(self):
_set_keyspace('Keyspace2')
self.truncate_all('Super3')
key = utf8encode('key1')
        # three supercolumns, each with a "col1" subcolumn
for i in range(1, 4):
client.insert(key, ColumnParent('Super3', utf8encode('sc%d' % i)), Column(utf8encode('col1'), utf8encode('val1'), 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3')
predicate = SlicePredicate(slice_range=SliceRange(utf8encode('sc1'), utf8encode('sc3'), False, count=1))
k_range = KeyRange(start_key=key, end_key=key, count=1)
# validate count=1 restricts to 1 supercolumn
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
# remove sc1; add back subcolumn to override tombstone
client.remove(key, ColumnPath('Super3', utf8encode('sc1')), 1, ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
client.insert(key, ColumnParent('Super3', utf8encode('sc1')), Column(utf8encode('col1'), utf8encode('val1'), 2), ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1, result[0].columns
assert result[0].columns[0].super_column.name == utf8encode('sc1')
def test_multi_slice(self):
_set_keyspace('Keyspace1')
self.truncate_all('Standard1')
_insert_six_columns('abc')
L = [result.column
for result in _big_multi_slice('abc')]
assert L == _MULTI_SLICE_COLUMNS, L
def test_truncate(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# truncate Standard1
self.truncate_all('Standard1')
assert _big_slice(utf8encode('key1'), ColumnParent('Standard1')) == []
# truncate Super1
self.truncate_all('Super1')
assert _big_slice(utf8encode('key1'), ColumnParent('Super1')) == []
assert _big_slice(utf8encode('key1'), ColumnParent('Super1', utf8encode('sc1'))) == []
@since('3.0', max_version='4')
def test_cql_range_tombstone_and_static(self):
node1 = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
# Create a CQL table with a static column and insert a row
session.execute('USE "Keyspace1"')
session.execute("CREATE TABLE t (k text, s text static, t text, v text, PRIMARY KEY (k, t))")
session.execute("INSERT INTO t (k, s, t, v) VALUES ('k', 's', 't', 'v') USING TIMESTAMP 0")
assert_one(session, "SELECT * FROM t", ['k', 't', 's', 'v'])
# Now submit a range deletion that should include both the row and the static value
_set_keyspace('Keyspace1')
mutations = [Mutation(deletion=Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(utf8encode(''), utf8encode(''), False, 1000))))]
mutation_map = dict((table, mutations) for table in ['t'])
keyed_mutations = dict((key, mutation_map) for key in [utf8encode('k')])
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
# And check everything is gone
assert_none(session, "SELECT * FROM t")
def test_compact_storage_get(self):
node1 = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
# Create a CQL table with a static column and insert a row
session.execute("USE \"Keyspace1\"")
session.execute("CREATE TABLE IF NOT EXISTS cs1 (k int PRIMARY KEY,v int) WITH COMPACT STORAGE")
_set_keyspace('Keyspace1')
CL = ConsistencyLevel.ONE
i = 1
client.insert(_i32(i), ColumnParent('cs1'), Column(utf8encode('v'), _i32(i), 0), CL)
_assert_column('cs1', _i32(i), utf8encode('v'), _i32(i), 0)
@pytest.mark.skip_version('3.9')
def test_range_tombstone_eoc_0(self):
"""
Insert a range tombstone with EOC=0 for a compact storage table. Insert 2 rows that
are just outside the range and check that they are present.
@jira_ticket CASSANDRA-12423
"""
node1 = self.cluster.nodelist()[0]
session = self.patient_cql_connection(node1)
session.execute('USE "Keyspace1"')
session.execute("CREATE TABLE test (id INT, c1 TEXT, c2 TEXT, v INT, PRIMARY KEY (id, c1, c2)) "
"with compact storage and compression = {'sstable_compression': ''};")
_set_keyspace('Keyspace1')
range_delete = {
_i32(1): {
'test': [Mutation(deletion=Deletion(2470761440040513,
predicate=SlicePredicate(slice_range=SliceRange(
start=composite('a'), finish=composite('asd')))))]
}
}
client.batch_mutate(range_delete, ConsistencyLevel.ONE)
session.execute("INSERT INTO test (id, c1, c2, v) VALUES (1, 'asd', '', 0) USING TIMESTAMP 1470761451368658")
session.execute("INSERT INTO test (id, c1, c2, v) VALUES (1, 'asd', 'asd', 0) USING TIMESTAMP 1470761449416613")
ret = list(session.execute('SELECT * FROM test'))
assert 2 == len(ret)
node1.nodetool('flush Keyspace1 test')
| apache-2.0 |
raychorn/knowu | django/djangononrelsample2/django/core/management/templates.py | 102 | 12715 | import cgi
import errno
import mimetypes
import os
import posixpath
import re
import shutil
import stat
import sys
import tempfile
try:
from urllib.request import urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve
from optparse import make_option
from os import path
import django
from django.template import Template, Context
from django.utils import archive
from django.utils._os import rmtree_errorhandler
from django.core.management.base import BaseCommand, CommandError
from django.core.management.commands.makemessages import handle_extensions
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
class TemplateCommand(BaseCommand):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
:param style: A color style object (see django.core.management.color).
:param app_or_project: The string 'app' or 'project'.
:param name: The name of the application or project.
:param directory: The directory to which the template should be copied.
:param options: The additional variables passed to project or app templates
"""
args = "[name] [optional destination directory]"
option_list = BaseCommand.option_list + (
make_option('--template',
action='store', dest='template',
help='The dotted import path to load the template from.'),
make_option('--extension', '-e', dest='extensions',
action='append', default=['py'],
help='The file extension(s) to render (default: "py"). '
'Separate multiple extensions with commas, or use '
'-e multiple times.'),
make_option('--name', '-n', dest='files',
action='append', default=[],
help='The file name(s) to render. '
                 'Separate multiple file names with commas, or use '
'-n multiple times.')
)
requires_model_validation = False
# Can't import settings during this command, because they haven't
# necessarily been created.
can_import_settings = False
# The supported URL schemes
url_schemes = ['http', 'https', 'ftp']
def handle(self, app_or_project, name, target=None, **options):
self.app_or_project = app_or_project
self.paths_to_remove = []
self.verbosity = int(options.get('verbosity'))
# If it's not a valid directory name.
if not re.search(r'^[_a-zA-Z]\w*$', name):
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = ('make sure the name begins '
'with a letter or underscore')
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." %
(name, app_or_project, message))
# if some directory is given, make sure it's nicely expanded
if target is None:
top_dir = path.join(os.getcwd(), name)
try:
os.makedirs(top_dir)
except OSError as e:
if e.errno == errno.EEXIST:
message = "'%s' already exists" % top_dir
else:
message = e
raise CommandError(message)
else:
top_dir = os.path.abspath(path.expanduser(target))
if not os.path.exists(top_dir):
raise CommandError("Destination directory '%s' does not "
"exist, please create it first." % top_dir)
extensions = tuple(
handle_extensions(options.get('extensions'), ignored=()))
extra_files = []
for file in options.get('files'):
extra_files.extend(map(lambda x: x.strip(), file.split(',')))
if self.verbosity >= 2:
self.stdout.write("Rendering %s template files with "
"extensions: %s\n" %
(app_or_project, ', '.join(extensions)))
self.stdout.write("Rendering %s template files with "
"filenames: %s\n" %
(app_or_project, ', '.join(extra_files)))
base_name = '%s_name' % app_or_project
base_subdir = '%s_template' % app_or_project
base_directory = '%s_directory' % app_or_project
context = Context(dict(options, **{
base_name: name,
base_directory: top_dir,
}), autoescape=False)
# Setup a stub settings environment for template rendering
from django.conf import settings
if not settings.configured:
settings.configure()
template_dir = self.handle_template(options.get('template'),
base_subdir)
prefix_length = len(template_dir) + 1
for root, dirs, files in os.walk(template_dir):
path_rest = root[prefix_length:]
relative_dir = path_rest.replace(base_name, name)
if relative_dir:
target_dir = path.join(top_dir, relative_dir)
if not path.exists(target_dir):
os.mkdir(target_dir)
for dirname in dirs[:]:
if dirname.startswith('.') or dirname == '__pycache__':
dirs.remove(dirname)
for filename in files:
if filename.endswith(('.pyo', '.pyc', '.py.class')):
# Ignore some files as they cause various breakages.
continue
old_path = path.join(root, filename)
new_path = path.join(top_dir, relative_dir,
filename.replace(base_name, name))
if path.exists(new_path):
raise CommandError("%s already exists, overlaying a "
"project or app into an existing "
"directory won't replace conflicting "
"files" % new_path)
# Only render the Python files, as we don't want to
                # accidentally render Django template files
with open(old_path, 'rb') as template_file:
content = template_file.read()
if filename.endswith(extensions) or filename in extra_files:
content = content.decode('utf-8')
template = Template(content)
content = template.render(context)
content = content.encode('utf-8')
with open(new_path, 'wb') as new_file:
new_file.write(content)
if self.verbosity >= 2:
self.stdout.write("Creating %s\n" % new_path)
try:
shutil.copymode(old_path, new_path)
self.make_writeable(new_path)
except OSError:
self.stderr.write(
"Notice: Couldn't set permission bits on %s. You're "
"probably using an uncommon filesystem setup. No "
"problem." % new_path, self.style.NOTICE)
if self.paths_to_remove:
if self.verbosity >= 2:
self.stdout.write("Cleaning up temporary files.\n")
for path_to_remove in self.paths_to_remove:
if path.isfile(path_to_remove):
os.remove(path_to_remove)
else:
shutil.rmtree(path_to_remove,
onerror=rmtree_errorhandler)
def handle_template(self, template, subdir):
"""
Determines where the app or project templates are.
Use django.__path__[0] as the default because we don't
know into which directory Django has been installed.
"""
if template is None:
return path.join(django.__path__[0], 'conf', subdir)
else:
if template.startswith('file://'):
template = template[7:]
expanded_template = path.expanduser(template)
expanded_template = path.normpath(expanded_template)
if path.isdir(expanded_template):
return expanded_template
if self.is_url(template):
# downloads the file and returns the path
absolute_path = self.download(template)
else:
absolute_path = path.abspath(expanded_template)
if path.exists(absolute_path):
return self.extract(absolute_path)
raise CommandError("couldn't handle %s template %s." %
(self.app_or_project, template))
def download(self, url):
"""
Downloads the given URL and returns the file name.
"""
def cleanup_url(url):
tmp = url.rstrip('/')
filename = tmp.split('/')[-1]
if url.endswith('/'):
display_url = tmp + '/'
else:
display_url = url
return filename, display_url
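        # For example (illustrative): cleanup_url('https://example.com/app_template/')
        # returns ('app_template', 'https://example.com/app_template/').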
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
self.paths_to_remove.append(tempdir)
filename, display_url = cleanup_url(url)
if self.verbosity >= 2:
self.stdout.write("Downloading %s\n" % display_url)
try:
the_path, info = urlretrieve(url, path.join(tempdir, filename))
except IOError as e:
raise CommandError("couldn't download URL %s to %s: %s" %
(url, filename, e))
used_name = the_path.split('/')[-1]
# Trying to get better name from response headers
content_disposition = info.get('content-disposition')
if content_disposition:
_, params = cgi.parse_header(content_disposition)
guessed_filename = params.get('filename') or used_name
else:
guessed_filename = used_name
# Falling back to content type guessing
ext = self.splitext(guessed_filename)[1]
content_type = info.get('content-type')
if not ext and content_type:
ext = mimetypes.guess_extension(content_type)
if ext:
guessed_filename += ext
# Move the temporary file to a filename that has better
        # chances of being recognized by the archive utils
if used_name != guessed_filename:
guessed_path = path.join(tempdir, guessed_filename)
shutil.move(the_path, guessed_path)
return guessed_path
# Giving up
return the_path
def splitext(self, the_path):
"""
Like os.path.splitext, but takes off .tar, too
"""
base, ext = posixpath.splitext(the_path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
def extract(self, filename):
"""
        Extracts the given file to a temporary directory and returns
the path of the directory with the extracted content.
"""
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
self.paths_to_remove.append(tempdir)
if self.verbosity >= 2:
self.stdout.write("Extracting %s\n" % filename)
try:
archive.extract(filename, tempdir)
return tempdir
except (archive.ArchiveException, IOError) as e:
raise CommandError("couldn't extract file %s to %s: %s" %
(filename, tempdir, e))
def is_url(self, template):
"""
Returns True if the name looks like a URL
"""
if ':' not in template:
return False
scheme = template.split(':', 1)[0].lower()
return scheme in self.url_schemes
def make_writeable(self, filename):
"""
Make sure that the file is writeable.
Useful if our source is read-only.
"""
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
| lgpl-3.0 |
guru-digital/CouchPotatoServer | libs/suds/client.py | 150 | 25971 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from cookielib import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from sudsobject import Factory as InstFactory
from sudsobject import Object
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.document import Document
from suds.sax.parser import Parser
from suds.options import Options
from suds.properties import Unskin
from urlparse import urlparse
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Client(object):
"""
A lightweight web services client.
I{(2nd generation)} API.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
"""
@classmethod
def items(cls, sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
return sudsobject.items(sobject)
@classmethod
def dict(cls, sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return sudsobject.asdict(sobject)
@classmethod
def metadata(cls, sobject):
"""
Extract the metadata from a suds object.
@param sobject: A suds object
@type sobject: L{Object}
@return: The object's metadata
@rtype: L{sudsobject.Metadata}
"""
return sobject.__metadata__
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = HttpAuthenticated()
self.options = options
options.cache = ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
This is useful for cases when a wsdl and referenced schemas make heavy
        use of namespaces and those namespaces are subject to change.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: when prefix is already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are
unique to the cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
def __str__(self):
return unicode(self)
def __unicode__(self):
s = ['\n']
build = suds.__build__.split()
s.append('Suds ( https://fedorahosted.org/suds/ )')
s.append(' version: %s' % suds.__version__)
s.append(' %s build: %s' % (build[0], build[1]))
for sd in self.sd:
s.append('\n\n%s' % unicode(sd))
return ''.join(s)
class Factory:
"""
A factory for instantiating types defined in the wsdl
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
"""
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
def create(self, name):
"""
create a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = InstFactory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception, e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug('%s created: %s', name, timer)
return result
def separator(self, ps):
"""
Set the path separator.
@param ps: The new path separator.
@type ps: char
"""
self.resolver = PathResolver(self.wsdl, ps)
class ServiceSelector:
"""
The B{service} selector is used to select a web service.
In most cases, the wsdl only defines (1) service in which access
by subscript is passed through to a L{PortSelector}. This is also the
behavior when a I{default} service has been specified. In cases
where multiple services have been defined and no default has been
specified, the service is found by name (or index) and a L{PortSelector}
for the service is returned. In all cases, attribute access is
forwarded to the L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __services: A list of I{wsdl} services.
@type __services: list
"""
def __init__(self, client, services):
"""
@param client: A suds client.
@type client: L{Client}
@param services: A list of I{wsdl} services.
@type services: list
"""
self.__client = client
self.__services = services
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@param name: The name of a method.
@type name: str
@return: A L{PortSelector}.
@rtype: L{PortSelector}.
"""
default = self.__ds()
if default is None:
port = self.__find(0)
else:
port = default
return getattr(port, name)
def __getitem__(self, name):
"""
Provides selection of the I{service} by name (string) or
index (integer). In cases where only (1) service is defined
or a I{default} has been specified, the request is forwarded
to the L{PortSelector}.
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the specified service.
@rtype: L{PortSelector}.
"""
if len(self.__services) == 1:
port = self.__find(0)
return port[name]
default = self.__ds()
if default is not None:
port = default
return port[name]
return self.__find(name)
def __find(self, name):
"""
Find a I{service} by name (string) or index (integer).
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the found service.
@rtype: L{PortSelector}.
"""
service = None
if not len(self.__services):
raise Exception, 'No services defined'
if isinstance(name, int):
try:
service = self.__services[name]
name = service.name
except IndexError:
raise ServiceNotFound, 'at [%d]' % name
else:
for s in self.__services:
if name == s.name:
service = s
break
if service is None:
raise ServiceNotFound, name
return PortSelector(self.__client, service.ports, name)
def __ds(self):
"""
Get the I{default} service if defined in the I{options}.
@return: A L{PortSelector} for the I{default} service.
@rtype: L{PortSelector}.
"""
ds = self.__client.options.service
if ds is None:
return None
else:
return self.__find(ds)
class PortSelector:
"""
The B{port} selector is used to select a I{web service} B{port}.
In cases where multiple ports have been defined and no default has been
specified, the port is found by name (or index) and a L{MethodSelector}
for the port is returned. In all cases, attribute access is
forwarded to the L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __ports: A list of I{service} ports.
@type __ports: list
@ivar __qn: The I{qualified} name of the port (used for logging).
@type __qn: str
"""
def __init__(self, client, ports, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param ports: A list of I{service} ports.
@type ports: list
@param qn: The name of the service.
@type qn: str
"""
self.__client = client
self.__ports = ports
self.__qn = qn
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@param name: The name of a method.
@type name: str
@return: A L{MethodSelector}.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
m = self.__find(0)
else:
m = default
return getattr(m, name)
def __getitem__(self, name):
"""
Provides selection of the I{port} by name (string) or
index (integer). In cases where only (1) port is defined
or a I{default} has been specified, the request is forwarded
to the L{MethodSelector}.
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the specified port.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
return self.__find(name)
else:
return default
def __find(self, name):
"""
Find a I{port} by name (string) or index (integer).
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the found port.
@rtype: L{MethodSelector}.
"""
port = None
if not len(self.__ports):
raise Exception, 'No ports defined: %s' % self.__qn
if isinstance(name, int):
qn = '%s[%d]' % (self.__qn, name)
try:
port = self.__ports[name]
except IndexError:
raise PortNotFound, qn
else:
qn = '.'.join((self.__qn, name))
for p in self.__ports:
if name == p.name:
port = p
break
if port is None:
raise PortNotFound, qn
qn = '.'.join((self.__qn, port.name))
return MethodSelector(self.__client, port.methods, qn)
def __dp(self):
"""
Get the I{default} port if defined in the I{options}.
@return: A L{MethodSelector} for the I{default} port.
@rtype: L{MethodSelector}.
"""
dp = self.__client.options.port
if dp is None:
return None
else:
return self.__find(dp)
class MethodSelector:
"""
The B{method} selector is used to select a B{method} by name.
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __methods: A dictionary of methods.
@type __methods: dict
@ivar __qn: The I{qualified} name of the method (used for logging).
@type __qn: str
"""
def __init__(self, client, methods, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param methods: A dictionary of methods.
@type methods: dict
@param qn: The I{qualified} name of the port.
@type qn: str
"""
self.__client = client
self.__methods = methods
self.__qn = qn
def __getattr__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
return self[name]
def __getitem__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
m = self.__methods.get(name)
if m is None:
qn = '.'.join((self.__qn, name))
raise MethodNotFound, qn
return Method(self.__client, m)
class Method:
"""
The I{method} (namespace) object.
@ivar client: A client object.
@type client: L{Client}
@ivar method: A I{wsdl} method.
    @type method: I{wsdl} Method.
"""
def __init__(self, client, method):
"""
@param client: A client object.
@type client: L{Client}
@param method: A I{raw} method.
        @type method: I{raw} Method.
"""
self.client = client
self.method = method
def __call__(self, *args, **kwargs):
"""
Invoke the method.
"""
clientclass = self.clientclass(kwargs)
client = clientclass(self.client, self.method)
if not self.faults():
try:
return client.invoke(args, kwargs)
except WebFault, e:
return (500, e)
else:
return client.invoke(args, kwargs)
def faults(self):
""" get faults option """
return self.client.options.faults
def clientclass(self, kwargs):
""" get soap client class """
if SimClient.simulation(kwargs):
return SimClient
else:
return SoapClient
class SoapClient:
"""
A lightweight soap based web client B{**not intended for external use}
@ivar service: The target method.
@type service: L{Service}
@ivar method: A target method.
@type method: L{Method}
    @ivar options: A dictionary of options.
@type options: dict
@ivar cookiejar: A cookie jar.
@type cookiejar: libcookie.CookieJar
"""
def __init__(self, client, method):
"""
@param client: A suds client.
@type client: L{Client}
@param method: A target method.
@type method: L{Method}
"""
self.client = client
self.method = method
self.options = client.options
self.cookiejar = CookieJar()
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin}|I{subclass of} L{Object}
"""
timer = metrics.Timer()
timer.start()
result = None
binding = self.method.binding.input
soapenv = binding.get_message(self.method, args, kwargs)
timer.stop()
metrics.log.debug(
"message for '%s' created: %s",
self.method.name,
timer)
timer.start()
result = self.send(soapenv)
timer.stop()
metrics.log.debug(
"method '%s' invoked: %s",
self.method.name,
timer)
return result
def send(self, soapenv):
"""
Send soap message.
@param soapenv: A soap envelope to send.
@type soapenv: L{Document}
@return: The reply to the sent message.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
result = None
location = self.location()
binding = self.method.binding.input
transport = self.options.transport
retxml = self.options.retxml
prettyxml = self.options.prettyxml
log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
try:
self.last_sent(soapenv)
plugins = PluginContainer(self.options.plugins)
plugins.message.marshalled(envelope=soapenv.root())
if prettyxml:
soapenv = soapenv.str()
else:
soapenv = soapenv.plain()
soapenv = soapenv.encode('utf-8')
plugins.message.sending(envelope=soapenv)
request = Request(location, soapenv)
request.headers = self.headers()
reply = transport.send(request)
ctx = plugins.message.received(reply=reply.message)
reply.message = ctx.reply
if retxml:
result = reply.message
else:
result = self.succeeded(binding, reply.message)
except TransportError, e:
if e.httpcode in (202,204):
result = None
else:
log.error(self.last_sent())
result = self.failed(binding, e)
return result
def headers(self):
"""
        Get http headers for the http/https request.
@return: A dictionary of header/values.
@rtype: dict
"""
action = self.method.soap.action
stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action }
result = dict(stock, **self.options.headers)
log.debug('headers = %s', result)
return result
def succeeded(self, binding, reply):
"""
Request succeeded, process the reply
@param binding: The binding to be used to process the reply.
@type binding: L{bindings.binding.Binding}
@param reply: The raw reply text.
@type reply: str
@return: The method result.
@rtype: I{builtin}, L{Object}
@raise WebFault: On server.
"""
log.debug('http succeeded:\n%s', reply)
plugins = PluginContainer(self.options.plugins)
if len(reply) > 0:
reply, result = binding.get_reply(self.method, reply)
self.last_received(reply)
else:
result = None
ctx = plugins.message.unmarshalled(reply=result)
result = ctx.reply
if self.options.faults:
return result
else:
return (200, result)
def failed(self, binding, error):
"""
Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError}
"""
status, reason = (error.httpcode, tostr(error))
reply = error.fp.read()
log.debug('http failed:\n%s', reply)
if status == 500:
if len(reply) > 0:
r, p = binding.get_fault(reply)
self.last_received(r)
return (status, p)
else:
return (status, None)
if self.options.faults:
raise Exception((status, reason))
else:
return (status, None)
def location(self):
p = Unskin(self.options)
return p.get('location', self.method.location)
def last_sent(self, d=None):
key = 'tx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
def last_received(self, d=None):
key = 'rx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
class SimClient(SoapClient):
"""
Loopback client used for message/reply simulation.
"""
injkey = '__inject'
@classmethod
def simulation(cls, kwargs):
""" get whether loopback has been specified in the I{kwargs}. """
return kwargs.has_key(SimClient.injkey)
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
simulation = kwargs[self.injkey]
msg = simulation.get('msg')
reply = simulation.get('reply')
fault = simulation.get('fault')
if msg is None:
if reply is not None:
return self.__reply(reply, args, kwargs)
if fault is not None:
return self.__fault(fault)
raise Exception('(reply|fault) expected when msg=None')
sax = Parser()
msg = sax.parse(string=msg)
return self.send(msg)
def __reply(self, reply, args, kwargs):
""" simulate the reply """
binding = self.method.binding.input
msg = binding.get_message(self.method, args, kwargs)
log.debug('inject (simulated) send message:\n%s', msg)
binding = self.method.binding.output
return self.succeeded(binding, reply)
def __fault(self, reply):
""" simulate the (fault) reply """
binding = self.method.binding.output
if self.options.faults:
r, p = binding.get_fault(reply)
self.last_received(r)
return (500, p)
else:
return (500, None)
| gpl-3.0 |
highweb-project/highweb-webcl-html5spec | third_party/pycoverage/coverage/report.py | 214 | 3031 | """Reporter foundation for Coverage."""
import fnmatch, os
from coverage.codeunit import code_unit_factory
from coverage.files import prep_patterns
from coverage.misc import CoverageException, NoSource, NotPython
class Reporter(object):
"""A base class for all reporters."""
def __init__(self, coverage, config):
"""Create a reporter.
`coverage` is the coverage instance. `config` is an instance of
CoverageConfig, for controlling all sorts of behavior.
"""
self.coverage = coverage
self.config = config
# The code units to report on. Set by find_code_units.
self.code_units = []
# The directory into which to place the report, used by some derived
# classes.
self.directory = None
def find_code_units(self, morfs):
"""Find the code units we'll report on.
`morfs` is a list of modules or filenames.
"""
morfs = morfs or self.coverage.data.measured_files()
file_locator = self.coverage.file_locator
self.code_units = code_unit_factory(morfs, file_locator)
if self.config.include:
patterns = prep_patterns(self.config.include)
filtered = []
for cu in self.code_units:
for pattern in patterns:
if fnmatch.fnmatch(cu.filename, pattern):
filtered.append(cu)
break
self.code_units = filtered
if self.config.omit:
patterns = prep_patterns(self.config.omit)
filtered = []
for cu in self.code_units:
for pattern in patterns:
if fnmatch.fnmatch(cu.filename, pattern):
break
else:
filtered.append(cu)
self.code_units = filtered
self.code_units.sort()
def report_files(self, report_fn, morfs, directory=None):
"""Run a reporting function on a number of morfs.
`report_fn` is called for each relative morf in `morfs`. It is called
as::
report_fn(code_unit, analysis)
where `code_unit` is the `CodeUnit` for the morf, and `analysis` is
the `Analysis` for the morf.
"""
self.find_code_units(morfs)
if not self.code_units:
raise CoverageException("No data to report.")
self.directory = directory
if self.directory and not os.path.exists(self.directory):
os.makedirs(self.directory)
for cu in self.code_units:
try:
report_fn(cu, self.coverage._analyze(cu))
except NoSource:
if not self.config.ignore_errors:
raise
except NotPython:
# Only report errors for .py files, and only if we didn't
# explicitly suppress those errors.
if cu.should_be_python() and not self.config.ignore_errors:
raise
| bsd-3-clause |
google/carfac | python/tf/car_saveload_test.py | 1 | 1728 | # Lint as: python3
#!/usr/bin/env python
# Copyright 2021 The CARFAC Authors. All Rights Reserved.
#
# This file is part of an implementation of Lyon's cochlear model:
# "Cascade of Asymmetric Resonators with Fast-Acting Compression"
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for carfac.tf.pz."""
import tempfile
import unittest
from absl import app
import numpy as np
import tensorflow as tf
from . import car
class CARTest(unittest.TestCase):
def testSaveLoad(self):
car_cell = car.CARCell()
car_layer = tf.keras.layers.RNN(car_cell, return_sequences=True)
model = tf.keras.Sequential()
model.add(car_layer)
impulse: np.ndarray = np.zeros([3, 10, 1], dtype=np.float32)
impulse[:, 0, :] = 1
impulse: tf.Tensor = tf.constant(impulse)
model.build(impulse.shape)
with tempfile.TemporaryDirectory() as savefile:
model.save(savefile)
loaded_model: tf.keras.models.Model = tf.keras.models.load_model(
savefile, custom_objects={'CARCell': car.CARCell})
np.testing.assert_array_almost_equal(model(impulse),
loaded_model(impulse))
def main(_):
unittest.main()
if __name__ == '__main__':
app.run(main)
| apache-2.0 |
thaim/ansible | lib/ansible/modules/network/f5/bigip_gtm_datacenter.py | 38 | 14466 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_datacenter
short_description: Manage Datacenter configuration in BIG-IP
description:
- Manage BIG-IP data center configuration. A data center defines the location
where the physical network components reside, such as the server and link
objects that share the same subnet on the network. This module is able to
manipulate the data center definitions in a BIG-IP.
version_added: 2.2
options:
contact:
description:
- The name of the contact for the data center.
type: str
description:
description:
- The description of the data center.
type: str
location:
description:
- The location of the data center.
type: str
name:
description:
- The name of the data center.
type: str
required: True
state:
description:
      - The data center state. If C(absent), an attempt to delete the
        data center will be made. C(present) creates
        the data center and enables it. If C(enabled), enable the data
        center if it exists. If C(disabled), create the data center if
        needed, and set state to C(disabled).
type: str
choices:
- present
- absent
- enabled
- disabled
default: present
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
version_added: 2.5
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create data center "New York"
bigip_gtm_datacenter:
name: New York
location: 222 West 23rd
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
contact:
description: The contact that was set on the datacenter.
returned: changed
type: str
sample: [email protected]
description:
description: The description that was set for the datacenter.
returned: changed
type: str
sample: Datacenter in NYC
enabled:
description: Whether the datacenter is enabled or not
returned: changed
type: bool
sample: true
disabled:
description: Whether the datacenter is disabled or not.
returned: changed
type: bool
sample: true
state:
description: State of the datacenter.
returned: changed
type: str
sample: disabled
location:
description: The location that is set for the datacenter.
returned: changed
type: str
sample: 222 West 23rd
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
api_map = {}
updatables = [
'location', 'description', 'contact', 'state',
]
returnables = [
'location', 'description', 'contact', 'state', 'enabled', 'disabled',
]
api_attributes = [
'enabled', 'location', 'description', 'contact', 'disabled',
]
class ApiParameters(Parameters):
@property
def disabled(self):
if self._values['disabled'] is True:
return True
return None
@property
def enabled(self):
if self._values['enabled'] is True:
return True
return None
class ModuleParameters(Parameters):
@property
def disabled(self):
if self._values['state'] == 'disabled':
return True
return None
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return True
return None
@property
def state(self):
if self.enabled and self._values['state'] != 'present':
return 'enabled'
elif self.disabled and self._values['state'] != 'present':
return 'disabled'
else:
return self._values['state']
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
@property
def disabled(self):
if self._values['state'] == 'disabled':
return True
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return True
class ReportableChanges(Changes):
@property
def disabled(self):
if self._values['state'] == 'disabled':
return True
elif self._values['state'] in ['enabled', 'present']:
return False
return None
@property
def enabled(self):
if self._values['state'] in ['enabled', 'present']:
return True
elif self._values['state'] == 'disabled':
return False
return None
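# Illustrative mapping (not part of the module): how the parameter classes
# above translate the user-facing ``state`` choice when instantiated directly;
# the dictionaries below are assumed minimal inputs for demonstration only.
#
#   ModuleParameters(params={'state': 'disabled'}).disabled   # -> True
#   ModuleParameters(params={'state': 'present'}).enabled     # -> True
#   ReportableChanges(params={'state': 'enabled'}).disabled   # -> False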
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def state(self):
if self.want.enabled != self.have.enabled:
return dict(
state=self.want.state,
enabled=self.want.enabled
)
if self.want.disabled != self.have.disabled:
return dict(
state=self.want.state,
disabled=self.want.disabled
)
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.pop('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
if not module_provisioned(self.client, 'gtm'):
raise F5ModuleError(
"GTM must be provisioned to use this module."
)
changed = False
result = dict()
state = self.want.state
if state in ['present', 'enabled', 'disabled']:
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
changed = False
if self.exists():
changed = self.remove()
return changed
def create(self):
self.have = ApiParameters()
self.should_update()
if self.module.check_mode:
return True
self.create_on_device()
if self.exists():
return True
else:
raise F5ModuleError("Failed to create the datacenter")
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the datacenter")
return True
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/gtm/datacenter/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
contact=dict(),
description=dict(),
location=dict(),
name=dict(required=True),
state=dict(
default='present',
choices=['present', 'absent', 'disabled', 'enabled']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| mit |
secynic/ipwhois | ipwhois/tests/online/test_rdap.py | 1 | 1923 | import json
import io
from os import path
import logging
from ipwhois.tests import TestCommon
from ipwhois.exceptions import (HTTPLookupError, HTTPRateLimitError)
from ipwhois.rdap import (RDAP, Net)
LOG_FORMAT = ('[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)s] '
'[%(funcName)s()] %(message)s')
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
log = logging.getLogger(__name__)
class TestRDAP(TestCommon):
def test_lookup(self):
data_dir = path.abspath(path.join(path.dirname(__file__), '..'))
with io.open(str(data_dir) + '/rdap.json', 'r') as data_file:
data = json.load(data_file)
for key, val in data.items():
log.debug('Testing: {0}'.format(key))
net = Net(key)
obj = RDAP(net)
try:
self.assertIsInstance(obj.lookup(asn_data=val['asn_data'],
depth=1), dict)
except (HTTPLookupError, HTTPRateLimitError):
pass
except AssertionError as e:
raise e
except Exception as e:
self.fail('Unexpected exception raised: {0}'.format(e))
for key, val in data.items():
log.debug('Testing bootstrap and raw: {0}'.format(key))
net = Net(key)
obj = RDAP(net)
try:
self.assertIsInstance(obj.lookup(asn_data=val['asn_data'],
depth=3,
bootstrap=True,
inc_raw=True), dict)
except (HTTPLookupError, HTTPRateLimitError):
pass
except AssertionError as e:
raise e
except Exception as e:
self.fail('Unexpected exception raised: {0}'.format(e))
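# Illustrative sketch (not part of the test): the high-level ipwhois call that
# exercises the same RDAP lookup path outside the test harness; the address
# below is a placeholder.
#
#   from ipwhois import IPWhois
#   results = IPWhois('74.125.225.229').lookup_rdap(depth=1)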
| bsd-2-clause |
gdimitris/ChessPuzzler | Virtual_Environment/lib/python2.7/site-packages/migrate/tests/changeset/test_changeset.py | 66 | 36587 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlalchemy
import warnings
from sqlalchemy import *
from migrate import changeset, exceptions
from migrate.changeset import *
from migrate.changeset import constraint
from migrate.changeset.schema import ColumnDelta
from migrate.tests import fixture
from migrate.tests.fixture.warnings import catch_warnings
import six
class TestAddDropColumn(fixture.DB):
"""Test add/drop column through all possible interfaces
also test for constraints
"""
level = fixture.DB.CONNECT
table_name = 'tmp_adddropcol'
table_name_idx = 'tmp_adddropcol_idx'
table_int = 0
def _setup(self, url):
super(TestAddDropColumn, self)._setup(url)
self.meta = MetaData()
self.table = Table(self.table_name, self.meta,
Column('id', Integer, unique=True),
)
self.table_idx = Table(
self.table_name_idx,
self.meta,
Column('id', Integer, primary_key=True),
Column('a', Integer),
Column('b', Integer),
Index('test_idx', 'a', 'b')
)
self.meta.bind = self.engine
if self.engine.has_table(self.table.name):
self.table.drop()
if self.engine.has_table(self.table_idx.name):
self.table_idx.drop()
self.table.create()
self.table_idx.create()
def _teardown(self):
if self.engine.has_table(self.table.name):
self.table.drop()
if self.engine.has_table(self.table_idx.name):
self.table_idx.drop()
self.meta.clear()
super(TestAddDropColumn,self)._teardown()
def run_(self, create_column_func, drop_column_func, *col_p, **col_k):
col_name = 'data'
def assert_numcols(num_of_expected_cols):
# number of cols should be correct in table object and in database
self.refresh_table(self.table_name)
result = len(self.table.c)
            self.assertEqual(result, num_of_expected_cols)
if col_k.get('primary_key', None):
# new primary key: check its length too
result = len(self.table.primary_key)
self.assertEqual(result, num_of_expected_cols)
        # we have 1 column and there is no data column
assert_numcols(1)
self.assertTrue(getattr(self.table.c, 'data', None) is None)
if len(col_p) == 0:
col_p = [String(40)]
col = Column(col_name, *col_p, **col_k)
create_column_func(col)
assert_numcols(2)
# data column exists
self.assertTrue(self.table.c.data.type.length, 40)
col2 = self.table.c.data
drop_column_func(col2)
assert_numcols(1)
@fixture.usedb()
def test_undefined(self):
"""Add/drop columns not yet defined in the table"""
def add_func(col):
return create_column(col, self.table)
def drop_func(col):
return drop_column(col, self.table)
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_defined(self):
"""Add/drop columns already defined in the table"""
def add_func(col):
self.meta.clear()
self.table = Table(self.table_name, self.meta,
Column('id', Integer, primary_key=True),
col,
)
return create_column(col)
def drop_func(col):
return drop_column(col)
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_method_bound(self):
"""Add/drop columns via column methods; columns bound to a table
ie. no table parameter passed to function
"""
def add_func(col):
self.assertTrue(col.table is None, col.table)
self.table.append_column(col)
return col.create()
def drop_func(col):
#self.assertTrue(col.table is None,col.table)
#self.table.append_column(col)
return col.drop()
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_method_notbound(self):
"""Add/drop columns via column methods; columns not bound to a table"""
def add_func(col):
return col.create(self.table)
def drop_func(col):
return col.drop(self.table)
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_tablemethod_obj(self):
"""Add/drop columns via table methods; by column object"""
def add_func(col):
return self.table.create_column(col)
def drop_func(col):
return self.table.drop_column(col)
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_tablemethod_name(self):
"""Add/drop columns via table methods; by column name"""
def add_func(col):
# must be bound to table
self.table.append_column(col)
return self.table.create_column(col.name)
def drop_func(col):
# Not necessarily bound to table
return self.table.drop_column(col.name)
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_byname(self):
"""Add/drop columns via functions; by table object and column name"""
def add_func(col):
self.table.append_column(col)
return create_column(col.name, self.table)
def drop_func(col):
return drop_column(col.name, self.table)
return self.run_(add_func, drop_func)
@fixture.usedb()
def test_drop_column_not_in_table(self):
"""Drop column by name"""
def add_func(col):
return self.table.create_column(col)
def drop_func(col):
if SQLA_07:
self.table._columns.remove(col)
else:
self.table.c.remove(col)
return self.table.drop_column(col.name)
self.run_(add_func, drop_func)
@fixture.usedb()
def test_fk(self):
"""Can create columns with foreign keys"""
# create FK's target
reftable = Table('tmp_ref', self.meta,
Column('id', Integer, primary_key=True),
)
if self.engine.has_table(reftable.name):
reftable.drop()
reftable.create()
# create column with fk
col = Column('data', Integer, ForeignKey(reftable.c.id, name='testfk'))
col.create(self.table)
# check if constraint is added
for cons in self.table.constraints:
if isinstance(cons, sqlalchemy.schema.ForeignKeyConstraint):
break
else:
self.fail('No constraint found')
# TODO: test on db level if constraints work
if SQLA_07:
self.assertEqual(reftable.c.id.name,
list(col.foreign_keys)[0].column.name)
else:
self.assertEqual(reftable.c.id.name,
col.foreign_keys[0].column.name)
if self.engine.name == 'mysql':
constraint.ForeignKeyConstraint([self.table.c.data],
[reftable.c.id],
name='testfk').drop()
col.drop(self.table)
if self.engine.has_table(reftable.name):
reftable.drop()
@fixture.usedb(not_supported='sqlite')
def test_pk(self):
"""Can create columns with primary key"""
col = Column('data', Integer, nullable=False)
self.assertRaises(exceptions.InvalidConstraintError,
col.create, self.table, primary_key_name=True)
col.create(self.table, primary_key_name='data_pkey')
# check if constraint was added (cannot test on objects)
self.table.insert(values={'data': 4}).execute()
try:
self.table.insert(values={'data': 4}).execute()
except (sqlalchemy.exc.IntegrityError,
sqlalchemy.exc.ProgrammingError):
pass
else:
self.fail()
col.drop()
@fixture.usedb(not_supported=['mysql'])
def test_check(self):
"""Can create columns with check constraint"""
col = Column('foo',
Integer,
sqlalchemy.schema.CheckConstraint('foo > 4'))
col.create(self.table)
# check if constraint was added (cannot test on objects)
self.table.insert(values={'foo': 5}).execute()
try:
self.table.insert(values={'foo': 3}).execute()
except (sqlalchemy.exc.IntegrityError,
sqlalchemy.exc.ProgrammingError):
pass
else:
self.fail()
col.drop()
@fixture.usedb()
def test_unique_constraint(self):
self.assertRaises(exceptions.InvalidConstraintError,
Column('data', Integer, unique=True).create, self.table)
col = Column('data', Integer)
col.create(self.table, unique_name='data_unique')
# check if constraint was added (cannot test on objects)
self.table.insert(values={'data': 5}).execute()
try:
self.table.insert(values={'data': 5}).execute()
except (sqlalchemy.exc.IntegrityError,
sqlalchemy.exc.ProgrammingError):
pass
else:
self.fail()
col.drop(self.table)
# TODO: remove already attached columns with uniques, pks, fks ..
@fixture.usedb(not_supported=['ibm_db_sa', 'postgresql'])
def test_drop_column_of_composite_index(self):
# NOTE(rpodolyaka): postgresql automatically drops a composite index
# if one of its columns is dropped
# NOTE(mriedem): DB2 does the same.
self.table_idx.c.b.drop()
reflected = Table(self.table_idx.name, MetaData(), autoload=True,
autoload_with=self.engine)
index = next(iter(reflected.indexes))
self.assertEquals(['a'], [c.name for c in index.columns])
@fixture.usedb()
def test_drop_all_columns_of_composite_index(self):
self.table_idx.c.a.drop()
self.table_idx.c.b.drop()
reflected = Table(self.table_idx.name, MetaData(), autoload=True,
autoload_with=self.engine)
self.assertEquals(0, len(reflected.indexes))
def _check_index(self,expected):
if 'mysql' in self.engine.name or 'postgres' in self.engine.name:
for index in tuple(
Table(self.table.name, MetaData(),
autoload=True, autoload_with=self.engine).indexes
):
if index.name=='ix_data':
break
self.assertEqual(expected,index.unique)
@fixture.usedb()
def test_index(self):
col = Column('data', Integer)
col.create(self.table, index_name='ix_data')
self._check_index(False)
col.drop()
@fixture.usedb()
def test_index_unique(self):
# shows how to create a unique index
col = Column('data', Integer)
col.create(self.table)
Index('ix_data', col, unique=True).create(bind=self.engine)
# check if index was added
self.table.insert(values={'data': 5}).execute()
try:
self.table.insert(values={'data': 5}).execute()
except (sqlalchemy.exc.IntegrityError,
sqlalchemy.exc.ProgrammingError):
pass
else:
self.fail()
self._check_index(True)
col.drop()
@fixture.usedb()
def test_server_defaults(self):
"""Can create columns with server_default values"""
col = Column('data', String(244), server_default='foobar')
col.create(self.table)
self.table.insert(values={'id': 10}).execute()
row = self._select_row()
self.assertEqual(u'foobar', row['data'])
col.drop()
@fixture.usedb()
def test_populate_default(self):
"""Test populate_default=True"""
def default():
return 'foobar'
col = Column('data', String(244), default=default)
col.create(self.table, populate_default=True)
self.table.insert(values={'id': 10}).execute()
row = self._select_row()
self.assertEqual(u'foobar', row['data'])
col.drop()
# TODO: test sequence
# TODO: test quoting
# TODO: test non-autoname constraints
@fixture.usedb()
def test_drop_doesnt_delete_other_indexes(self):
# add two indexed columns
self.table.drop()
self.meta.clear()
self.table = Table(
self.table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('d1', String(10), index=True),
Column('d2', String(10), index=True),
)
self.table.create()
# paranoid check
self.refresh_table()
self.assertEqual(
sorted([i.name for i in self.table.indexes]),
[u'ix_tmp_adddropcol_d1', u'ix_tmp_adddropcol_d2']
)
# delete one
self.table.c.d2.drop()
# ensure the other index is still there
self.refresh_table()
self.assertEqual(
sorted([i.name for i in self.table.indexes]),
[u'ix_tmp_adddropcol_d1']
)
def _actual_foreign_keys(self):
from sqlalchemy.schema import ForeignKeyConstraint
result = []
for cons in self.table.constraints:
if isinstance(cons,ForeignKeyConstraint):
col_names = []
for col_name in cons.columns:
if not isinstance(col_name,six.string_types):
col_name = col_name.name
col_names.append(col_name)
result.append(col_names)
result.sort()
return result
@fixture.usedb()
def test_drop_with_foreign_keys(self):
self.table.drop()
self.meta.clear()
# create FK's target
reftable = Table('tmp_ref', self.meta,
Column('id', Integer, primary_key=True),
)
if self.engine.has_table(reftable.name):
reftable.drop()
reftable.create()
# add a table with two foreign key columns
self.table = Table(
self.table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('r1', Integer, ForeignKey('tmp_ref.id', name='test_fk1')),
Column('r2', Integer, ForeignKey('tmp_ref.id', name='test_fk2')),
)
self.table.create()
# paranoid check
self.assertEqual([['r1'],['r2']],
self._actual_foreign_keys())
# delete one
if self.engine.name == 'mysql':
constraint.ForeignKeyConstraint([self.table.c.r2], [reftable.c.id],
name='test_fk2').drop()
self.table.c.r2.drop()
# check remaining foreign key is there
self.assertEqual([['r1']],
self._actual_foreign_keys())
@fixture.usedb()
def test_drop_with_complex_foreign_keys(self):
from sqlalchemy.schema import ForeignKeyConstraint
from sqlalchemy.schema import UniqueConstraint
self.table.drop()
self.meta.clear()
# NOTE(mriedem): DB2 does not currently support unique constraints
# on nullable columns, so the columns that are used to create the
# foreign keys here need to be non-nullable for testing with DB2
# to work.
# create FK's target
reftable = Table('tmp_ref', self.meta,
Column('id', Integer, primary_key=True),
Column('jd', Integer, nullable=False),
UniqueConstraint('id','jd')
)
if self.engine.has_table(reftable.name):
reftable.drop()
reftable.create()
# add a table with a complex foreign key constraint
self.table = Table(
self.table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('r1', Integer, nullable=False),
Column('r2', Integer, nullable=False),
ForeignKeyConstraint(['r1','r2'],
[reftable.c.id,reftable.c.jd],
name='test_fk')
)
self.table.create()
# paranoid check
self.assertEqual([['r1','r2']],
self._actual_foreign_keys())
# delete one
if self.engine.name == 'mysql':
constraint.ForeignKeyConstraint([self.table.c.r1, self.table.c.r2],
[reftable.c.id, reftable.c.jd],
name='test_fk').drop()
self.table.c.r2.drop()
# check the constraint is gone, since part of it
# is no longer there - if people hit this,
# they may be confused, maybe we should raise an error
# and insist that the constraint is deleted first, separately?
self.assertEqual([],
self._actual_foreign_keys())
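# Illustrative sketch (not part of the test suite): the minimal add/drop
# column workflow that TestAddDropColumn exercises above. The SQLite URL and
# table name are placeholders; nothing here runs at import time.
def _example_add_drop_column(url='sqlite://'):
    from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
    from migrate.changeset import create_column, drop_column
    engine = create_engine(url)
    meta = MetaData(bind=engine)
    table = Table('example', meta, Column('id', Integer, primary_key=True))
    table.create()
    col = Column('data', String(40))
    create_column(col, table)   # ALTER TABLE example ADD COLUMN data
    drop_column(col, table)     # ALTER TABLE example DROP COLUMN data
    table.drop()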
class TestRename(fixture.DB):
"""Tests for table and index rename methods"""
level = fixture.DB.CONNECT
meta = MetaData()
def _setup(self, url):
super(TestRename, self)._setup(url)
self.meta.bind = self.engine
@fixture.usedb(not_supported='firebird')
def test_rename_table(self):
"""Tables can be renamed"""
c_name = 'col_1'
table_name1 = 'name_one'
table_name2 = 'name_two'
index_name1 = 'x' + table_name1
index_name2 = 'x' + table_name2
self.meta.clear()
self.column = Column(c_name, Integer)
self.table = Table(table_name1, self.meta, self.column)
self.index = Index(index_name1, self.column, unique=False)
if self.engine.has_table(self.table.name):
self.table.drop()
if self.engine.has_table(table_name2):
tmp = Table(table_name2, self.meta, autoload=True)
tmp.drop()
tmp.deregister()
del tmp
self.table.create()
def assert_table_name(expected, skip_object_check=False):
"""Refresh a table via autoload
SA has changed some since this test was written; we now need to do
meta.clear() upon reloading a table - clear all rather than a
select few. So, this works only if we're working with one table at
a time (else, others will vanish too).
"""
if not skip_object_check:
# Table object check
self.assertEqual(self.table.name,expected)
newname = self.table.name
else:
# we know the object's name isn't consistent: just assign it
newname = expected
# Table DB check
self.meta.clear()
self.table = Table(newname, self.meta, autoload=True)
self.assertEqual(self.table.name, expected)
def assert_index_name(expected, skip_object_check=False):
if not skip_object_check:
# Index object check
self.assertEqual(self.index.name, expected)
else:
# object is inconsistent
self.index.name = expected
# TODO: Index DB check
def add_table_to_meta(name):
# trigger the case where table_name2 needs to be
# removed from the metadata in ChangesetTable.deregister()
tmp = Table(name, self.meta, Column(c_name, Integer))
tmp.create()
tmp.drop()
try:
# Table renames
assert_table_name(table_name1)
add_table_to_meta(table_name2)
rename_table(self.table, table_name2)
assert_table_name(table_name2)
self.table.rename(table_name1)
assert_table_name(table_name1)
# test by just the string
rename_table(table_name1, table_name2, engine=self.engine)
assert_table_name(table_name2, True) # object not updated
# Index renames
if self.url.startswith('sqlite') or self.url.startswith('mysql'):
self.assertRaises(exceptions.NotSupportedError,
self.index.rename, index_name2)
else:
assert_index_name(index_name1)
rename_index(self.index, index_name2, engine=self.engine)
assert_index_name(index_name2)
self.index.rename(index_name1)
assert_index_name(index_name1)
# test by just the string
rename_index(index_name1, index_name2, engine=self.engine)
assert_index_name(index_name2, True)
finally:
if self.table.exists():
self.table.drop()
class TestColumnChange(fixture.DB):
level = fixture.DB.CONNECT
table_name = 'tmp_colchange'
def _setup(self, url):
super(TestColumnChange, self)._setup(url)
self.meta = MetaData(self.engine)
self.table = Table(self.table_name, self.meta,
Column('id', Integer, primary_key=True),
Column('data', String(40), server_default=DefaultClause("tluafed"),
nullable=True),
)
if self.table.exists():
self.table.drop()
try:
self.table.create()
except sqlalchemy.exc.SQLError:
# SQLite: database schema has changed
if not self.url.startswith('sqlite://'):
raise
def _teardown(self):
if self.table.exists():
try:
self.table.drop(self.engine)
except sqlalchemy.exc.SQLError:
# SQLite: database schema has changed
if not self.url.startswith('sqlite://'):
raise
super(TestColumnChange, self)._teardown()
@fixture.usedb()
def test_rename(self):
"""Can rename a column"""
def num_rows(col, content):
return len(list(self.table.select(col == content).execute()))
# Table content should be preserved in changed columns
content = "fgsfds"
self.engine.execute(self.table.insert(), data=content, id=42)
self.assertEqual(num_rows(self.table.c.data, content), 1)
# ...as a function, given a column object and the new name
alter_column('data', name='data2', table=self.table)
self.refresh_table()
alter_column(self.table.c.data2, name='atad')
self.refresh_table(self.table.name)
self.assertTrue('data' not in self.table.c.keys())
self.assertTrue('atad' in self.table.c.keys())
self.assertEqual(num_rows(self.table.c.atad, content), 1)
# ...as a method, given a new name
self.table.c.atad.alter(name='data')
self.refresh_table(self.table.name)
self.assertTrue('atad' not in self.table.c.keys())
self.table.c.data # Should not raise exception
self.assertEqual(num_rows(self.table.c.data, content), 1)
# ...as a function, given a new object
alter_column(self.table.c.data,
name = 'atad', type=String(40),
server_default=self.table.c.data.server_default)
self.refresh_table(self.table.name)
self.assertTrue('data' not in self.table.c.keys())
self.table.c.atad # Should not raise exception
self.assertEqual(num_rows(self.table.c.atad, content), 1)
# ...as a method, given a new object
self.table.c.atad.alter(
name='data',type=String(40),
server_default=self.table.c.atad.server_default
)
self.refresh_table(self.table.name)
self.assertTrue('atad' not in self.table.c.keys())
self.table.c.data # Should not raise exception
self.assertEqual(num_rows(self.table.c.data,content), 1)
@fixture.usedb()
def test_type(self):
# Test we can change a column's type
# Just the new type
self.table.c.data.alter(type=String(43))
self.refresh_table(self.table.name)
self.assertTrue(isinstance(self.table.c.data.type, String))
self.assertEqual(self.table.c.data.type.length, 43)
# Different type
self.assertTrue(isinstance(self.table.c.id.type, Integer))
self.assertEqual(self.table.c.id.nullable, False)
if not self.engine.name == 'firebird':
self.table.c.id.alter(type=String(20))
self.assertEqual(self.table.c.id.nullable, False)
self.refresh_table(self.table.name)
self.assertTrue(isinstance(self.table.c.id.type, String))
@fixture.usedb()
def test_default(self):
"""Can change a column's server_default value (DefaultClauses only)
Only DefaultClauses are changed here: others are managed by the
application / by SA
"""
self.assertEqual(self.table.c.data.server_default.arg, 'tluafed')
# Just the new default
default = 'my_default'
self.table.c.data.alter(server_default=DefaultClause(default))
self.refresh_table(self.table.name)
#self.assertEqual(self.table.c.data.server_default.arg,default)
# TextClause returned by autoload
self.assertTrue(default in str(self.table.c.data.server_default.arg))
self.engine.execute(self.table.insert(), id=12)
row = self._select_row()
self.assertEqual(row['data'], default)
# Column object
default = 'your_default'
self.table.c.data.alter(type=String(40), server_default=DefaultClause(default))
self.refresh_table(self.table.name)
self.assertTrue(default in str(self.table.c.data.server_default.arg))
# Drop/remove default
self.table.c.data.alter(server_default=None)
self.assertEqual(self.table.c.data.server_default, None)
self.refresh_table(self.table.name)
# server_default isn't necessarily None for Oracle
#self.assertTrue(self.table.c.data.server_default is None,self.table.c.data.server_default)
self.engine.execute(self.table.insert(), id=11)
row = self.table.select(self.table.c.id == 11).execution_options(autocommit=True).execute().fetchone()
self.assertTrue(row['data'] is None, row['data'])
@fixture.usedb(not_supported='firebird')
def test_null(self):
"""Can change a column's null constraint"""
self.assertEqual(self.table.c.data.nullable, True)
# Full column
self.table.c.data.alter(type=String(40), nullable=False)
self.table.nullable = None
self.refresh_table(self.table.name)
self.assertEqual(self.table.c.data.nullable, False)
# Just the new status
self.table.c.data.alter(nullable=True)
self.refresh_table(self.table.name)
self.assertEqual(self.table.c.data.nullable, True)
@fixture.usedb()
def test_alter_deprecated(self):
try:
            # py 2.4 compatibility :-/
cw = catch_warnings(record=True)
w = cw.__enter__()
warnings.simplefilter("always")
self.table.c.data.alter(Column('data', String(100)))
self.assertEqual(len(w),1)
self.assertTrue(issubclass(w[-1].category,
MigrateDeprecationWarning))
self.assertEqual(
'Passing a Column object to alter_column is deprecated. '
'Just pass in keyword parameters instead.',
str(w[-1].message))
finally:
cw.__exit__()
@fixture.usedb()
def test_alter_returns_delta(self):
"""Test if alter constructs return delta"""
delta = self.table.c.data.alter(type=String(100))
self.assertTrue('type' in delta)
@fixture.usedb()
def test_alter_all(self):
"""Tests all alter changes at one time"""
# test for each db separately
        # since currently some don't support everything
# test pre settings
self.assertEqual(self.table.c.data.nullable, True)
self.assertEqual(self.table.c.data.server_default.arg, 'tluafed')
self.assertEqual(self.table.c.data.name, 'data')
self.assertTrue(isinstance(self.table.c.data.type, String))
self.assertTrue(self.table.c.data.type.length, 40)
kw = dict(nullable=False,
server_default='foobar',
name='data_new',
type=String(50))
if self.engine.name == 'firebird':
del kw['nullable']
self.table.c.data.alter(**kw)
# test altered objects
self.assertEqual(self.table.c.data.server_default.arg, 'foobar')
if not self.engine.name == 'firebird':
self.assertEqual(self.table.c.data.nullable, False)
self.assertEqual(self.table.c.data.name, 'data_new')
self.assertEqual(self.table.c.data.type.length, 50)
self.refresh_table(self.table.name)
# test post settings
if not self.engine.name == 'firebird':
self.assertEqual(self.table.c.data_new.nullable, False)
self.assertEqual(self.table.c.data_new.name, 'data_new')
self.assertTrue(isinstance(self.table.c.data_new.type, String))
self.assertTrue(self.table.c.data_new.type.length, 50)
# insert data and assert default
self.table.insert(values={'id': 10}).execute()
row = self._select_row()
self.assertEqual(u'foobar', row['data_new'])
class TestColumnDelta(fixture.DB):
"""Tests ColumnDelta class"""
level = fixture.DB.CONNECT
table_name = 'tmp_coldelta'
table_int = 0
def _setup(self, url):
super(TestColumnDelta, self)._setup(url)
self.meta = MetaData()
self.table = Table(self.table_name, self.meta,
Column('ids', String(10)),
)
self.meta.bind = self.engine
if self.engine.has_table(self.table.name):
self.table.drop()
self.table.create()
def _teardown(self):
if self.engine.has_table(self.table.name):
self.table.drop()
self.meta.clear()
super(TestColumnDelta,self)._teardown()
def mkcol(self, name='id', type=String, *p, **k):
return Column(name, type, *p, **k)
def verify(self, expected, original, *p, **k):
self.delta = ColumnDelta(original, *p, **k)
result = list(self.delta.keys())
result.sort()
self.assertEqual(expected, result)
return self.delta
def test_deltas_two_columns(self):
"""Testing ColumnDelta with two columns"""
col_orig = self.mkcol(primary_key=True)
col_new = self.mkcol(name='ids', primary_key=True)
self.verify([], col_orig, col_orig)
self.verify(['name'], col_orig, col_orig, 'ids')
self.verify(['name'], col_orig, col_orig, name='ids')
self.verify(['name'], col_orig, col_new)
self.verify(['name', 'type'], col_orig, col_new, type=String)
# Type comparisons
self.verify([], self.mkcol(type=String), self.mkcol(type=String))
self.verify(['type'], self.mkcol(type=String), self.mkcol(type=Integer))
self.verify(['type'], self.mkcol(type=String), self.mkcol(type=String(42)))
self.verify([], self.mkcol(type=String(42)), self.mkcol(type=String(42)))
self.verify(['type'], self.mkcol(type=String(24)), self.mkcol(type=String(42)))
self.verify(['type'], self.mkcol(type=String(24)), self.mkcol(type=Text(24)))
# Other comparisons
self.verify(['primary_key'], self.mkcol(nullable=False), self.mkcol(primary_key=True))
# PK implies nullable=False
self.verify(['nullable', 'primary_key'], self.mkcol(nullable=True), self.mkcol(primary_key=True))
self.verify([], self.mkcol(primary_key=True), self.mkcol(primary_key=True))
self.verify(['nullable'], self.mkcol(nullable=True), self.mkcol(nullable=False))
self.verify([], self.mkcol(nullable=True), self.mkcol(nullable=True))
self.verify([], self.mkcol(server_default=None), self.mkcol(server_default=None))
self.verify([], self.mkcol(server_default='42'), self.mkcol(server_default='42'))
# test server default
delta = self.verify(['server_default'], self.mkcol(), self.mkcol('id', String, DefaultClause('foobar')))
self.assertEqual(delta['server_default'].arg, 'foobar')
self.verify([], self.mkcol(server_default='foobar'), self.mkcol('id', String, DefaultClause('foobar')))
self.verify(['type'], self.mkcol(server_default='foobar'), self.mkcol('id', Text, DefaultClause('foobar')))
col = self.mkcol(server_default='foobar')
self.verify(['type'], col, self.mkcol('id', Text, DefaultClause('foobar')), alter_metadata=True)
self.assertTrue(isinstance(col.type, Text))
col = self.mkcol()
self.verify(['name', 'server_default', 'type'], col, self.mkcol('beep', Text, DefaultClause('foobar')),
alter_metadata=True)
self.assertTrue(isinstance(col.type, Text))
self.assertEqual(col.name, 'beep')
self.assertEqual(col.server_default.arg, 'foobar')
@fixture.usedb()
def test_deltas_zero_columns(self):
"""Testing ColumnDelta with zero columns"""
self.verify(['name'], 'ids', table=self.table, name='hey')
# test reflection
self.verify(['type'], 'ids', table=self.table.name, type=String(80), engine=self.engine)
self.verify(['type'], 'ids', table=self.table.name, type=String(80), metadata=self.meta)
self.meta.clear()
delta = self.verify(['type'], 'ids', table=self.table.name, type=String(80), metadata=self.meta,
alter_metadata=True)
self.assertTrue(self.table.name in self.meta)
self.assertEqual(delta.result_column.type.length, 80)
self.assertEqual(self.meta.tables.get(self.table.name).c.ids.type.length, 80)
# test defaults
self.meta.clear()
self.verify(['server_default'], 'ids', table=self.table.name, server_default='foobar',
metadata=self.meta,
alter_metadata=True)
        self.assertEqual('foobar', self.meta.tables.get(self.table.name).c.ids.server_default.arg)
# test missing parameters
self.assertRaises(ValueError, ColumnDelta, table=self.table.name)
self.assertRaises(ValueError, ColumnDelta, 'ids', table=self.table.name, alter_metadata=True)
self.assertRaises(ValueError, ColumnDelta, 'ids', table=self.table.name, alter_metadata=False)
def test_deltas_one_column(self):
"""Testing ColumnDelta with one column"""
col_orig = self.mkcol(primary_key=True)
self.verify([], col_orig)
self.verify(['name'], col_orig, 'ids')
# Parameters are always executed, even if they're 'unchanged'
# (We can't assume given column is up-to-date)
self.verify(['name', 'primary_key', 'type'], col_orig, 'id', Integer, primary_key=True)
self.verify(['name', 'primary_key', 'type'], col_orig, name='id', type=Integer, primary_key=True)
# Change name, given an up-to-date definition and the current name
delta = self.verify(['name'], col_orig, name='blah')
self.assertEqual(delta.get('name'), 'blah')
self.assertEqual(delta.current_name, 'id')
col_orig = self.mkcol(primary_key=True)
self.verify(['name', 'type'], col_orig, name='id12', type=Text, alter_metadata=True)
self.assertTrue(isinstance(col_orig.type, Text))
self.assertEqual(col_orig.name, 'id12')
# test server default
col_orig = self.mkcol(primary_key=True)
delta = self.verify(['server_default'], col_orig, DefaultClause('foobar'))
self.assertEqual(delta['server_default'].arg, 'foobar')
delta = self.verify(['server_default'], col_orig, server_default=DefaultClause('foobar'))
self.assertEqual(delta['server_default'].arg, 'foobar')
# no change
col_orig = self.mkcol(server_default=DefaultClause('foobar'))
delta = self.verify(['type'], col_orig, DefaultClause('foobar'), type=PickleType)
self.assertTrue(isinstance(delta.result_column.type, PickleType))
# TODO: test server on update
# TODO: test bind metadata
| mit |
ilay09/keystone | keystone/common/cache/_context_cache.py | 5 | 3758 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A dogpile.cache proxy that caches objects in the request local cache."""
from dogpile.cache import api
from dogpile.cache import proxy
from oslo_context import context as oslo_context
from oslo_serialization import msgpackutils
# Register our new handler.
_registry = msgpackutils.default_registry
def _register_model_handler(handler_class):
"""Register a new model handler."""
_registry.frozen = False
_registry.register(handler_class(registry=_registry))
_registry.frozen = True
class _ResponseCacheProxy(proxy.ProxyBackend):
__key_pfx = '_request_cache_%s'
def _get_request_context(self):
# Return the current context or a new/empty context.
return oslo_context.get_current() or oslo_context.RequestContext()
def _get_request_key(self, key):
return self.__key_pfx % key
def _set_local_cache(self, key, value):
# Set a serialized version of the returned value in local cache for
# subsequent calls to the memoized method.
ctx = self._get_request_context()
serialize = {'payload': value.payload, 'metadata': value.metadata}
setattr(ctx, self._get_request_key(key), msgpackutils.dumps(serialize))
def _get_local_cache(self, key):
# Return the version from our local request cache if it exists.
ctx = self._get_request_context()
try:
value = getattr(ctx, self._get_request_key(key))
except AttributeError:
return api.NO_VALUE
value = msgpackutils.loads(value)
return api.CachedValue(payload=value['payload'],
metadata=value['metadata'])
def _delete_local_cache(self, key):
# On invalidate/delete remove the value from the local request cache
ctx = self._get_request_context()
try:
delattr(ctx, self._get_request_key(key))
except AttributeError: # nosec
# NOTE(morganfainberg): We will simply pass here, this value has
# not been cached locally in the request.
pass
def get(self, key):
value = self._get_local_cache(key)
if value is api.NO_VALUE:
value = self.proxied.get(key)
if value is not api.NO_VALUE:
self._set_local_cache(key, value)
return value
def set(self, key, value):
self._set_local_cache(key, value)
self.proxied.set(key, value)
def delete(self, key):
self._delete_local_cache(key)
self.proxied.delete(key)
def get_multi(self, keys):
values = {}
for key in keys:
v = self._get_local_cache(key)
if v is not api.NO_VALUE:
values[key] = v
query_keys = set(keys).difference(set(values.keys()))
values.update(dict(
zip(query_keys, self.proxied.get_multi(query_keys))))
return [values[k] for k in keys]
def set_multi(self, mapping):
for k, v in mapping.items():
self._set_local_cache(k, v)
self.proxied.set_multi(mapping)
def delete_multi(self, keys):
for k in keys:
self._delete_local_cache(k)
self.proxied.delete_multi(keys)
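# Illustrative sketch (not part of the original module): attaching this proxy
# to a dogpile.cache region so that repeated reads within one request context
# are served from the request-local copy first. The memory backend is only an
# example choice for demonstration.
def _example_region_with_request_cache():
    from dogpile.cache import make_region
    return make_region().configure(
        'dogpile.cache.memory',
        wrap=[_ResponseCacheProxy],
    )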
| apache-2.0 |
0xCCD/mitro | browser-ext/third_party/firefox-addon-sdk/python-lib/plural-rules-generator.py | 36 | 6116 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Program used to generate /packages/api-utils/lib/l10n/plural-rules.js
# Fetch unicode.org data in order to build functions specific to each language
# that will return for a given integer, its plural form name.
# Plural form names are: zero, one, two, few, many, other.
#
# More information here:
# http://unicode.org/repos/cldr-tmp/trunk/diff/supplemental/language_plural_rules.html
# http://cldr.unicode.org/index/cldr-spec/plural-rules
# Usage:
# $ python plural-rules-generator.py > ../packages/api-utils/lib/l10n/plural-rules.js
import urllib2
import xml.dom.minidom
import json
import re
PRINT_CONDITIONS_IN_COMMENTS = False
UNICODE_ORG_XML_URL = "http://unicode.org/repos/cldr/trunk/common/supplemental/plurals.xml"
CONDITION_RE = r'n( mod \d+)? (is|in|within|(not in))( not)? ([^\s]+)'
# For a given regexp.MatchObject `g` for `CONDITION_RE`,
# returns the equivalent JS piece of code
# i.e. maps pseudo conditional language from unicode.org XML to JS code
def parseCondition(g):
lvalue = "n"
if g.group(1):
lvalue = "(n %% %d)" % int(g.group(1).replace("mod ", ""))
operator = g.group(2)
if g.group(4):
operator += " not"
rvalue = g.group(5)
if operator == "is":
return "%s == %s" % (lvalue, rvalue)
if operator == "is not":
return "%s != %s" % (lvalue, rvalue)
# "in", "within" or "not in" case:
notPrefix = ""
if operator == "not in":
notPrefix = "!"
# `rvalue` is a comma seperated list of either:
# - numbers: 42
# - ranges: 42..72
sections = rvalue.split(',')
if ".." not in rvalue:
        # If we don't have a range, but only a list of integers,
# we can simplify the generated code by using `isIn`
# n in 1,3,6,42
return "%sisIn(%s, [%s])" % (notPrefix, lvalue, ", ".join(sections))
# n in 1..42
# n in 1..3,42
subCondition = []
integers = []
for sub in sections:
if ".." in sub:
left, right = sub.split("..")
subCondition.append("isBetween(%s, %d, %d)" % (
lvalue,
int(left),
int(right)
))
else:
integers.append(int(sub))
if len(integers) > 1:
subCondition.append("isIn(%s, [%s])" % (lvalue, ", ".join(integers)))
elif len(integers) == 1:
subCondition.append("(%s == %s)" % (lvalue, integers[0]))
return "%s(%s)" % (notPrefix, " || ".join(subCondition))
def computeRules():
# Fetch plural rules data directly from unicode.org website:
url = UNICODE_ORG_XML_URL
f = urllib2.urlopen(url)
doc = xml.dom.minidom.parse(f)
# Read XML document and extract locale to rules mapping
localesMapping = {}
algorithms = {}
for index,pluralRules in enumerate(doc.getElementsByTagName("pluralRules")):
if not index in algorithms:
algorithms[index] = {}
for locale in pluralRules.getAttribute("locales").split():
localesMapping[locale] = index
for rule in pluralRules.childNodes:
if rule.nodeType != rule.ELEMENT_NODE or rule.tagName != "pluralRule":
continue
pluralForm = rule.getAttribute("count")
algorithm = rule.firstChild.nodeValue
algorithms[index][pluralForm] = algorithm
# Go through all rules and compute a Javascript code for each of them
rules = {}
for index,rule in algorithms.iteritems():
lines = []
for pluralForm in rule:
condition = rule[pluralForm]
originalCondition = str(condition)
# Convert pseudo language to JS code
condition = rule[pluralForm].lower()
condition = re.sub(CONDITION_RE, parseCondition, condition)
condition = re.sub(r'or', "||", condition)
condition = re.sub(r'and', "&&", condition)
# Prints original condition in unicode.org pseudo language
if PRINT_CONDITIONS_IN_COMMENTS:
lines.append( '// %s' % originalCondition )
lines.append( 'if (%s)' % condition )
lines.append( ' return "%s";' % pluralForm )
rules[index] = "\n ".join(lines)
return localesMapping, rules
localesMapping, rules = computeRules()
rulesLines = []
for index in rules:
lines = rules[index]
rulesLines.append('"%d": function (n) {' % index)
rulesLines.append(' %s' % lines)
rulesLines.append(' return "other"')
rulesLines.append('},')
print """/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// This file is automatically generated with /python-lib/plural-rules-generator.py
// Fetching data from: %s
// Mapping of short locale name == to == > rule index in following list
const LOCALES_TO_RULES = %s;
// Utility functions for plural rules methods
function isIn(n, list) list.indexOf(n) !== -1;
function isBetween(n, start, end) start <= n && n <= end;
// List of all plural rules methods, that maps an integer to the plural form name to use
const RULES = {
%s
};
/**
* Return a function that gives the plural form name for a given integer
* for the specified `locale`
* let fun = getRulesForLocale('en');
* fun(1) -> 'one'
* fun(0) -> 'other'
* fun(1000) -> 'other'
*/
exports.getRulesForLocale = function getRulesForLocale(locale) {
let index = LOCALES_TO_RULES[locale];
if (!(index in RULES)) {
console.warn('Plural form unknown for locale "' + locale + '"');
return function () { return "other"; };
}
return RULES[index];
}
""" % (UNICODE_ORG_XML_URL,
json.dumps(localesMapping, sort_keys=True, indent=2),
"\n ".join(rulesLines))
| gpl-3.0 |
akretion/odoo | odoo/addons/base/models/ir_module.py | 4 | 42225 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
from collections import defaultdict
from decorator import decorator
from operator import attrgetter
import importlib
import io
import logging
import os
import shutil
import tempfile
import zipfile
import requests
import werkzeug.urls
from odoo.tools import pycompat
from docutils import nodes
from docutils.core import publish_string
from docutils.transforms import Transform, writer_aux
from docutils.writers.html4css1 import Writer
import lxml.html
import psycopg2
import odoo
from odoo import api, fields, models, modules, tools, _
from odoo.addons.base.models.ir_model import MODULE_UNINSTALL_FLAG
from odoo.exceptions import AccessDenied, UserError
from odoo.osv import expression
from odoo.tools.parse_version import parse_version
from odoo.tools.misc import topological_sort
from odoo.http import request
_logger = logging.getLogger(__name__)
ACTION_DICT = {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.module.upgrade',
'target': 'new',
'type': 'ir.actions.act_window',
}
def backup(path, raise_exception=True):
path = os.path.normpath(path)
if not os.path.exists(path):
if not raise_exception:
return None
raise OSError('path does not exists')
cnt = 1
while True:
bck = '%s~%d' % (path, cnt)
if not os.path.exists(bck):
shutil.move(path, bck)
return bck
cnt += 1
def assert_log_admin_access(method):
"""Decorator checking that the calling user is an administrator, and logging the call.
Raises an AccessDenied error if the user does not have administrator privileges, according
to `user._is_admin()`.
"""
def check_and_log(method, self, *args, **kwargs):
user = self.env.user
origin = request.httprequest.remote_addr if request else 'n/a'
log_data = (method.__name__, self.sudo().mapped('name'), user.login, user.id, origin)
if not self.env.user._is_admin():
_logger.warning('DENY access to module.%s on %s to user %s ID #%s via %s', *log_data)
raise AccessDenied()
_logger.info('ALLOW access to module.%s on %s to user %s #%s via %s', *log_data)
return method(self, *args, **kwargs)
return decorator(check_and_log, method)
class ModuleCategory(models.Model):
_name = "ir.module.category"
_description = "Application"
_order = 'name'
@api.depends('module_ids')
def _compute_module_nr(self):
cr = self._cr
cr.execute('SELECT category_id, COUNT(*) \
FROM ir_module_module \
WHERE category_id IN %(ids)s \
OR category_id IN (SELECT id \
FROM ir_module_category \
WHERE parent_id IN %(ids)s) \
GROUP BY category_id', {'ids': tuple(self.ids)}
)
result = dict(cr.fetchall())
for cat in self.filtered('id'):
cr.execute('SELECT id FROM ir_module_category WHERE parent_id=%s', (cat.id,))
cat.module_nr = sum([result.get(c, 0) for (c,) in cr.fetchall()], result.get(cat.id, 0))
name = fields.Char(string='Name', required=True, translate=True, index=True)
parent_id = fields.Many2one('ir.module.category', string='Parent Application', index=True)
child_ids = fields.One2many('ir.module.category', 'parent_id', string='Child Applications')
module_nr = fields.Integer(string='Number of Apps', compute='_compute_module_nr')
module_ids = fields.One2many('ir.module.module', 'category_id', string='Modules')
description = fields.Text(string='Description', translate=True)
sequence = fields.Integer(string='Sequence')
visible = fields.Boolean(string='Visible', default=True)
exclusive = fields.Boolean(string='Exclusive')
xml_id = fields.Char(string='External ID', compute='_compute_xml_id')
def _compute_xml_id(self):
xml_ids = defaultdict(list)
domain = [('model', '=', self._name), ('res_id', 'in', self.ids)]
for data in self.env['ir.model.data'].sudo().search_read(domain, ['module', 'name', 'res_id']):
xml_ids[data['res_id']].append("%s.%s" % (data['module'], data['name']))
for cat in self:
cat.xml_id = xml_ids.get(cat.id, [''])[0]
class MyFilterMessages(Transform):
"""
Custom docutils transform to remove `system message` for a document and
generate warnings.
(The standard filter removes them based on some `report_level` passed in
the `settings_override` dictionary, but if we use it, we can't see them
and generate warnings.)
"""
default_priority = 870
def apply(self):
for node in self.document.traverse(nodes.system_message):
_logger.warning("docutils' system message present: %s", str(node))
node.parent.remove(node)
class MyWriter(Writer):
"""
Custom docutils html4ccs1 writer that doesn't add the warnings to the
output document.
"""
def get_transforms(self):
return [MyFilterMessages, writer_aux.Admonitions]
STATES = [
('uninstallable', 'Uninstallable'),
('uninstalled', 'Not Installed'),
('installed', 'Installed'),
('to upgrade', 'To be upgraded'),
('to remove', 'To be removed'),
('to install', 'To be installed'),
]
class Module(models.Model):
_name = "ir.module.module"
_rec_name = "shortdesc"
_description = "Module"
_order = 'sequence,name'
@api.model
def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
res = super(Module, self).fields_view_get(view_id, view_type, toolbar=toolbar, submenu=False)
if view_type == 'form' and res.get('toolbar',False):
install_id = self.env.ref('base.action_server_module_immediate_install').id
action = [rec for rec in res['toolbar']['action'] if rec.get('id', False) != install_id]
res['toolbar'] = {'action': action}
return res
@classmethod
def get_module_info(cls, name):
try:
return modules.load_information_from_description_file(name)
except Exception:
_logger.debug('Error when trying to fetch information for module %s', name, exc_info=True)
return {}
@api.depends('name', 'description')
def _get_desc(self):
for module in self:
path = modules.get_module_resource(module.name, 'static/description/index.html')
if path:
with tools.file_open(path, 'rb') as desc_file:
doc = desc_file.read()
html = lxml.html.document_fromstring(doc)
for element, attribute, link, pos in html.iterlinks():
if element.get('src') and not '//' in element.get('src') and not 'static/' in element.get('src'):
element.set('src', "/%s/static/description/%s" % (module.name, element.get('src')))
module.description_html = tools.html_sanitize(lxml.html.tostring(html))
else:
overrides = {
'embed_stylesheet': False,
'doctitle_xform': False,
'output_encoding': 'unicode',
'xml_declaration': False,
'file_insertion_enabled': False,
}
output = publish_string(source=module.description if not module.application and module.description else '', settings_overrides=overrides, writer=MyWriter())
module.description_html = tools.html_sanitize(output)
@api.depends('name')
def _get_latest_version(self):
default_version = modules.adapt_version('1.0')
for module in self:
module.installed_version = self.get_module_info(module.name).get('version', default_version)
@api.depends('name', 'state')
def _get_views(self):
IrModelData = self.env['ir.model.data'].with_context(active_test=True)
dmodels = ['ir.ui.view', 'ir.actions.report', 'ir.ui.menu']
for module in self:
# Skip uninstalled modules below, no data to find anyway.
if module.state not in ('installed', 'to upgrade', 'to remove'):
module.views_by_module = ""
module.reports_by_module = ""
module.menus_by_module = ""
continue
# then, search and group ir.model.data records
imd_models = defaultdict(list)
imd_domain = [('module', '=', module.name), ('model', 'in', tuple(dmodels))]
for data in IrModelData.sudo().search(imd_domain):
imd_models[data.model].append(data.res_id)
def browse(model):
# as this method is called before the module update, some xmlid
                # may be invalid at this stage; explicitly filter records before
# reading them
return self.env[model].browse(imd_models[model]).exists()
def format_view(v):
return '%s%s (%s)' % (v.inherit_id and '* INHERIT ' or '', v.name, v.type)
module.views_by_module = "\n".join(sorted(format_view(v) for v in browse('ir.ui.view')))
module.reports_by_module = "\n".join(sorted(r.name for r in browse('ir.actions.report')))
module.menus_by_module = "\n".join(sorted(m.complete_name for m in browse('ir.ui.menu')))
@api.depends('icon')
def _get_icon_image(self):
for module in self:
module.icon_image = ''
if module.icon:
path_parts = module.icon.split('/')
path = modules.get_module_resource(path_parts[1], *path_parts[2:])
else:
path = modules.module.get_module_icon(module.name)
if path:
with tools.file_open(path, 'rb') as image_file:
module.icon_image = base64.b64encode(image_file.read())
name = fields.Char('Technical Name', readonly=True, required=True, index=True)
category_id = fields.Many2one('ir.module.category', string='Category', readonly=True, index=True)
shortdesc = fields.Char('Module Name', readonly=True, translate=True)
summary = fields.Char('Summary', readonly=True, translate=True)
description = fields.Text('Description', readonly=True, translate=True)
description_html = fields.Html('Description HTML', compute='_get_desc')
author = fields.Char("Author", readonly=True)
maintainer = fields.Char('Maintainer', readonly=True)
contributors = fields.Text('Contributors', readonly=True)
website = fields.Char("Website", readonly=True)
# attention: Incorrect field names !!
# installed_version refers the latest version (the one on disk)
# latest_version refers the installed version (the one in database)
# published_version refers the version available on the repository
installed_version = fields.Char('Latest Version', compute='_get_latest_version')
latest_version = fields.Char('Installed Version', readonly=True)
published_version = fields.Char('Published Version', readonly=True)
url = fields.Char('URL', readonly=True)
sequence = fields.Integer('Sequence', default=100)
dependencies_id = fields.One2many('ir.module.module.dependency', 'module_id',
string='Dependencies', readonly=True)
exclusion_ids = fields.One2many('ir.module.module.exclusion', 'module_id',
string='Exclusions', readonly=True)
auto_install = fields.Boolean('Automatic Installation',
help='An auto-installable module is automatically installed by the '
'system when all its dependencies are satisfied. '
'If the module has no dependency, it is always installed.')
state = fields.Selection(STATES, string='Status', default='uninstallable', readonly=True, index=True)
demo = fields.Boolean('Demo Data', default=False, readonly=True)
license = fields.Selection([
('GPL-2', 'GPL Version 2'),
('GPL-2 or any later version', 'GPL-2 or later version'),
('GPL-3', 'GPL Version 3'),
('GPL-3 or any later version', 'GPL-3 or later version'),
('AGPL-3', 'Affero GPL-3'),
('LGPL-3', 'LGPL Version 3'),
('Other OSI approved licence', 'Other OSI Approved Licence'),
('OEEL-1', 'Odoo Enterprise Edition License v1.0'),
('OPL-1', 'Odoo Proprietary License v1.0'),
('Other proprietary', 'Other Proprietary')
], string='License', default='LGPL-3', readonly=True)
menus_by_module = fields.Text(string='Menus', compute='_get_views', store=True)
reports_by_module = fields.Text(string='Reports', compute='_get_views', store=True)
views_by_module = fields.Text(string='Views', compute='_get_views', store=True)
application = fields.Boolean('Application', readonly=True)
icon = fields.Char('Icon URL')
icon_image = fields.Binary(string='Icon', compute='_get_icon_image')
to_buy = fields.Boolean('Odoo Enterprise Module', default=False)
_sql_constraints = [
('name_uniq', 'UNIQUE (name)', 'The name of the module must be unique!'),
]
@api.multi
def unlink(self):
if not self:
return True
for module in self:
if module.state in ('installed', 'to upgrade', 'to remove', 'to install'):
raise UserError(_('You are trying to remove a module that is installed or will be installed.'))
self.clear_caches()
return super(Module, self).unlink()
@staticmethod
def _check_external_dependencies(terp):
depends = terp.get('external_dependencies')
if not depends:
return
for pydep in depends.get('python', []):
try:
importlib.import_module(pydep)
except ImportError:
raise ImportError('No module named %s' % (pydep,))
for binary in depends.get('bin', []):
try:
tools.find_in_path(binary)
except IOError:
raise Exception('Unable to find %r in path' % (binary,))
@classmethod
def check_external_dependencies(cls, module_name, newstate='to install'):
terp = cls.get_module_info(module_name)
try:
cls._check_external_dependencies(terp)
except Exception as e:
if newstate == 'to install':
msg = _('Unable to install module "%s" because an external dependency is not met: %s')
elif newstate == 'to upgrade':
msg = _('Unable to upgrade module "%s" because an external dependency is not met: %s')
else:
msg = _('Unable to process module "%s" because an external dependency is not met: %s')
raise UserError(msg % (module_name, e.args[0]))
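# _state_update recursively marks a module and its dependencies with `newstate`
# (only modules whose current state is in `states_to_update` are touched) and
# returns whether any of the modules involved carries demo data.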
@api.multi
def _state_update(self, newstate, states_to_update, level=100):
if level < 1:
raise UserError(_('Recursion error in module dependencies!'))
# whether some modules are installed with demo data
demo = False
for module in self:
# determine dependency modules to update/others
update_mods, ready_mods = self.browse(), self.browse()
for dep in module.dependencies_id:
if dep.state == 'unknown':
raise UserError(_("You try to install module '%s' that depends on module '%s'.\nBut the latter module is not available in your system.") % (module.name, dep.name,))
if dep.depend_id.state == newstate:
ready_mods += dep.depend_id
else:
update_mods += dep.depend_id
# update dependency modules that require it, and determine demo for module
update_demo = update_mods._state_update(newstate, states_to_update, level=level-1)
module_demo = module.demo or update_demo or any(mod.demo for mod in ready_mods)
demo = demo or module_demo
if module.state in states_to_update:
# check dependencies and update module itself
self.check_external_dependencies(module.name, newstate)
module.write({'state': newstate, 'demo': module_demo})
return demo
@assert_log_admin_access
@api.multi
def button_install(self):
# domain to select auto-installable (but not yet installed) modules
auto_domain = [('state', '=', 'uninstalled'), ('auto_install', '=', True)]
# determine whether an auto-install module must be installed:
# - all its dependencies are installed or to be installed,
# - at least one dependency is 'to install'
install_states = frozenset(('installed', 'to install', 'to upgrade'))
def must_install(module):
states = set(dep.state for dep in module.dependencies_id)
return states <= install_states and 'to install' in states
modules = self
while modules:
# Mark the given modules and their dependencies to be installed.
modules._state_update('to install', ['uninstalled'])
# Determine which auto-installable modules must be installed.
modules = self.search(auto_domain).filtered(must_install)
# the modules that are installed/to install/to upgrade
install_mods = self.search([('state', 'in', list(install_states))])
# check individual exclusions
install_names = {module.name for module in install_mods}
for module in install_mods:
for exclusion in module.exclusion_ids:
if exclusion.name in install_names:
msg = _('Modules "%s" and "%s" are incompatible.')
raise UserError(msg % (module.shortdesc, exclusion.exclusion_id.shortdesc))
# check category exclusions
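# `closure` returns a module together with all of its transitive dependencies,
# following dependencies_id.depend_id level by level until no more are found.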
def closure(module):
todo = result = module
while todo:
result |= todo
todo = todo.mapped('dependencies_id.depend_id')
return result
exclusives = self.env['ir.module.category'].search([('exclusive', '=', True)])
for category in exclusives:
# retrieve installed modules in category and sub-categories
categories = category.search([('id', 'child_of', category.ids)])
modules = install_mods.filtered(lambda mod: mod.category_id in categories)
# the installation is valid if all installed modules in categories
# belong to the transitive dependencies of one of them
if modules and not any(modules <= closure(module) for module in modules):
msg = _('You are trying to install incompatible modules in category "%s":')
labels = dict(self.fields_get(['state'])['state']['selection'])
raise UserError("\n".join([msg % category.name] + [
"- %s (%s)" % (module.shortdesc, labels[module.state])
for module in modules
]))
return dict(ACTION_DICT, name=_('Install'))
@assert_log_admin_access
@api.multi
def button_immediate_install(self):
""" Installs the selected module(s) immediately and fully,
returns the next res.config action to execute
:returns: next res.config item to execute
:rtype: dict[str, object]
"""
_logger.info('User #%d triggered module installation', self.env.uid)
return self._button_immediate_function(type(self).button_install)
@assert_log_admin_access
@api.multi
def button_install_cancel(self):
self.write({'state': 'uninstalled', 'demo': False})
return True
@assert_log_admin_access
@api.multi
def module_uninstall(self):
""" Perform the various steps required to uninstall a module completely
including the deletion of all database structures created by the module:
tables, columns, constraints, etc.
"""
modules_to_remove = self.mapped('name')
self.env['ir.model.data']._module_data_uninstall(modules_to_remove)
# we deactivate prefetching to not try to read a column that has been deleted
self.with_context(prefetch_fields=False).write({'state': 'uninstalled', 'latest_version': False})
return True
@api.multi
def _remove_copied_views(self):
""" Remove the copies of the views installed by the modules in `self`.
Those copies do not have an external id so they will not be cleaned by
`_module_data_uninstall`. This is why we rely on `key` instead.
It is important to remove these copies because using them would crash if
they relied on data that no longer exists once the module is removed.
"""
domain = expression.OR([[('key', '=like', m.name + '.%')] for m in self])
orphans = self.env['ir.ui.view'].with_context(**{'active_test': False, MODULE_UNINSTALL_FLAG: True}).search(domain)
orphans.unlink()
@api.multi
@api.returns('self')
def downstream_dependencies(self, known_deps=None,
exclude_states=('uninstalled', 'uninstallable', 'to remove')):
""" Return the modules that directly or indirectly depend on the modules
in `self`, and that satisfy the `exclude_states` filter.
"""
if not self:
return self
known_deps = known_deps or self.browse()
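# Each pass of the query finds modules that declare a dependency on one of the
# modules found so far and are not excluded by state; the recursion below
# repeats the search until no new dependent module shows up.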
query = """ SELECT DISTINCT m.id
FROM ir_module_module_dependency d
JOIN ir_module_module m ON (d.module_id=m.id)
WHERE
d.name IN (SELECT name from ir_module_module where id in %s) AND
m.state NOT IN %s AND
m.id NOT IN %s """
self._cr.execute(query, (tuple(self.ids), tuple(exclude_states), tuple(known_deps.ids or self.ids)))
new_deps = self.browse([row[0] for row in self._cr.fetchall()])
missing_mods = new_deps - known_deps
known_deps |= new_deps
if missing_mods:
known_deps |= missing_mods.downstream_dependencies(known_deps, exclude_states)
return known_deps
@api.multi
@api.returns('self')
def upstream_dependencies(self, known_deps=None,
exclude_states=('installed', 'uninstallable', 'to remove')):
""" Return the dependency tree of modules of the modules in `self`, and
that satisfy the `exclude_states` filter.
"""
if not self:
return self
known_deps = known_deps or self.browse()
query = """ SELECT DISTINCT m.id
FROM ir_module_module_dependency d
JOIN ir_module_module m ON (d.module_id=m.id)
WHERE
m.name IN (SELECT name from ir_module_module_dependency where module_id in %s) AND
m.state NOT IN %s AND
m.id NOT IN %s """
self._cr.execute(query, (tuple(self.ids), tuple(exclude_states), tuple(known_deps.ids or self.ids)))
new_deps = self.browse([row[0] for row in self._cr.fetchall()])
missing_mods = new_deps - known_deps
known_deps |= new_deps
if missing_mods:
known_deps |= missing_mods.upstream_dependencies(known_deps, exclude_states)
return known_deps
def next(self):
"""
Return the action linked to an ir.actions.todo if there exists one that
should be executed. Otherwise, redirect to /web.
"""
Todos = self.env['ir.actions.todo']
_logger.info('getting next %s', Todos)
active_todo = Todos.search([('state', '=', 'open')], limit=1)
if active_todo:
_logger.info('next action is "%s"', active_todo.name)
return active_todo.action_launch()
return {
'type': 'ir.actions.act_url',
'target': 'self',
'url': '/web',
}
@api.multi
def _button_immediate_function(self, function):
try:
# This is done because the installation/uninstallation/upgrade can modify a currently
# running cron job and prevent it from finishing, and since the ir_cron table is locked
# during execution, the lock won't be released until timeout.
self._cr.execute("SELECT * FROM ir_cron FOR UPDATE NOWAIT")
except psycopg2.OperationalError:
raise UserError(_("The server is busy right now, module operations are not possible at"
" this time, please try again later."))
function(self)
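# The actual effect of the button (install/uninstall/upgrade) is applied by
# rebuilding the registry with update_module=True; the cursor is committed
# before and after so the rebuild sees the updated module states.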
self._cr.commit()
api.Environment.reset()
modules.registry.Registry.new(self._cr.dbname, update_module=True)
self._cr.commit()
env = api.Environment(self._cr, self._uid, self._context)
# pylint: disable=next-method-called
config = env['ir.module.module'].next() or {}
if config.get('type') not in ('ir.actions.act_window_close',):
return config
# reload the client; open the first available root menu
menu = env['ir.ui.menu'].search([('parent_id', '=', False)])[:1]
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {'menu_id': menu.id},
}
@assert_log_admin_access
@api.multi
def button_immediate_uninstall(self):
"""
Uninstall the selected module(s) immediately and fully,
returns the next res.config action to execute
"""
_logger.info('User #%d triggered module uninstallation', self.env.uid)
return self._button_immediate_function(type(self).button_uninstall)
@assert_log_admin_access
@api.multi
def button_uninstall(self):
if 'base' in self.mapped('name'):
raise UserError(_("The `base` module cannot be uninstalled"))
if not all(state in ('installed', 'to upgrade') for state in self.mapped('state')):
raise UserError(_(
"One or more of the selected modules have already been uninstalled, if you "
"believe this to be an error, you may try again later or contact support."
))
deps = self.downstream_dependencies()
(self + deps).write({'state': 'to remove'})
return dict(ACTION_DICT, name=_('Uninstall'))
@assert_log_admin_access
@api.multi
def button_uninstall_wizard(self):
""" Launch the wizard to uninstall the given module. """
return {
'type': 'ir.actions.act_window',
'target': 'new',
'name': _('Uninstall module'),
'view_mode': 'form',
'res_model': 'base.module.uninstall',
'context': {'default_module_id': self.id},
}
@api.multi
def button_uninstall_cancel(self):
self.write({'state': 'installed'})
return True
@assert_log_admin_access
@api.multi
def button_immediate_upgrade(self):
"""
Upgrade the selected module(s) immediately and fully,
return the next res.config action to execute
"""
return self._button_immediate_function(type(self).button_upgrade)
@assert_log_admin_access
@api.multi
def button_upgrade(self):
Dependency = self.env['ir.module.module.dependency']
self.update_list()
todo = list(self)
i = 0
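# Breadth-first walk: each selected module is checked, and any installed
# module that depends on it is appended to `todo` so it gets upgraded as well.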
while i < len(todo):
module = todo[i]
i += 1
if module.state not in ('installed', 'to upgrade'):
raise UserError(_("Can not upgrade module '%s'. It is not installed.") % (module.name,))
self.check_external_dependencies(module.name, 'to upgrade')
for dep in Dependency.search([('name', '=', module.name)]):
if dep.module_id.state == 'installed' and dep.module_id not in todo:
todo.append(dep.module_id)
self.browse(module.id for module in todo).write({'state': 'to upgrade'})
to_install = []
for module in todo:
for dep in module.dependencies_id:
if dep.state == 'unknown':
raise UserError(_('You are trying to upgrade module %s, which depends on module %s.\nHowever, that module is not available in your system.') % (module.name, dep.name,))
if dep.state == 'uninstalled':
to_install += self.search([('name', '=', dep.name)]).ids
self.browse(to_install).button_install()
return dict(ACTION_DICT, name=_('Apply Schedule Upgrade'))
@assert_log_admin_access
@api.multi
def button_upgrade_cancel(self):
self.write({'state': 'installed'})
return True
@staticmethod
def get_values_from_terp(terp):
return {
'description': terp.get('description', ''),
'shortdesc': terp.get('name', ''),
'author': terp.get('author', 'Unknown'),
'maintainer': terp.get('maintainer', False),
'contributors': ', '.join(terp.get('contributors', [])) or False,
'website': terp.get('website', ''),
'license': terp.get('license', 'LGPL-3'),
'sequence': terp.get('sequence', 100),
'application': terp.get('application', False),
'auto_install': terp.get('auto_install', False),
'icon': terp.get('icon', False),
'summary': terp.get('summary', ''),
'url': terp.get('url') or terp.get('live_test_url', ''),
'to_buy': False
}
@api.model
def create(self, vals):
new = super(Module, self).create(vals)
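# Give the new module record an external id ('base.module_<name>') flagged
# noupdate, so it can be referenced as model data and survives data updates.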
module_metadata = {
'name': 'module_%s' % vals['name'],
'model': 'ir.module.module',
'module': 'base',
'res_id': new.id,
'noupdate': True,
}
self.env['ir.model.data'].create(module_metadata)
return new
# update the list of available packages
@assert_log_admin_access
@api.model
def update_list(self):
res = [0, 0] # [update, add]
default_version = modules.adapt_version('1.0')
known_mods = self.with_context(lang=None).search([])
known_mods_names = {mod.name: mod for mod in known_mods}
# iterate through detected modules and update/create them in db
for mod_name in modules.get_modules():
mod = known_mods_names.get(mod_name)
terp = self.get_module_info(mod_name)
values = self.get_values_from_terp(terp)
if mod:
updated_values = {}
for key in values:
old = getattr(mod, key)
updated = tools.ustr(values[key]) if isinstance(values[key], pycompat.string_types) else values[key]
if (old or updated) and updated != old:
updated_values[key] = values[key]
if terp.get('installable', True) and mod.state == 'uninstallable':
updated_values['state'] = 'uninstalled'
if parse_version(terp.get('version', default_version)) > parse_version(mod.latest_version or default_version):
res[0] += 1
if updated_values:
mod.write(updated_values)
else:
mod_path = modules.get_module_path(mod_name)
if not mod_path or not terp:
continue
state = "uninstalled" if terp.get('installable', True) else "uninstallable"
mod = self.create(dict(name=mod_name, state=state, **values))
res[1] += 1
mod._update_dependencies(terp.get('depends', []))
mod._update_exclusions(terp.get('excludes', []))
mod._update_category(terp.get('category', 'Uncategorized'))
return res
@assert_log_admin_access
@api.multi
def download(self, download=True):
return []
@assert_log_admin_access
@api.model
def install_from_urls(self, urls):
if not self.env.user.has_group('base.group_system'):
raise AccessDenied()
# One-click install is opt-in - cfr Issue #15225
ad_dir = tools.config.addons_data_dir
if not os.access(ad_dir, os.W_OK):
msg = (_("Automatic install of downloaded Apps is currently disabled.") + "\n\n" +
_("To enable it, make sure this directory exists and is writable on the server:") +
"\n%s" % ad_dir)
_logger.warning(msg)
raise UserError(msg)
apps_server = werkzeug.urls.url_parse(self.get_apps_server())
OPENERP = odoo.release.product_name.lower()
tmp = tempfile.mkdtemp()
_logger.debug('Install from url: %r', urls)
try:
# 1. Download & unzip missing modules
for module_name, url in urls.items():
if not url:
continue # nothing to download, local version is already the last one
up = werkzeug.urls.url_parse(url)
if up.scheme != apps_server.scheme or up.netloc != apps_server.netloc:
raise AccessDenied()
try:
_logger.info('Downloading module `%s` from OpenERP Apps', module_name)
response = requests.get(url)
response.raise_for_status()
content = response.content
except Exception:
_logger.exception('Failed to fetch module %s', module_name)
raise UserError(_('The `%s` module appears to be unavailable at the moment, please try again later.') % module_name)
else:
zipfile.ZipFile(io.BytesIO(content)).extractall(tmp)
assert os.path.isdir(os.path.join(tmp, module_name))
# 2a. Copy/Replace module source in addons path
for module_name, url in urls.items():
if module_name == OPENERP or not url:
continue # OPENERP is special case, handled below, and no URL means local module
module_path = modules.get_module_path(module_name, downloaded=True, display_warning=False)
bck = backup(module_path, False)
_logger.info('Copy downloaded module `%s` to `%s`', module_name, module_path)
shutil.move(os.path.join(tmp, module_name), module_path)
if bck:
shutil.rmtree(bck)
# 2b. Copy/Replace server+base module source if downloaded
if urls.get(OPENERP):
# special case. it contains the server and the base module.
# extract path is not the same
base_path = os.path.dirname(modules.get_module_path('base'))
# copy all modules in the SERVER/odoo/addons directory to the new "odoo" module (except base itself)
for d in os.listdir(base_path):
if d != 'base' and os.path.isdir(os.path.join(base_path, d)):
destdir = os.path.join(tmp, OPENERP, 'addons', d) # XXX 'odoo' subdirectory ?
shutil.copytree(os.path.join(base_path, d), destdir)
# then replace the server by the new "base" module
server_dir = tools.config['root_path'] # XXX or dirname()
bck = backup(server_dir)
_logger.info('Copy downloaded module `odoo` to `%s`', server_dir)
shutil.move(os.path.join(tmp, OPENERP), server_dir)
#if bck:
# shutil.rmtree(bck)
self.update_list()
with_urls = [module_name for module_name, url in urls.items() if url]
downloaded = self.search([('name', 'in', with_urls)])
installed = self.search([('id', 'in', downloaded.ids), ('state', '=', 'installed')])
to_install = self.search([('name', 'in', list(urls)), ('state', '=', 'uninstalled')])
post_install_action = to_install.button_immediate_install()
if installed or to_install:
# in this case, force server restart to reload python code...
self._cr.commit()
odoo.service.server.restart()
return {
'type': 'ir.actions.client',
'tag': 'home',
'params': {'wait': True},
}
return post_install_action
finally:
shutil.rmtree(tmp)
@api.model
def get_apps_server(self):
return tools.config.get('apps_server', 'https://apps.odoo.com/apps')
def _update_dependencies(self, depends=None):
existing = set(dep.name for dep in self.dependencies_id)
needed = set(depends or [])
for dep in (needed - existing):
self._cr.execute('INSERT INTO ir_module_module_dependency (module_id, name) values (%s, %s)', (self.id, dep))
for dep in (existing - needed):
self._cr.execute('DELETE FROM ir_module_module_dependency WHERE module_id = %s and name = %s', (self.id, dep))
self.invalidate_cache(['dependencies_id'], self.ids)
def _update_exclusions(self, excludes=None):
existing = set(excl.name for excl in self.exclusion_ids)
needed = set(excludes or [])
for name in (needed - existing):
self._cr.execute('INSERT INTO ir_module_module_exclusion (module_id, name) VALUES (%s, %s)', (self.id, name))
for name in (existing - needed):
self._cr.execute('DELETE FROM ir_module_module_exclusion WHERE module_id=%s AND name=%s', (self.id, name))
self.invalidate_cache(['exclusion_ids'], self.ids)
def _update_category(self, category='Uncategorized'):
current_category = self.category_id
current_category_path = []
while current_category:
current_category_path.insert(0, current_category.name)
current_category = current_category.parent_id
categs = category.split('/')
if categs != current_category_path:
cat_id = modules.db.create_categories(self._cr, categs)
self.write({'category_id': cat_id})
@api.multi
def _update_translations(self, filter_lang=None):
if not filter_lang:
langs = self.env['res.lang'].search([('translatable', '=', True)])
filter_lang = [lang.code for lang in langs]
elif not isinstance(filter_lang, (list, tuple)):
filter_lang = [filter_lang]
update_mods = self.filtered(lambda r: r.state in ('installed', 'to install', 'to upgrade'))
mod_dict = {
mod.name: mod.dependencies_id.mapped('name')
for mod in update_mods
}
mod_names = topological_sort(mod_dict)
self.env['ir.translation'].load_module_terms(mod_names, filter_lang)
@api.multi
def _check(self):
for module in self:
if not module.description_html:
_logger.warning('module %s: description is empty!', module.name)
@api.model
@tools.ormcache()
def _installed(self):
""" Return the set of installed modules as a dictionary {name: id} """
return {
module.name: module.id
for module in self.sudo().search([('state', '=', 'installed')])
}
DEP_STATES = STATES + [('unknown', 'Unknown')]
class ModuleDependency(models.Model):
_name = "ir.module.module.dependency"
_description = "Module dependency"
# the dependency name
name = fields.Char(index=True)
# the module that depends on it
module_id = fields.Many2one('ir.module.module', 'Module', ondelete='cascade')
# the module corresponding to the dependency, and its status
depend_id = fields.Many2one('ir.module.module', 'Dependency', compute='_compute_depend')
state = fields.Selection(DEP_STATES, string='Status', compute='_compute_state')
@api.multi
@api.depends('name')
def _compute_depend(self):
# retrieve all modules corresponding to the dependency names
names = list(set(dep.name for dep in self))
mods = self.env['ir.module.module'].search([('name', 'in', names)])
# index modules by name, and assign dependencies
name_mod = dict((mod.name, mod) for mod in mods)
for dep in self:
dep.depend_id = name_mod.get(dep.name)
@api.one
@api.depends('depend_id.state')
def _compute_state(self):
self.state = self.depend_id.state or 'unknown'
class ModuleExclusion(models.Model):
_name = "ir.module.module.exclusion"
_description = "Module exclusion"
# the exclusion name
name = fields.Char(index=True)
# the module that excludes it
module_id = fields.Many2one('ir.module.module', 'Module', ondelete='cascade')
# the module corresponding to the exclusion, and its status
exclusion_id = fields.Many2one('ir.module.module', 'Exclusion Module', compute='_compute_exclusion')
state = fields.Selection(DEP_STATES, string='Status', compute='_compute_state')
@api.multi
@api.depends('name')
def _compute_exclusion(self):
# retrieve all modules corresponding to the exclusion names
names = list(set(excl.name for excl in self))
mods = self.env['ir.module.module'].search([('name', 'in', names)])
# index modules by name, and assign dependencies
name_mod = {mod.name: mod for mod in mods}
for excl in self:
excl.exclusion_id = name_mod.get(excl.name)
@api.one
@api.depends('exclusion_id.state')
def _compute_state(self):
self.state = self.exclusion_id.state or 'unknown'
| agpl-3.0 |
sergg75/dataModels | Environment/AirQualityObserved/harvest/madrid_air_quality_harvest.py | 1 | 10084 | #!../bin/python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import csv
import datetime
import json
import urllib2
import StringIO
import logging
import logging.handlers
import re
from pytz import timezone
import contextlib
import copy
# Entity type
AMBIENT_TYPE_NAME = 'AirQualityObserved'
# List of known air quality stations
station_dict = { }
# Orion service that will store the data
orion_service = 'http://localhost:1030'
logger = None
madrid_tz = timezone('CET')
pollutant_dict = {
'01': 'SO2',
'06': 'CO',
'07': 'NO',
'08': 'NO2',
'09': 'PM2.5',
'10': 'PM10',
'12': 'NOx',
'14': 'O3',
'20': 'TOL',
'30': 'BEN',
'35': 'EBE',
'37': 'MXY',
'38': 'PXY',
'39': 'OXY',
'42': 'TCH',
'43': 'CH4',
'44': 'NHMC'
}
pollutant_descriptions = {
'01': 'Sulfur Dioxide',
'06': 'Carbon Monoxide',
'07': 'Nitrogen Monoxide',
'08': 'Nitrogen Dioxide',
'09': 'Particles lower than 2.5',
'10': 'Particles lower than 10',
'12': 'Nitrogen oxides',
'14': 'Ozone',
'20': 'Toluene',
'30': 'Benzene',
'35': 'Etilbenzene',
'37': 'Metaxylene',
'38': 'Paraxylene',
'39': 'Orthoxylene',
'42': 'Total Hydrocarbons',
'43': 'Hydrocarbons - Methane',
'44': 'Non-methane hydrocarbons - Hexane'
}
other_dict = {
'80': 'ultravioletRadiation',
'81': 'windSpeed',
'82': 'windDirection',
'83': 'temperature',
'86': 'relativeHumidity',
'87': 'barometricPressure',
'88': 'solarRadiation',
'89': 'precipitation',
'92': 'acidRainLevel'
}
other_descriptions = {
'80': 'Ultraviolet Radiation',
'81': 'Wind Speed',
'82': 'Wind Direction',
'83': 'temperature',
'86': 'Relative Humidity',
'87': 'Barometric Pressure',
'88': 'Solar Radiation',
'89': 'Precipitation',
'92': 'Acid Rain Level'
}
dataset_url = 'http://datos.madrid.es/egob/catalogo/212531-7916318-calidad-aire-tiempo-real.txt'
# Statistics for tracking purposes
persisted_entities = 0
in_error_entities = 0
MIME_JSON = 'application/json'
FIWARE_SERVICE = 'AirQuality'
FIWARE_SPATH = '/Spain_Madrid'
# Sanitize string to avoid forbidden characters by Orion
def sanitize(str_in):
return re.sub(r"[<(>)\"\'=;]", "", str_in)
# Obtains air quality data and harmonizes it, persisting to Orion
def get_air_quality_madrid():
req = urllib2.Request(url=dataset_url)
with contextlib.closing(urllib2.urlopen(req)) as f:
csv_data = f.read()
csv_file = StringIO.StringIO(csv_data)
reader = csv.reader(csv_file, delimiter=',')
# Dictionary with station data indexed by station code
# An array per station code containing one element per hour
stations = { }
for row in reader:
station_code = str(row[0]) + str(row[1]) + str(row[2])
station_num = row[2]
if not station_dict.get(station_num):
continue
if not station_code in stations:
stations[station_code] = []
magnitude = row[3]
if (not magnitude in pollutant_dict) and (not magnitude in other_dict):
continue
is_other = None
if magnitude in pollutant_dict:
property_name = pollutant_dict[magnitude]
property_desc = pollutant_descriptions[magnitude]
is_other = False
if magnitude in other_dict:
property_name = other_dict[magnitude]
property_desc = other_descriptions[magnitude]
is_other = True
hour = 0
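# Columns 9..56 of each row hold the 24 hourly readings as (value, flag)
# pairs; only readings whose validation flag is 'V' (valid) are kept.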
for x in xrange(9, 57, 2):
value = row[x]
value_control = row[x + 1]
if value_control == 'V':
# A new entity object is created if it does not exist yet
if (len(stations[station_code]) < hour + 1):
stations[station_code].append(build_station(station_num, station_code, hour, row))
elif (not 'id' in stations[station_code][hour]):
stations[station_code][hour] = build_station(station_num, station_code, hour, row)
param_value = float(value)
if not is_other:
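# Unit codes appear to follow UN/CEFACT common codes: 'GQ' (microgram per
# cubic metre) for most pollutants, 'GP' (milligram per cubic metre) for CO.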
unit_code = 'GQ'
if property_name == 'CO':
unit_code = 'GP'
measurand_data = [property_name, str(param_value), unit_code, property_desc]
stations[station_code][hour]['measurand']['value'].append(','.join(measurand_data))
else:
if property_name == 'relativeHumidity':
param_value = param_value / 100
stations[station_code][hour][property_name] = {
'value': param_value
}
else:
# ensure there are no holes in the data
if (len(stations[station_code]) < hour + 1):
stations[station_code].append({})
hour += 1
logger.debug('Collected %d hourly entries for station %s', len(stations[station_code]), station_code)
# Now persisting data to Orion Context Broker
for station in stations:
station_data = stations[station]
data_array = []
for data in station_data:
if 'id' in data:
data_array.append(data)
if len(data_array) > 0:
logger.debug("Retrieved data for %s at %s (last hour)", station, data_array[-1]['dateObserved']['value'])
# Last measurement is duplicated to have an entity with the latest measurement obtained
last_measurement = data_array[-1]
last_measurement['id'] = 'Madrid-AirQualityObserved-' + last_measurement['stationCode']['value'] + '-' + 'latest'
else: logger.warn('No data retrieved for: %s', station)
post_station_data(station, data_array)
#############
# Builds a new entity of type AirQualityObserved
def build_station(station_num, station_code, hour, row):
station_data = {
'type': AMBIENT_TYPE_NAME,
'measurand': {
'type': 'List',
'value': []
},
'stationCode': {
'value': station_code
},
'stationName': {
'value': sanitize(station_dict[station_num]['name'])
},
'address': {
'type': 'PostalAddress',
'value': {
'addressCountry': 'ES',
'addressLocality': 'Madrid',
'streetAddress': sanitize(station_dict[station_num]['address'])
}
},
'location': {
'type': 'geo:json',
'value': station_dict[station_num]['location']['value'] or None
},
'source': {
'type': 'URL',
'value': 'http://datos.madrid.es'
},
'dataProvider': {
'value': 'TEF'
}
}
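# Columns 6-8 of the row carry the year, month and day of the observation;
# together with `hour` they define a one-hour validity period.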
valid_from = datetime.datetime(int(row[6]), int(row[7]), int(row[8]), hour)
station_data['id'] = 'Madrid-AirQualityObserved-' + station_code + '-' + valid_from.isoformat()
valid_to = (valid_from + datetime.timedelta(hours=1))
# Adjust timezones
valid_from = valid_from.replace(tzinfo=madrid_tz)
valid_to = valid_to.replace(tzinfo=madrid_tz)
station_data['validity'] = {
'value': {
'from': valid_from.isoformat(),
'to': valid_to.isoformat()
},
'type': 'StructuredValue'
}
station_data['hour'] = {
'value': str(hour) + ':' + '00'
}
observ_corrected_date = valid_from
station_data['dateObserved'] = {
'type': 'DateTime',
'value': observ_corrected_date.isoformat()
}
return station_data
# POST data to an Orion Context Broker instance using NGSIv2 API
def post_station_data(station_code, data):
if len(data) == 0:
return
payload = {
'actionType': 'APPEND',
'entities': data
}
data_as_str = json.dumps(payload)
headers = {
'Content-Type': MIME_JSON,
'Content-Length': len(data_as_str),
'Fiware-Service': FIWARE_SERVICE,
'Fiware-Servicepath': FIWARE_SPATH
}
req = urllib2.Request(url=(orion_service + '/v2/op/update'), data=data_as_str, headers=headers)
logger.debug('Going to persist %s to %s - %d', station_code, orion_service, len(data))
try:
with contextlib.closing(urllib2.urlopen(req)) as f:
global persisted_entities
logger.debug("Entity successfully created: %s", station_code)
persisted_entities = persisted_entities + 1
except urllib2.URLError as e:
global in_error_entities
logger.error('Error while POSTing data to Orion: %d %s', e.code, e.read())
logger.debug('Data which failed: %s', data_as_str)
in_error_entities = in_error_entities + 1
# Reads station data from CSV file
def read_station_csv():
with contextlib.closing(open('madrid_airquality_stations.csv', 'rU')) as csvfile:
reader = csv.reader(csvfile, delimiter=',')
index = 0
for row in reader:
if index != 0:
station_code = row[2]
station_name = row[3]
station_address = row[4]
station_coords = {
'type': 'geo:json',
'value': {
'type': 'Point',
'coordinates': [float(row[0]), float(row[1])]
}
}
station_dict[station_code.zfill(3)] = {
'name': station_name,
'address': station_address,
'location': station_coords
}
index += 1
station_dict['099'] = {
'name': 'average',
'address': None,
'location': None
}
def setup_logger():
global logger
LOG_FILENAME = 'harvest_madrid.log'
# Set up a specific logger with our desired output level
logger = logging.getLogger('Madrid')
logger.setLevel(logging.DEBUG)
# Add the log message handler to the logger
handler = logging.handlers.RotatingFileHandler(LOG_FILENAME, maxBytes=2000000, backupCount=3)
formatter = logging.Formatter('%(levelname)s %(asctime)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
if __name__ == '__main__':
setup_logger()
read_station_csv()
logger.debug('#### Starting a new harvesting and harmonization cycle ... ####')
logger.debug('Number of air quality stations known: %d', len(station_dict.keys()))
get_air_quality_madrid()
logger.debug('Number of entities persisted: %d', persisted_entities)
logger.debug('Number of entities in error: %d', in_error_entities)
logger.debug('#### Harvesting cycle finished ... ####')
| mit |
tudorvio/nova | nova/virt/disk/mount/loop.py | 64 | 2270 | # Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Support for mounting images with the loop device."""
from oslo_log import log as logging
from nova.i18n import _, _LI
from nova import utils
from nova.virt.disk.mount import api
LOG = logging.getLogger(__name__)
class LoopMount(api.Mount):
"""loop back support for raw images."""
mode = 'loop'
def _inner_get_dev(self):
out, err = utils.trycmd('losetup', '--find', '--show',
self.image.path,
run_as_root=True)
if err:
self.error = _('Could not attach image to loopback: %s') % err
LOG.info(_LI('Loop mount error: %s'), self.error)
self.linked = False
self.device = None
return False
self.device = out.strip()
LOG.debug("Got loop device %s", self.device)
self.linked = True
return True
def get_dev(self):
# NOTE(mikal): the retry is required here in case we are low on loop
# devices. Note however that modern kernels will use more loop devices
# if they exist. If you're seeing lots of retries, consider adding
# more devices.
return self._get_dev_retry_helper()
def unget_dev(self):
if not self.linked:
return
# NOTE(mikal): On some kernels, losetup -d will intermittently fail,
# thus leaking a loop device unless the losetup --detach is retried:
# https://lkml.org/lkml/2012/9/28/62
LOG.debug("Release loop device %s", self.device)
utils.execute('losetup', '--detach', self.device, run_as_root=True,
attempts=3)
self.linked = False
self.device = None
| apache-2.0 |
kushG/osf.io | tests/test_rubeus.py | 2 | 19647 | #!/usr/bin/env python
# encoding: utf-8
import os
from types import NoneType
from xmlrpclib import DateTime
import mock
from nose.tools import *
from webtest_plus import TestApp
from tests.base import OsfTestCase
from tests.factories import (UserFactory, ProjectFactory, NodeFactory,
AuthFactory, PointerFactory, DashboardFactory, FolderFactory, RegistrationFactory)
from framework.auth import Auth
from website.util import rubeus, api_url_for
import website.app
from website.util.rubeus import sort_by_name
from website.settings import ALL_MY_REGISTRATIONS_ID, ALL_MY_PROJECTS_ID, \
ALL_MY_PROJECTS_NAME, ALL_MY_REGISTRATIONS_NAME
app = website.app.init_app(
routes=True, set_backends=False, settings_module='website.settings'
)
class TestRubeus(OsfTestCase):
def setUp(self):
super(TestRubeus, self).setUp()
self.project = ProjectFactory.build()
self.consolidated_auth = Auth(user=self.project.creator)
self.non_authenticator = UserFactory()
self.project.save()
self.project.add_contributor(
contributor=self.non_authenticator,
auth=self.consolidated_auth,
)
self.project.add_addon('s3', self.consolidated_auth)
self.project.creator.add_addon('s3', self.consolidated_auth)
self.node_settings = self.project.get_addon('s3')
self.user_settings = self.project.creator.get_addon('s3')
self.user_settings.access_key = 'We-Will-Rock-You'
self.user_settings.secret_key = 'Idontknowanyqueensongs'
self.node_settings.bucket = 'Sheer-Heart-Attack'
self.node_settings.user_settings = self.user_settings
self.node_settings.save()
def test_hgrid_dummy(self):
node_settings = self.node_settings
node = self.project
user = Auth(self.project.creator)
# FIXME: These tests are very brittle.
expected = {
'isPointer': False,
'provider': 's3',
'addonFullname': node_settings.config.full_name,
'iconUrl': node_settings.config.icon_url,
'name': 'Amazon Simple Storage Service: {0}'.format(
node_settings.bucket
),
'kind': 'folder',
'accept': {
'maxSize': node_settings.config.max_file_size,
'acceptedFiles': node_settings.config.accept_extensions
},
'isAddonRoot': True,
'extra': None,
'buttons': None,
'nodeId': node._id,
'nodeUrl': node.url,
'nodeApiUrl': node.api_url,
}
permissions = {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
}
expected['permissions'] = permissions
actual = rubeus.build_addon_root(node_settings, node_settings.bucket, permissions=permissions)
assert actual['urls']['fetch']
assert actual['urls']['upload']
del actual['urls']
assert_equals(actual, expected)
def test_build_addon_root_has_correct_upload_limits(self):
self.node_settings.config.max_file_size = 10
self.node_settings.config.high_max_file_size = 20
node = self.project
user = self.project.creator
auth = Auth(user)
permissions = {
'view': node.can_view(auth),
'edit': node.can_edit(auth) and not node.is_registration,
}
result = rubeus.build_addon_root(
self.node_settings,
self.node_settings.bucket,
permissions=permissions,
user=user
)
assert_equal(result['accept']['maxSize'], self.node_settings.config.max_file_size)
# user now has elevated upload limit
user.system_tags.append('high_upload_limit')
user.save()
result = rubeus.build_addon_root(
self.node_settings,
self.node_settings.bucket,
permissions=permissions,
user=user
)
assert_equal(
result['accept']['maxSize'],
self.node_settings.config.high_max_file_size
)
def test_hgrid_dummy_fail(self):
node_settings = self.node_settings
node = self.project
user = Auth(self.project.creator)
rv = {
'isPointer': False,
'addon': 's3',
'addonFullname': node_settings.config.full_name,
'iconUrl': node_settings.config.icon_url,
'name': 'Amazon Simple Storage Service: {0}'.format(
node_settings.bucket
),
'kind': 'folder',
'permissions': {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
},
'urls': {
'fetch': node.api_url + 's3/hgrid/',
'upload': node.api_url + 's3/upload/'
},
'accept': {
'maxSize': node_settings.config.max_file_size,
'acceptedFiles': node_settings.config.accept_extensions
},
'isAddonRoot': True,
'nodeId': node._id,
'nodeUrl': node.url,
'nodeApiUrl': node.api_url,
}
permissions = {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
}
assert_not_equals(rubeus.build_addon_root(
node_settings, node_settings.bucket, permissions=permissions), rv)
def test_hgrid_dummy_overrides(self):
node_settings = self.node_settings
node = self.project
user = Auth(self.project.creator)
expected = {
'isPointer': False,
'provider': 's3',
'addonFullname': node_settings.config.full_name,
'iconUrl': node_settings.config.icon_url,
'name': 'Amazon Simple Storage Service: {0}'.format(
node_settings.bucket
),
'kind': 'folder',
'permissions': {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
},
'urls': {},
'accept': {
'maxSize': node_settings.config.max_file_size,
'acceptedFiles': node_settings.config.accept_extensions
},
'isAddonRoot': True,
'extra': None,
'buttons': None,
'nodeId': node._id,
'nodeUrl': node.url,
'nodeApiUrl': node.api_url,
}
permissions = {
'view': node.can_view(user),
'edit': node.can_edit(user) and not node.is_registration,
}
assert_equal(
rubeus.build_addon_root(
node_settings, node_settings.bucket,
permissions=permissions, urls={}
),
expected
)
def test_serialize_private_node(self):
user = UserFactory()
auth = Auth(user=user)
public = ProjectFactory.build(is_public=True)
# Add contributor with write permissions to avoid admin permission cascade
public.add_contributor(user, permissions=['read', 'write'])
public.save()
private = ProjectFactory(project=public, is_public=False)
NodeFactory(project=private)
collector = rubeus.NodeFileCollector(node=public, auth=auth)
private_dummy = collector._serialize_node(private)
assert_false(private_dummy['permissions']['edit'])
assert_false(private_dummy['permissions']['view'])
assert_equal(private_dummy['name'], 'Private Component')
assert_equal(len(private_dummy['children']), 0)
def test_collect_components_deleted(self):
node = NodeFactory(creator=self.project.creator, project=self.project)
node.is_deleted = True
collector = rubeus.NodeFileCollector(
self.project, Auth(user=UserFactory())
)
nodes = collector._collect_components(self.project, visited=[])
assert_equal(len(nodes), 0)
def test_serialized_pointer_has_flag_indicating_its_a_pointer(self):
pointer = PointerFactory()
serializer = rubeus.NodeFileCollector(node=pointer, auth=self.consolidated_auth)
ret = serializer._serialize_node(pointer)
assert_true(ret['isPointer'])
# TODO: Make this more reusable across test modules
mock_addon = mock.Mock()
serialized = {
'addon': 'mockaddon',
'name': 'Mock Addon',
'isAddonRoot': True,
'extra': '',
'permissions': {'view': True, 'edit': True},
'urls': {
'fetch': '/fetch',
'delete': '/delete'
}
}
mock_addon.config.get_hgrid_data.return_value = [serialized]
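# Module-level mock addon shared by TestSerializingNodeWithAddon below; its
# get_hgrid_data hook returns a one-element list containing the `serialized`
# dict defined above.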
class TestSerializingNodeWithAddon(OsfTestCase):
def setUp(self):
super(TestSerializingNodeWithAddon, self).setUp()
self.auth = AuthFactory()
self.project = ProjectFactory(creator=self.auth.user)
self.project.get_addons = mock.Mock()
self.project.get_addons.return_value = [mock_addon]
self.serializer = rubeus.NodeFileCollector(node=self.project, auth=self.auth)
def test_collect_addons(self):
ret = self.serializer._collect_addons(self.project)
assert_equal(ret, [serialized])
def test_sort_by_name(self):
files = [
{'name': 'F.png'},
{'name': 'd.png'},
{'name': 'B.png'},
{'name': 'a.png'},
{'name': 'c.png'},
{'name': 'e.png'},
{'name': 'g.png'},
]
sorted_files = [
{'name': 'a.png'},
{'name': 'B.png'},
{'name': 'c.png'},
{'name': 'd.png'},
{'name': 'e.png'},
{'name': 'F.png'},
{'name': 'g.png'},
]
ret = sort_by_name(files)
for index, value in enumerate(ret):
assert_equal(value['name'], sorted_files[index]['name'])
def test_sort_by_name_none(self):
files = None
sorted_files = None
ret = sort_by_name(files)
assert_equal(ret, sorted_files)
def test_serialize_node(self):
ret = self.serializer._serialize_node(self.project)
assert_equal(
len(ret['children']),
len(self.project.get_addons.return_value) + len(self.project.nodes)
)
assert_equal(ret['kind'], rubeus.FOLDER)
assert_equal(ret['name'], 'Project: {0}'.format(self.project.title))
assert_equal(
ret['permissions'],
{
'view': True,
'edit': True,
}
)
assert_equal(
ret['urls'],
{
'upload': None,
'fetch': None,
},
)
def test_collect_js_recursive(self):
self.project.get_addons.return_value[0].config.include_js = {'files': ['foo.js']}
self.project.get_addons.return_value[0].config.short_name = 'dropbox'
node = NodeFactory(project=self.project)
mock_node_addon = mock.Mock()
mock_node_addon.config.include_js = {'files': ['bar.js', 'baz.js']}
mock_node_addon.config.short_name = 'dropbox'
node.get_addons = mock.Mock()
node.get_addons.return_value = [mock_node_addon]
result = rubeus.collect_addon_js(self.project)
assert_in('foo.js', result)
assert_in('bar.js', result)
assert_in('baz.js', result)
def test_collect_js_unique(self):
self.project.get_addons.return_value[0].config.include_js = {'files': ['foo.js']}
self.project.get_addons.return_value[0].config.short_name = 'dropbox'
node = NodeFactory(project=self.project)
mock_node_addon = mock.Mock()
mock_node_addon.config.include_js = {'files': ['foo.js', 'baz.js']}
mock_node_addon.config.short_name = 'dropbox'
node.get_addons = mock.Mock()
node.get_addons.return_value = [mock_node_addon]
result = rubeus.collect_addon_js(self.project)
assert_in('foo.js', result)
assert_in('baz.js', result)
class TestSerializingEmptyDashboard(OsfTestCase):
def setUp(self):
super(TestSerializingEmptyDashboard, self).setUp()
self.dash = DashboardFactory()
self.auth = AuthFactory(user=self.dash.creator)
self.dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
def test_empty_dashboard_hgrid_representation_is_list(self):
assert_is_instance(self.dash_hgrid, list)
def test_empty_dashboard_has_proper_number_of_smart_folders(self):
assert_equal(len(self.dash_hgrid), 2)
def test_empty_dashboard_smart_folders_have_correct_names_and_ids(self):
for node_hgrid in self.dash_hgrid:
assert_in(node_hgrid['name'], (ALL_MY_PROJECTS_NAME, ALL_MY_REGISTRATIONS_NAME))
for node_hgrid in self.dash_hgrid:
if node_hgrid['name'] == ALL_MY_PROJECTS_NAME:
assert_equal(node_hgrid['node_id'], ALL_MY_PROJECTS_ID)
elif node_hgrid['name'] == ALL_MY_REGISTRATIONS_NAME:
assert_equal(node_hgrid['node_id'], ALL_MY_REGISTRATIONS_ID)
def test_empty_dashboard_smart_folders_are_empty(self):
for node_hgrid in self.dash_hgrid:
assert_equal(node_hgrid['children'], [])
def test_empty_dashboard_are_valid_folders(self):
for node in self.dash_hgrid:
assert_valid_hgrid_folder(node)
def test_empty_dashboard_smart_folders_are_valid_smart_folders(self):
for node in self.dash_hgrid:
assert_valid_hgrid_smart_folder(node)
class TestSerializingPopulatedDashboard(OsfTestCase):
def setUp(self):
super(TestSerializingPopulatedDashboard, self).setUp()
self.dash = DashboardFactory()
self.user = self.dash.creator
self.auth = AuthFactory(user=self.user)
self.init_dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
def test_dashboard_adding_one_folder_increases_size_by_one(self):
folder = FolderFactory(creator=self.user)
self.dash.add_pointer(folder, self.auth)
dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
assert_equal(len(dash_hgrid), len(self.init_dash_hgrid) + 1)
def test_dashboard_adding_one_folder_does_not_remove_smart_folders(self):
folder = FolderFactory(creator=self.user)
self.dash.add_pointer(folder, self.auth)
dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
assert_true(
{ALL_MY_PROJECTS_NAME, ALL_MY_REGISTRATIONS_NAME, folder.title} <=
{node_hgrid['name'] for node_hgrid in dash_hgrid}
)
def test_dashboard_adding_one_folder_increases_size_by_one_in_hgrid_representation(self):
folder = FolderFactory(creator=self.user)
self.dash.add_pointer(folder, self.auth)
project = ProjectFactory(creator=self.user)
folder.add_pointer(project,self.auth)
dash_hgrid = rubeus.to_project_hgrid(self.dash, self.auth)
assert_equal(len(dash_hgrid), len(self.init_dash_hgrid) + 1)
class TestSerializingFolders(OsfTestCase):
def setUp(self):
super(TestSerializingFolders, self).setUp()
self.user = UserFactory()
self.auth = AuthFactory(user=self.user)
def test_serialized_folder_is_valid_folder(self):
folder = FolderFactory(creator=self.user)
folder_hgrid = rubeus.to_project_hgrid(folder, self.auth)
assert_equal(folder_hgrid, [])
def test_serialize_folder_containing_folder_increases_size_by_one(self):
outer_folder = FolderFactory(creator=self.user)
folder_hgrid = rubeus.to_project_hgrid(outer_folder, self.auth)
inner_folder = FolderFactory(creator=self.user)
outer_folder.add_pointer(inner_folder, self.auth)
new_hgrid = rubeus.to_project_hgrid(outer_folder, self.auth)
assert_equal(len(folder_hgrid) + 1, len(new_hgrid))
class TestSmartFolderViews(OsfTestCase):
def setUp(self):
super(TestSmartFolderViews, self).setUp()
self.app = TestApp(app)
self.dash = DashboardFactory()
self.user = self.dash.creator
self.auth = AuthFactory(user=self.user)
@mock.patch('website.project.decorators.Auth.from_kwargs')
def test_adding_project_to_dashboard_increases_json_size_by_one(self, mock_from_kwargs):
mock_from_kwargs.return_value = Auth(user=self.user)
with app.test_request_context():
url = api_url_for('get_dashboard')
res = self.app.get(url + ALL_MY_PROJECTS_ID)
init_len = len(res.json[u'data'])
ProjectFactory(creator=self.user)
res = self.app.get(url + ALL_MY_PROJECTS_ID)
assert_equal(len(res.json[u'data']), init_len + 1)
@mock.patch('website.project.decorators.Auth.from_kwargs')
def test_adding_registration_to_dashboard_increases_json_size_by_one(self, mock_from_kwargs):
mock_from_kwargs.return_value = Auth(user=self.user)
with app.test_request_context():
url = api_url_for('get_dashboard')
res = self.app.get(url + ALL_MY_REGISTRATIONS_ID)
init_len = len(res.json[u'data'])
RegistrationFactory(creator=self.user)
res = self.app.get(url + ALL_MY_REGISTRATIONS_ID)
assert_equal(len(res.json[u'data']), init_len + 1)
def assert_valid_hgrid_folder(node_hgrid):
folder_types = {
'name': str,
'children': list,
'contributors': list,
'dateModified': (DateTime, NoneType),
'node_id': str,
'modifiedDelta': int,
'modifiedBy': (dict, NoneType),
'urls': dict,
'isDashboard': bool,
'expand': bool,
'permissions': dict,
'isSmartFolder': bool,
'childrenCount': int,
}
keys_types = {
'urls': (str, NoneType),
'permissions': bool,
}
folder_values = {
'parentIsFolder': True,
'isPointer': False,
'isFolder': True,
'kind': 'folder',
'type': 'smart-folder'
}
if isinstance(node_hgrid, list):
node_hgrid = node_hgrid[0]['data']
else:
assert_is_instance(node_hgrid, dict)
for key, correct_value in folder_values.items():
assert_equal(node_hgrid[key], correct_value)
for key, correct_type in folder_types.items():
assert_is_instance(node_hgrid[key], correct_type)
for key, correct_type in keys_types.items():
for inner_key, inner_value in node_hgrid[key].items():
assert_is_instance(inner_value, correct_type)
valid_keys = set(folder_types.keys()).union(folder_values.keys())
for key in node_hgrid.keys():
assert_in(key, valid_keys)
def assert_valid_hgrid_smart_folder(node_hgrid):
smart_folder_values = {
'contributors': [],
'isPointer': False,
'dateModified': None,
'modifiedDelta': 0,
'modifiedBy': None,
'isSmartFolder': True,
'urls': {
'upload': None,
'fetch': None
},
'isDashboard': False,
'permissions': {
'edit': False,
'acceptsDrops': False,
'copyable': False,
'movable': False,
'view': True
}
}
assert_valid_hgrid_folder(node_hgrid)
for attr, correct_value in smart_folder_values.items():
assert_equal(correct_value, node_hgrid[attr])
| apache-2.0 |
nhuntwalker/astroML | book_figures/chapter5/fig_likelihood_cauchy.py | 3 | 3219 | """
Log-likelihood for Cauchy Distribution
--------------------------------------
Figure 5.10
An illustration of the logarithm of the posterior probability distribution for
:math:`\mu` and :math:`\gamma`, :math:`L(\mu,\gamma)` (see eq. 5.75) for
N = 10 (the sample is generated using the Cauchy distribution with
:math:`\mu = 0` and :math:`\gamma = 2`). The maximum of L is renormalized
to 0, and color coded as shown in the legend. The contours enclose the regions
that contain 0.683, 0.955 and 0.997 of the cumulative (integrated) posterior
probability.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import cauchy
from astroML.plotting.mcmc import convert_to_stdev
from astroML.stats import median_sigmaG
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def cauchy_logL(xi, gamma, mu):
"""Equation 5.74: cauchy likelihood"""
xi = np.asarray(xi)
n = xi.size
shape = np.broadcast(gamma, mu).shape
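# append one singleton axis per parameter-grid dimension so the data points
# broadcast against the (gamma, mu) grid when summing the log-likelihood terms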
xi = xi.reshape(xi.shape + tuple([1 for s in shape]))
return ((n - 1) * np.log(gamma)
- np.sum(np.log(gamma ** 2 + (xi - mu) ** 2), 0))
#------------------------------------------------------------
# Define the grid and compute logL
gamma = np.linspace(0.1, 5, 70)
mu = np.linspace(-5, 5, 70)
np.random.seed(44)
mu0 = 0
gamma0 = 2
xi = cauchy(mu0, gamma0).rvs(10)
logL = cauchy_logL(xi, gamma[:, np.newaxis], mu)
logL -= logL.max()
#------------------------------------------------------------
# Find the max and print some information
i, j = np.where(logL >= np.max(logL))
print("mu from likelihood:", mu[j])
print("gamma from likelihood:", gamma[i])
print()
med, sigG = median_sigmaG(xi)
print("mu from median", med)
print("gamma from quartiles:", sigG / 1.483) # Equation 3.54
print()
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
plt.imshow(logL, origin='lower', cmap=plt.cm.binary,
extent=(mu[0], mu[-1], gamma[0], gamma[-1]),
aspect='auto')
plt.colorbar().set_label(r'$\log(L)$')
plt.clim(-5, 0)
plt.contour(mu, gamma, convert_to_stdev(logL),
levels=(0.683, 0.955, 0.997),
colors='k')
plt.text(0.5, 0.93,
r'$L(\mu,\gamma)\ \mathrm{for}\ \bar{x}=0,\ \gamma=2,\ n=10$',
bbox=dict(ec='k', fc='w', alpha=0.9),
ha='center', va='center', transform=plt.gca().transAxes)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\gamma$')
plt.show()
| bsd-2-clause |
nhomar/odoo-mirror | addons/mail/__openerp__.py | 41 | 3832 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP S.A. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Social Network',
'version': '1.0',
'category': 'Social Network',
'sequence': 2,
'summary': 'Discussions, Mailing Lists, News',
'description': """
Business oriented Social Networking
===================================
The Social Networking module provides a unified social network abstraction layer allowing applications to display a complete
communication history on documents with a fully-integrated email and message management system.
It enables users to read and send messages as well as emails. It also provides a feeds page combined with a subscription mechanism that allows users to follow documents and to be constantly updated about recent news.
Main Features
-------------
* Clean and renewed communication history for any OpenERP document that can act as a discussion topic
* Subscription mechanism to be updated about new messages on interesting documents
* Unified feeds page to see recent messages and activity on followed documents
* User communication through the feeds page
* Threaded discussion design on documents
* Relies on the global outgoing mail server - an integrated email management system - allowing emails to be sent with a configurable scheduler-based processing engine
* Includes an extensible generic email composition assistant that can turn into a mass-mailing assistant, capable of interpreting simple *placeholder expressions* that will be replaced with dynamic data when each email is actually sent.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/enterprise-social-network',
'depends': ['base', 'base_setup'],
'data': [
'wizard/invite_view.xml',
'wizard/mail_compose_message_view.xml',
'mail_message_subtype.xml',
'res_config_view.xml',
'mail_message_view.xml',
'mail_mail_view.xml',
'mail_followers_view.xml',
'mail_thread_view.xml',
'mail_group_view.xml',
'res_partner_view.xml',
'data/mail_data.xml',
'data/mail_group_data.xml',
'security/mail_security.xml',
'security/ir.model.access.csv',
'mail_alias_view.xml',
'res_users_view.xml',
'views/mail.xml',
],
'demo': [
'data/mail_demo.xml',
'data/mail_group_demo_data.xml',
],
'installable': True,
'application': True,
'images': [
'images/inbox.jpeg',
'images/messages_form.jpeg',
'images/messages_list.jpeg',
'images/email.jpeg',
'images/join_a_group.jpeg',
'images/share_a_message.jpeg',
],
'qweb': [
'static/src/xml/mail.xml',
'static/src/xml/mail_followers.xml',
'static/src/xml/announcement.xml',
'static/src/xml/suggestions.xml',
],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pombredanne/bitmath | tests/test_properties.py | 2 | 2080 | # -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright © 2014 Tim Bielawa <[email protected]>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tests to verify that type properties are accessible and immutable
"""
from . import TestCase
import bitmath
class TestAttributeProperties(TestCase):
def setUp(self):
self.kib = bitmath.KiB(1)
self.kib_bits = 8192
self.kib_bytes = 1024
self.kib_value = 1
def test_read_bits(self):
"""Read the 'bits' property of a bitmath type"""
self.assertEqual(self.kib.bits, self.kib_bits)
def test_read_bytes(self):
"""Read the 'bytes' property of a bitmath type"""
self.assertEqual(self.kib.bytes, self.kib_bytes)
def test_read_value(self):
"""Read the 'value' property of a bitmath type"""
self.assertEqual(self.kib.value, self.kib_value)
def test_write_property_fails(self):
"""bitmath type's properties are read-only"""
with self.assertRaises(AttributeError):
self.kib.value += 42
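    # --- Illustrative addition, not part of the original suite ---
    # A minimal sketch assuming the 'bits' property is read-only in the same
    # way as 'value'; it mirrors the pattern of test_write_property_fails.
    def test_write_bits_property_fails(self):
        """Hypothetical check: the 'bits' property should also reject assignment"""
        with self.assertRaises(AttributeError):
            self.kib.bits = 42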
| mit |
denovator/myfriki | lib/jinja2/jinja2/_compat.py | 638 | 4042 | # -*- coding: utf-8 -*-
"""
jinja2._compat
~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: Copyright 2013 by the Jinja team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
_identity = lambda x: x
if not PY2:
unichr = chr
range_type = range
text_type = str
string_types = (str,)
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
import pickle
from io import BytesIO, StringIO
NativeStringIO = StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
ifilter = filter
imap = map
izip = zip
intern = sys.intern
implements_iterator = _identity
implements_to_string = _identity
encode_filename = _identity
get_next = lambda x: x.__next__
else:
unichr = unichr
text_type = unicode
range_type = xrange
string_types = (str, unicode)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
import cPickle as pickle
from cStringIO import StringIO as BytesIO, StringIO
NativeStringIO = BytesIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
from itertools import imap, izip, ifilter
intern = intern
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
get_next = lambda x: x.next
def encode_filename(filename):
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
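# Illustrative sketch (added for clarity; the Message class is hypothetical,
# not part of jinja2). implements_to_string lets a class define a single
# unicode-returning __str__ that works on both Python 2 and Python 3:
#
#     @implements_to_string
#     class Message(object):
#         def __init__(self, text):
#             self.text = text
#         def __str__(self):
#             return self.text
#
#     # On PY2 the decorator copies __str__ to __unicode__ and makes __str__
#     # return UTF-8 encoded bytes; on PY3 it is a no-op (_identity).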
try:
next = next
except NameError:
def next(it):
return it.next()
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
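# Illustrative sketch (added for clarity; RegisteringMeta and Plugin are
# hypothetical names). with_metaclass lets one class statement use a real
# metaclass under both Python 2 and Python 3 syntaxes:
#
#     class RegisteringMeta(type):
#         registry = []
#         def __new__(mcs, name, bases, d):
#             cls = type.__new__(mcs, name, bases, d)
#             RegisteringMeta.registry.append(cls)
#             return cls
#
#     class Plugin(with_metaclass(RegisteringMeta, object)):
#         pass
#
#     # Plugin is built by RegisteringMeta on both Python 2 and 3, and the
#     # 'temporary_class' helper does not show up in Plugin.__mro__.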
try:
from collections import Mapping as mapping_types
except ImportError:
import UserDict
mapping_types = (UserDict.UserDict, UserDict.DictMixin, dict)
# common types. These do exist in the special types module too which however
# does not exist in IronPython out of the box. Also that way we don't have
# to deal with implementation specific stuff here
class _C(object):
def method(self): pass
def _func():
yield None
function_type = type(_func)
generator_type = type(_func())
method_type = type(_C().method)
code_type = type(_C.method.__code__)
try:
raise TypeError()
except TypeError:
_tb = sys.exc_info()[2]
traceback_type = type(_tb)
frame_type = type(_tb.tb_frame)
try:
from urllib.parse import quote_from_bytes as url_quote
except ImportError:
from urllib import quote as url_quote
try:
from thread import allocate_lock
except ImportError:
try:
from threading import Lock as allocate_lock
except ImportError:
from dummy_thread import allocate_lock
| apache-2.0 |
MichaelHills/seizure-detection | common/data.py | 1 | 1254 | import io  # presumably the package-local common/io helper (Python 2 implicit relative import), not the stdlib io
import os
import os.path
def makedirs(dir):
try:
os.makedirs(dir)
except:
pass
class jsdict(dict):
def __init__(self, data):
self.__dict__ = data
class CachedDataLoader:
def __init__(self, dir):
self.dir = dir
makedirs(dir)
# try to load data from filename, if it doesn't exist then run the func()
# and save the data to filename
def load(self, filename, func):
def wrap_data(data):
if isinstance(data, list):
return [jsdict(x) for x in data]
else:
return jsdict(data)
if filename is not None:
filename = os.path.join(self.dir, filename)
data = io.load_hkl_file(filename)
if data is not None:
return wrap_data(data)
data = io.load_pickle_file(filename)
if data is not None:
return wrap_data(data)
data = func()
if filename is not None:
if isinstance(data, dict) and '__use_pickle' not in data.keys():
if io.save_hkl_file(filename, data):
return wrap_data(data)
io.save_pickle_file(filename, data)
return wrap_data(data)
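# Illustrative usage sketch (added for clarity; the cache directory, file name
# and feature function below are hypothetical, not part of the original code):
#
#     loader = CachedDataLoader('data-cache')
#
#     def compute_features():
#         # expensive work, e.g. transforming EEG segments
#         return {'X': [[0.0, 1.0], [2.0, 3.0]], 'y': [0, 1]}
#
#     data = loader.load('patient1_features.hkl', compute_features)
#     # The first call runs compute_features() and stores the result under
#     # data-cache/patient1_features.hkl (falling back to pickle); later calls
#     # return the cached dict wrapped in jsdict, so fields are attribute
#     # accessible: data.X, data.y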
| mit |
GranPC/linux-asus-flo | scripts/build-all.py | 1182 | 9486 | #! /usr/bin/env python
# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import errno  # used by check_build()
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
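# Illustrative note (added for clarity; the 'msm8960' target below is
# hypothetical). One build() pass is roughly equivalent to running:
#
#     ARCH=arm CROSS_COMPILE=arm-none-linux-gnueabi- KCONFIG_NOTIMESTAMP=true \
#         make O=../all-kernels/msm8960 msm8960_defconfig
#     ARCH=arm CROSS_COMPILE=arm-none-linux-gnueabi- KCONFIG_NOTIMESTAMP=true \
#         make O=../all-kernels/msm8960 vmlinux modules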
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/apq*_defconfig'):
names[os.path.basename(n)[:-10]] = n
return names
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
build = Builder(log_name)
result = build.run(['make', 'O=%s' % dest_dir] + make_command)
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" % (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
| gpl-2.0 |
ritzk/ansible-modules-core | cloud/openstack/_nova_compute.py | 66 | 22440 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
# (c) 2013, John Dewey <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import operator
import os
import time
try:
from novaclient.v1_1 import client as nova_client
from novaclient.v1_1 import floating_ips
from novaclient import exceptions
from novaclient import utils
HAS_NOVACLIENT = True
except ImportError:
HAS_NOVACLIENT = False
DOCUMENTATION = '''
---
module: nova_compute
version_added: "1.2"
deprecated: Deprecated in 2.0. Use os_server instead
short_description: Create/Delete VMs from OpenStack
description:
- Create or Remove virtual machines from Openstack.
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- Password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- The tenant name of the login user
required: true
default: 'yes'
auth_url:
description:
- The keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- Name of the region
required: false
default: None
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name that has to be given to the instance
required: true
default: None
image_id:
description:
- The id of the base image to boot. Mutually exclusive with image_name
required: true
default: None
image_name:
description:
- The name of the base image to boot. Mutually exclusive with image_id
required: true
default: None
version_added: "1.8"
image_exclude:
description:
- Text to use to filter image names, for the case, such as HP, where there are multiple image names matching the common identifying portions. image_exclude is a negative match filter - it is text that may not exist in the image name. Defaults to "(deprecated)"
version_added: "1.8"
flavor_id:
description:
- The id of the flavor in which the new VM has to be created. Mutually exclusive with flavor_ram
required: false
default: 1
flavor_ram:
description:
- The minimum amount of ram in MB that the flavor in which the new VM has to be created must have. Mutually exclusive with flavor_id
required: false
default: 1
version_added: "1.8"
flavor_include:
description:
- Text to use to filter flavor names, for the case, such as Rackspace, where there are multiple flavors that have the same ram count. flavor_include is a positive match filter - it must exist in the flavor name.
version_added: "1.8"
key_name:
description:
- The key pair name to be used when creating a VM
required: false
default: None
security_groups:
description:
- The name of the security group to which the VM should be added
required: false
default: None
nics:
description:
- A list of network id's to which the VM's interface should be attached
required: false
default: None
auto_floating_ip:
description:
- Should a floating ip be auto created and assigned
required: false
default: 'no'
version_added: "1.8"
floating_ips:
description:
- list of valid floating IPs that pre-exist to assign to this node
required: false
default: None
version_added: "1.8"
floating_ip_pools:
description:
- list of floating IP pools from which to choose a floating IP
required: false
default: None
version_added: "1.8"
availability_zone:
description:
- Name of the availability zone
required: false
default: None
version_added: "1.8"
meta:
description:
- A list of key value pairs that should be provided as a metadata to the new VM
required: false
default: None
wait:
description:
- If the module should wait for the VM to be created.
required: false
default: 'yes'
wait_for:
description:
- The amount of time the module should wait for the VM to get into active state
required: false
default: 180
config_drive:
description:
- Whether to boot the server with config drive enabled
required: false
default: 'no'
version_added: "1.8"
user_data:
description:
- Opaque blob of data which is made available to the instance
required: false
default: None
version_added: "1.6"
scheduler_hints:
description:
- Arbitrary key/value pairs to the scheduler for custom use
required: false
default: None
version_added: "1.9"
requirements:
- "python >= 2.6"
- "python-novaclient"
'''
EXAMPLES = '''
# Creates a new VM and attaches to a network and passes metadata to the instance
- nova_compute:
state: present
login_username: admin
login_password: admin
login_tenant_name: admin
name: vm1
image_id: 4f905f38-e52a-43d2-b6ec-754a13ffb529
key_name: ansible_key
wait_for: 200
flavor_id: 4
nics:
- net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
meta:
hostname: test1
group: uge_master
# Creates a new VM in HP Cloud AE1 region availability zone az2 and automatically assigns a floating IP
- name: launch a nova instance
hosts: localhost
tasks:
- name: launch an instance
nova_compute:
state: present
login_username: username
login_password: Equality7-2521
login_tenant_name: username-project1
name: vm1
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
region_name: region-b.geo-1
availability_zone: az2
image_id: 9302692b-b787-4b52-a3a6-daebb79cb498
key_name: test
wait_for: 200
flavor_id: 101
security_groups: default
auto_floating_ip: yes
# Creates a new VM in HP Cloud AE1 region availability zone az2 and assigns a pre-known floating IP
- name: launch a nova instance
hosts: localhost
tasks:
- name: launch an instance
nova_compute:
state: present
login_username: username
login_password: Equality7-2521
login_tenant_name: username-project1
name: vm1
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
region_name: region-b.geo-1
availability_zone: az2
image_id: 9302692b-b787-4b52-a3a6-daebb79cb498
key_name: test
wait_for: 200
flavor_id: 101
        floating_ips:
- 12.34.56.79
# Creates a new VM with 4G of RAM on Ubuntu Trusty, ignoring deprecated images
- name: launch a nova instance
hosts: localhost
tasks:
- name: launch an instance
nova_compute:
name: vm1
state: present
login_username: username
login_password: Equality7-2521
login_tenant_name: username-project1
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
region_name: region-b.geo-1
image_name: Ubuntu Server 14.04
image_exclude: deprecated
flavor_ram: 4096
# Creates a new VM with 4G of RAM on Ubuntu Trusty on a Rackspace Performance node in DFW
- name: launch a nova instance
hosts: localhost
tasks:
- name: launch an instance
nova_compute:
name: vm1
state: present
login_username: username
login_password: Equality7-2521
login_tenant_name: username-project1
auth_url: https://identity.api.rackspacecloud.com/v2.0/
region_name: DFW
image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
flavor_ram: 4096
flavor_include: Performance
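# Illustrative addition (not from the original module documentation).
# Removes the VM created above; with state=absent only the name and
# credentials are needed:
- name: terminate a nova instance
  hosts: localhost
  tasks:
    - name: remove an instance
      nova_compute:
        name: vm1
        state: absent
        login_username: username
        login_password: Equality7-2521
        login_tenant_name: username-project1
        auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
        region_name: region-b.geo-1
        wait_for: 200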
'''
def _delete_server(module, nova):
name = None
server_list = None
try:
server_list = nova.servers.list(True, {'name': module.params['name']})
if server_list:
server = [x for x in server_list if x.name == module.params['name']]
nova.servers.delete(server.pop())
except Exception, e:
module.fail_json( msg = "Error in deleting vm: %s" % e.message)
if module.params['wait'] == 'no':
module.exit_json(changed = True, result = "deleted")
expire = time.time() + int(module.params['wait_for'])
while time.time() < expire:
name = nova.servers.list(True, {'name': module.params['name']})
if not name:
module.exit_json(changed = True, result = "deleted")
time.sleep(5)
module.fail_json(msg = "Timed out waiting for server to get deleted, please check manually")
def _add_floating_ip_from_pool(module, nova, server):
# instantiate FloatingIPManager object
floating_ip_obj = floating_ips.FloatingIPManager(nova)
# empty dict and list
usable_floating_ips = {}
pools = []
# user specified
pools = module.params['floating_ip_pools']
# get the list of all floating IPs. Mileage may
# vary according to Nova Compute configuration
# per cloud provider
all_floating_ips = floating_ip_obj.list()
# iterate through all pools of IP address. Empty
# string means all and is the default value
for pool in pools:
# temporary list per pool
pool_ips = []
# loop through all floating IPs
for f_ip in all_floating_ips:
# if not reserved and the correct pool, add
if f_ip.fixed_ip is None and (f_ip.pool == pool):
pool_ips.append(f_ip.ip)
# only need one
break
# if the list is empty, add for this pool
if not pool_ips:
try:
new_ip = nova.floating_ips.create(pool)
except Exception, e:
module.fail_json(msg = "Unable to create floating ip: %s" % (e.message))
pool_ips.append(new_ip.ip)
# Add to the main list
usable_floating_ips[pool] = pool_ips
# finally, add ip(s) to instance for each pool
for pool in usable_floating_ips:
for ip in usable_floating_ips[pool]:
try:
server.add_floating_ip(ip)
# We only need to assign one ip - but there is an inherent
# race condition and some other cloud operation may have
# stolen an available floating ip
break
except Exception, e:
module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message))
def _add_floating_ip_list(module, server, ips):
# add ip(s) to instance
for ip in ips:
try:
server.add_floating_ip(ip)
except Exception, e:
module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message))
def _add_auto_floating_ip(module, nova, server):
try:
new_ip = nova.floating_ips.create()
except Exception as e:
module.fail_json(msg = "Unable to create floating ip: %s" % (e))
try:
server.add_floating_ip(new_ip)
except Exception as e:
# Clean up - we auto-created this ip, and it's not attached
# to the server, so the cloud will not know what to do with it
        nova.floating_ips.delete(new_ip)
        module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (new_ip.ip, server.id, e.message))
def _add_floating_ip(module, nova, server):
if module.params['floating_ip_pools']:
_add_floating_ip_from_pool(module, nova, server)
elif module.params['floating_ips']:
_add_floating_ip_list(module, server, module.params['floating_ips'])
elif module.params['auto_floating_ip']:
_add_auto_floating_ip(module, nova, server)
else:
return server
# this may look redundant, but if there is now a
# floating IP, then it needs to be obtained from
# a recent server object if the above code path exec'd
try:
server = nova.servers.get(server.id)
except Exception, e:
module.fail_json(msg = "Error in getting info from instance: %s " % e.message)
return server
def _get_image_id(module, nova):
if module.params['image_name']:
for image in nova.images.list():
if (module.params['image_name'] in image.name and (
not module.params['image_exclude']
or module.params['image_exclude'] not in image.name)):
return image.id
module.fail_json(msg = "Error finding image id from name(%s)" % module.params['image_name'])
return module.params['image_id']
def _get_flavor_id(module, nova):
if module.params['flavor_ram']:
for flavor in sorted(nova.flavors.list(), key=operator.attrgetter('ram')):
if (flavor.ram >= module.params['flavor_ram'] and
(not module.params['flavor_include'] or module.params['flavor_include'] in flavor.name)):
return flavor.id
module.fail_json(msg = "Error finding flavor with %sMB of RAM" % module.params['flavor_ram'])
return module.params['flavor_id']
def _create_server(module, nova):
image_id = _get_image_id(module, nova)
flavor_id = _get_flavor_id(module, nova)
bootargs = [module.params['name'], image_id, flavor_id]
bootkwargs = {
'nics' : module.params['nics'],
'meta' : module.params['meta'],
'security_groups': module.params['security_groups'].split(','),
#userdata is unhyphenated in novaclient, but hyphenated here for consistency with the ec2 module:
'userdata': module.params['user_data'],
'config_drive': module.params['config_drive'],
}
for optional_param in ('region_name', 'key_name', 'availability_zone', 'scheduler_hints'):
if module.params[optional_param]:
bootkwargs[optional_param] = module.params[optional_param]
try:
server = nova.servers.create(*bootargs, **bootkwargs)
server = nova.servers.get(server.id)
except Exception, e:
module.fail_json( msg = "Error in creating instance: %s " % e.message)
if module.params['wait'] == 'yes':
expire = time.time() + int(module.params['wait_for'])
while time.time() < expire:
try:
server = nova.servers.get(server.id)
except Exception, e:
module.fail_json( msg = "Error in getting info from instance: %s" % e.message)
if server.status == 'ACTIVE':
server = _add_floating_ip(module, nova, server)
private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
# now exit with info
module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info)
if server.status == 'ERROR':
module.fail_json(msg = "Error in creating the server, please check logs")
time.sleep(2)
module.fail_json(msg = "Timeout waiting for the server to come up.. Please check manually")
if server.status == 'ERROR':
module.fail_json(msg = "Error in creating the server.. Please check manually")
private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
    module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info)
def _delete_floating_ip_list(module, nova, server, extra_ips):
for ip in extra_ips:
nova.servers.remove_floating_ip(server=server.id, address=ip)
def _check_floating_ips(module, nova, server):
changed = False
if module.params['floating_ip_pools'] or module.params['floating_ips'] or module.params['auto_floating_ip']:
ips = openstack_find_nova_addresses(server.addresses, 'floating')
if not ips:
# If we're configured to have a floating but we don't have one,
# let's add one
server = _add_floating_ip(module, nova, server)
changed = True
elif module.params['floating_ips']:
# we were configured to have specific ips, let's make sure we have
# those
missing_ips = []
for ip in module.params['floating_ips']:
if ip not in ips:
missing_ips.append(ip)
if missing_ips:
                _add_floating_ip_list(module, server, missing_ips)  # helper returns nothing; keep the existing server object
changed = True
extra_ips = []
for ip in ips:
if ip not in module.params['floating_ips']:
extra_ips.append(ip)
if extra_ips:
                _delete_floating_ip_list(module, nova, server, extra_ips)
changed = True
return (changed, server)
def _get_server_state(module, nova):
server = None
try:
servers = nova.servers.list(True, {'name': module.params['name']})
if servers:
# the {'name': module.params['name']} will also return servers
# with names that partially match the server name, so we have to
# strictly filter here
servers = [x for x in servers if x.name == module.params['name']]
if servers:
server = servers[0]
except Exception, e:
module.fail_json(msg = "Error in getting the server list: %s" % e.message)
if server and module.params['state'] == 'present':
if server.status != 'ACTIVE':
module.fail_json( msg="The VM is available but not Active. state:" + server.status)
(ip_changed, server) = _check_floating_ips(module, nova, server)
private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
module.exit_json(changed = ip_changed, id = server.id, public_ip = public, private_ip = private, info = server._info)
if server and module.params['state'] == 'absent':
return True
if module.params['state'] == 'absent':
module.exit_json(changed = False, result = "not present")
return True
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
name = dict(required=True),
image_id = dict(default=None),
image_name = dict(default=None),
image_exclude = dict(default='(deprecated)'),
flavor_id = dict(default=1),
flavor_ram = dict(default=None, type='int'),
flavor_include = dict(default=None),
key_name = dict(default=None),
security_groups = dict(default='default'),
nics = dict(default=None),
meta = dict(default=None),
wait = dict(default='yes', choices=['yes', 'no']),
wait_for = dict(default=180),
state = dict(default='present', choices=['absent', 'present']),
user_data = dict(default=None),
config_drive = dict(default=False, type='bool'),
auto_floating_ip = dict(default=False, type='bool'),
floating_ips = dict(default=None),
floating_ip_pools = dict(default=None),
scheduler_hints = dict(default=None),
))
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['auto_floating_ip','floating_ips'],
['auto_floating_ip','floating_ip_pools'],
['floating_ips','floating_ip_pools'],
['image_id','image_name'],
['flavor_id','flavor_ram'],
],
)
if not HAS_NOVACLIENT:
module.fail_json(msg='python-novaclient is required for this module')
nova = nova_client.Client(module.params['login_username'],
module.params['login_password'],
module.params['login_tenant_name'],
module.params['auth_url'],
region_name=module.params['region_name'],
service_type='compute')
try:
nova.authenticate()
except exceptions.Unauthorized, e:
module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message)
except exceptions.AuthorizationFailure, e:
module.fail_json(msg = "Unable to authorize user: %s" % e.message)
if module.params['state'] == 'present':
if not module.params['image_id'] and not module.params['image_name']:
module.fail_json( msg = "Parameter 'image_id' or `image_name` is required if state == 'present'")
else:
_get_server_state(module, nova)
_create_server(module, nova)
if module.params['state'] == 'absent':
_get_server_state(module, nova)
_delete_server(module, nova)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
amosonn/distributed | distributed/diagnostics/plugin.py | 2 | 1779 | from __future__ import print_function, division, absolute_import
import logging
logger = logging.getLogger(__name__)
class SchedulerPlugin(object):
""" Interface to extend the Scheduler
The scheduler operates by triggering and responding to events like
``task_finished``, ``update_graph``, ``task_erred``, etc..
A plugin enables custom code to run at each of those same events. The
scheduler will run the analogous methods on this class when each event is
triggered. This runs user code within the scheduler thread that can
perform arbitrary operations in synchrony with the scheduler itself.
Plugins are often used for diagnostics and measurement, but have full
access to the scheduler and could in principle affect core scheduling.
To implement a plugin implement some of the methods of this class and add
the plugin to the scheduler with ``Scheduler.add_plugin(myplugin)``.
Examples
--------
>>> class Counter(SchedulerPlugin):
... def __init__(self):
... self.counter = 0
...
... def transition(self, key, start, finish, *args, **kwargs):
... if start == 'processing' and finish == 'memory':
... self.counter += 1
...
... def restart(self, scheduler):
... self.counter = 0
>>> c = Counter()
>>> scheduler.add_plugin(c) # doctest: +SKIP
"""
def update_graph(self, scheduler, dsk=None, keys=None,
restrictions=None, **kwargs):
""" Run when a new graph / tasks enter the scheduler """
pass
def restart(self, scheduler, **kwargs):
""" Run when the scheduler restarts itself """
pass
def transition(self, key, start, finish, *args, **kwargs):
pass
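# Illustrative sketch (added for clarity; ProcessingTimer is a hypothetical
# plugin, not part of this module). It records how long each task spends in
# the 'processing' state by reacting to transitions:
#
#     import time
#
#     class ProcessingTimer(SchedulerPlugin):
#         def __init__(self):
#             self.start_times = {}
#             self.durations = {}
#
#         def transition(self, key, start, finish, *args, **kwargs):
#             if finish == 'processing':
#                 self.start_times[key] = time.time()
#             elif start == 'processing' and key in self.start_times:
#                 self.durations[key] = time.time() - self.start_times.pop(key)
#
#     # scheduler.add_plugin(ProcessingTimer())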
| bsd-3-clause |
andrewklau/openshift-tools | openshift/installer/vendored/openshift-ansible-git-2016-04-27/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py | 91 | 5294 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
"""Ansible module for modifying OpenShift configs during an upgrade"""
import os
import yaml
DOCUMENTATION = '''
---
module: openshift_upgrade_config
short_description: OpenShift Upgrade Config
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
def modify_api_levels(level_list, remove, ensure, msg_prepend='',
msg_append=''):
""" modify_api_levels """
changed = False
changes = []
if not isinstance(remove, list):
remove = []
if not isinstance(ensure, list):
ensure = []
if not isinstance(level_list, list):
new_list = []
changed = True
changes.append("%s created missing %s" % (msg_prepend, msg_append))
else:
new_list = level_list
for level in remove:
if level in new_list:
new_list.remove(level)
changed = True
changes.append("%s removed %s %s" % (msg_prepend, level, msg_append))
for level in ensure:
if level not in new_list:
new_list.append(level)
changed = True
changes.append("%s added %s %s" % (msg_prepend, level, msg_append))
return {'new_list': new_list, 'changed': changed, 'changes': changes}
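# Worked example (added for clarity, derived directly from the function above):
#
#     modify_api_levels(['v1beta3', 'v1'],
#                       remove=['v1beta1', 'v1beta2', 'v1beta3'],
#                       ensure=['v1'],
#                       msg_prepend='master-config.yaml:',
#                       msg_append='from apiLevels')
#
# returns:
#
#     {'new_list': ['v1'],
#      'changed': True,
#      'changes': ['master-config.yaml: removed v1beta3 from apiLevels']}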
def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
"""Main upgrade method for 3.0 to 3.1."""
changes = []
# Facts do not get transferred to the hosts where custom modules run,
# need to make some assumptions here.
master_config = os.path.join(config_base, 'master/master-config.yaml')
master_cfg_file = open(master_config, 'r')
config = yaml.safe_load(master_cfg_file.read())
master_cfg_file.close()
# Remove unsupported api versions and ensure supported api versions from
# master config
unsupported_levels = ['v1beta1', 'v1beta2', 'v1beta3']
supported_levels = ['v1']
result = modify_api_levels(config.get('apiLevels'), unsupported_levels,
supported_levels, 'master-config.yaml:', 'from apiLevels')
if result['changed']:
config['apiLevels'] = result['new_list']
changes.append(result['changes'])
if 'kubernetesMasterConfig' in config and 'apiLevels' in config['kubernetesMasterConfig']:
config['kubernetesMasterConfig'].pop('apiLevels')
changes.append('master-config.yaml: removed kubernetesMasterConfig.apiLevels')
# Add masterCA to serviceAccountConfig
if 'serviceAccountConfig' in config and 'masterCA' not in config['serviceAccountConfig']:
config['serviceAccountConfig']['masterCA'] = config['oauthConfig'].get('masterCA', 'ca.crt')
# Add proxyClientInfo to master-config
if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
config['kubernetesMasterConfig']['proxyClientInfo'] = {
'certFile': 'master.proxy-client.crt',
'keyFile': 'master.proxy-client.key'
}
changes.append("master-config.yaml: added proxyClientInfo")
if len(changes) > 0:
if backup:
# TODO: Check success:
ansible_module.backup_local(master_config)
# Write the modified config:
out_file = open(master_config, 'w')
out_file.write(yaml.safe_dump(config, default_flow_style=False))
out_file.close()
return changes
def upgrade_master(ansible_module, config_base, from_version, to_version, backup):
"""Upgrade entry point."""
if from_version == '3.0':
if to_version == '3.1':
return upgrade_master_3_0_to_3_1(ansible_module, config_base, backup)
def main():
""" main """
# disabling pylint errors for global-variable-undefined and invalid-name
# for 'global module' usage, since it is required to use ansible_facts
# pylint: disable=global-variable-undefined, invalid-name,
# redefined-outer-name
global module
module = AnsibleModule(
argument_spec=dict(
config_base=dict(required=True),
from_version=dict(required=True, choices=['3.0']),
to_version=dict(required=True, choices=['3.1']),
role=dict(required=True, choices=['master']),
backup=dict(required=False, default=True, type='bool')
),
supports_check_mode=True,
)
from_version = module.params['from_version']
to_version = module.params['to_version']
role = module.params['role']
backup = module.params['backup']
config_base = module.params['config_base']
try:
changes = []
if role == 'master':
changes = upgrade_master(module, config_base, from_version,
to_version, backup)
changed = len(changes) > 0
return module.exit_json(changed=changed, changes=changes)
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
except Exception, e:
return module.fail_json(msg=str(e))
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| apache-2.0 |
ddasilva/numpy | numpy/distutils/command/build_scripts.py | 264 | 1731 | """ Modified version of build_scripts that handles building scripts from functions.
"""
from __future__ import division, absolute_import, print_function
from distutils.command.build_scripts import build_scripts as old_build_scripts
from numpy.distutils import log
from numpy.distutils.misc_util import is_string
class build_scripts(old_build_scripts):
def generate_scripts(self, scripts):
new_scripts = []
func_scripts = []
for script in scripts:
if is_string(script):
new_scripts.append(script)
else:
func_scripts.append(script)
if not func_scripts:
return new_scripts
build_dir = self.build_dir
self.mkpath(build_dir)
for func in func_scripts:
script = func(build_dir)
if not script:
continue
if is_string(script):
log.info(" adding '%s' to scripts" % (script,))
new_scripts.append(script)
else:
[log.info(" adding '%s' to scripts" % (s,)) for s in script]
new_scripts.extend(list(script))
return new_scripts
def run (self):
if not self.scripts:
return
self.scripts = self.generate_scripts(self.scripts)
# Now make sure that the distribution object has this list of scripts.
# setuptools' develop command requires that this be a list of filenames,
# not functions.
self.distribution.scripts = self.scripts
return old_build_scripts.run(self)
def get_source_files(self):
from numpy.distutils.misc_util import get_script_files
return get_script_files(self.scripts)
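# Illustrative sketch (added for clarity; the function and file name are
# hypothetical). A scripts entry may be a callable taking the build directory
# and returning the path(s) of the script(s) it wrote:
#
#     import os
#
#     def generate_hello_script(build_dir):
#         path = os.path.join(build_dir, 'hello-script')
#         with open(path, 'w') as f:
#             f.write('#!/usr/bin/env python\nprint("hello")\n')
#         return path
#
#     # setup(..., scripts=[generate_hello_script, 'plain_script.py'])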
| bsd-3-clause |
LockScreen/Backend | venv/lib/python2.7/site-packages/boto/codedeploy/layer1.py | 135 | 40600 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.codedeploy import exceptions
class CodeDeployConnection(AWSQueryConnection):
"""
AWS CodeDeploy **Overview**
This is the AWS CodeDeploy API Reference. This guide provides
descriptions of the AWS CodeDeploy APIs. For additional
information, see the `AWS CodeDeploy User Guide`_.
**Using the APIs**
You can use the AWS CodeDeploy APIs to work with the following
items:
+ Applications , which are unique identifiers that AWS CodeDeploy
uses to ensure that the correct combinations of revisions,
deployment configurations, and deployment groups are being
referenced during deployments. You can work with applications by
calling CreateApplication, DeleteApplication, GetApplication,
ListApplications, BatchGetApplications, and UpdateApplication to
create, delete, and get information about applications, and to
change information about an application, respectively.
+ Deployment configurations , which are sets of deployment rules
and deployment success and failure conditions that AWS CodeDeploy
uses during deployments. You can work with deployment
configurations by calling CreateDeploymentConfig,
DeleteDeploymentConfig, GetDeploymentConfig, and
ListDeploymentConfigs to create, delete, and get information about
deployment configurations, respectively.
+ Deployment groups , which represent groups of Amazon EC2
instances to which application revisions can be deployed. You can
work with deployment groups by calling CreateDeploymentGroup,
DeleteDeploymentGroup, GetDeploymentGroup, ListDeploymentGroups,
and UpdateDeploymentGroup to create, delete, and get information
about single and multiple deployment groups, and to change
information about a deployment group, respectively.
+ Deployment instances (also known simply as instances ), which
represent Amazon EC2 instances to which application revisions are
deployed. Deployment instances are identified by their Amazon EC2
tags or Auto Scaling group names. Deployment instances belong to
deployment groups. You can work with deployment instances by
calling GetDeploymentInstance and ListDeploymentInstances to get
information about single and multiple deployment instances,
respectively.
+ Deployments , which represent the process of deploying revisions
to deployment groups. You can work with deployments by calling
CreateDeployment, GetDeployment, ListDeployments,
BatchGetDeployments, and StopDeployment to create and get
information about deployments, and to stop a deployment,
respectively.
+ Application revisions (also known simply as revisions ), which
are archive files that are stored in Amazon S3 buckets or GitHub
repositories. These revisions contain source content (such as
source code, web pages, executable files, any deployment scripts,
and similar) along with an Application Specification file (AppSpec
file). (The AppSpec file is unique to AWS CodeDeploy; it defines a
series of deployment actions that you want AWS CodeDeploy to
execute.) An application revision is uniquely identified by its
Amazon S3 object key and its ETag, version, or both. Application
revisions are deployed to deployment groups. You can work with
application revisions by calling GetApplicationRevision,
ListApplicationRevisions, and RegisterApplicationRevision to get
information about application revisions and to inform AWS
CodeDeploy about an application revision, respectively.
"""
APIVersion = "2014-10-06"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "codedeploy.us-east-1.amazonaws.com"
ServiceName = "codedeploy"
TargetPrefix = "CodeDeploy_20141006"
ResponseError = JSONResponseError
_faults = {
"InvalidDeploymentIdException": exceptions.InvalidDeploymentIdException,
"InvalidDeploymentGroupNameException": exceptions.InvalidDeploymentGroupNameException,
"DeploymentConfigAlreadyExistsException": exceptions.DeploymentConfigAlreadyExistsException,
"InvalidRoleException": exceptions.InvalidRoleException,
"RoleRequiredException": exceptions.RoleRequiredException,
"DeploymentGroupAlreadyExistsException": exceptions.DeploymentGroupAlreadyExistsException,
"DeploymentConfigLimitExceededException": exceptions.DeploymentConfigLimitExceededException,
"InvalidNextTokenException": exceptions.InvalidNextTokenException,
"InvalidDeploymentConfigNameException": exceptions.InvalidDeploymentConfigNameException,
"InvalidSortByException": exceptions.InvalidSortByException,
"InstanceDoesNotExistException": exceptions.InstanceDoesNotExistException,
"InvalidMinimumHealthyHostValueException": exceptions.InvalidMinimumHealthyHostValueException,
"ApplicationLimitExceededException": exceptions.ApplicationLimitExceededException,
"ApplicationNameRequiredException": exceptions.ApplicationNameRequiredException,
"InvalidEC2TagException": exceptions.InvalidEC2TagException,
"DeploymentDoesNotExistException": exceptions.DeploymentDoesNotExistException,
"DeploymentLimitExceededException": exceptions.DeploymentLimitExceededException,
"InvalidInstanceStatusException": exceptions.InvalidInstanceStatusException,
"RevisionRequiredException": exceptions.RevisionRequiredException,
"InvalidBucketNameFilterException": exceptions.InvalidBucketNameFilterException,
"DeploymentGroupLimitExceededException": exceptions.DeploymentGroupLimitExceededException,
"DeploymentGroupDoesNotExistException": exceptions.DeploymentGroupDoesNotExistException,
"DeploymentConfigNameRequiredException": exceptions.DeploymentConfigNameRequiredException,
"DeploymentAlreadyCompletedException": exceptions.DeploymentAlreadyCompletedException,
"RevisionDoesNotExistException": exceptions.RevisionDoesNotExistException,
"DeploymentGroupNameRequiredException": exceptions.DeploymentGroupNameRequiredException,
"DeploymentIdRequiredException": exceptions.DeploymentIdRequiredException,
"DeploymentConfigDoesNotExistException": exceptions.DeploymentConfigDoesNotExistException,
"BucketNameFilterRequiredException": exceptions.BucketNameFilterRequiredException,
"InvalidTimeRangeException": exceptions.InvalidTimeRangeException,
"ApplicationDoesNotExistException": exceptions.ApplicationDoesNotExistException,
"InvalidRevisionException": exceptions.InvalidRevisionException,
"InvalidSortOrderException": exceptions.InvalidSortOrderException,
"InvalidOperationException": exceptions.InvalidOperationException,
"InvalidAutoScalingGroupException": exceptions.InvalidAutoScalingGroupException,
"InvalidApplicationNameException": exceptions.InvalidApplicationNameException,
"DescriptionTooLongException": exceptions.DescriptionTooLongException,
"ApplicationAlreadyExistsException": exceptions.ApplicationAlreadyExistsException,
"InvalidDeployedStateFilterException": exceptions.InvalidDeployedStateFilterException,
"DeploymentNotStartedException": exceptions.DeploymentNotStartedException,
"DeploymentConfigInUseException": exceptions.DeploymentConfigInUseException,
"InstanceIdRequiredException": exceptions.InstanceIdRequiredException,
"InvalidKeyPrefixFilterException": exceptions.InvalidKeyPrefixFilterException,
"InvalidDeploymentStatusException": exceptions.InvalidDeploymentStatusException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(CodeDeployConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def batch_get_applications(self, application_names=None):
"""
Gets information about one or more applications.
:type application_names: list
:param application_names: A list of application names, with multiple
application names separated by spaces.
"""
params = {}
if application_names is not None:
params['applicationNames'] = application_names
return self.make_request(action='BatchGetApplications',
body=json.dumps(params))
def batch_get_deployments(self, deployment_ids=None):
"""
Gets information about one or more deployments.
:type deployment_ids: list
:param deployment_ids: A list of deployment IDs, with multiple
deployment IDs separated by spaces.
"""
params = {}
if deployment_ids is not None:
params['deploymentIds'] = deployment_ids
return self.make_request(action='BatchGetDeployments',
body=json.dumps(params))
def create_application(self, application_name):
"""
Creates a new application.
:type application_name: string
:param application_name: The name of the application. This name must be
unique within the AWS user account.
"""
params = {'applicationName': application_name, }
return self.make_request(action='CreateApplication',
body=json.dumps(params))
def create_deployment(self, application_name, deployment_group_name=None,
revision=None, deployment_config_name=None,
description=None,
ignore_application_stop_failures=None):
"""
Deploys an application revision to the specified deployment
group.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The deployment group's name.
:type revision: dict
:param revision: The type of revision to deploy, along with information
about the revision's location.
:type deployment_config_name: string
:param deployment_config_name: The name of an existing deployment
configuration within the AWS user account.
If not specified, the value configured in the deployment group will be
used as the default. If the deployment group does not have a
deployment configuration associated with it, then
CodeDeployDefault.OneAtATime will be used by default.
:type description: string
:param description: A comment about the deployment.
:type ignore_application_stop_failures: boolean
:param ignore_application_stop_failures: If set to true, then if the
deployment causes the ApplicationStop deployment lifecycle event to
fail to a specific instance, the deployment will not be considered
to have failed to that instance at that point and will continue on
to the BeforeInstall deployment lifecycle event.
If set to false or not specified, then if the deployment causes the
ApplicationStop deployment lifecycle event to fail to a specific
instance, the deployment will stop to that instance, and the
deployment to that instance will be considered to have failed.
"""
params = {'applicationName': application_name, }
if deployment_group_name is not None:
params['deploymentGroupName'] = deployment_group_name
if revision is not None:
params['revision'] = revision
if deployment_config_name is not None:
params['deploymentConfigName'] = deployment_config_name
if description is not None:
params['description'] = description
if ignore_application_stop_failures is not None:
params['ignoreApplicationStopFailures'] = ignore_application_stop_failures
return self.make_request(action='CreateDeployment',
body=json.dumps(params))
def create_deployment_config(self, deployment_config_name,
minimum_healthy_hosts=None):
"""
Creates a new deployment configuration.
:type deployment_config_name: string
:param deployment_config_name: The name of the deployment configuration
to create.
:type minimum_healthy_hosts: dict
:param minimum_healthy_hosts: The minimum number of healthy instances
that should be available at any time during the deployment. There
are two parameters expected in the input: type and value.
The type parameter takes either of the following values:
+ HOST_COUNT: The value parameter represents the minimum number of
healthy instances, as an absolute value.
+ FLEET_PERCENT: The value parameter represents the minimum number of
healthy instances, as a percentage of the total number of instances
in the deployment. If you specify FLEET_PERCENT, then at the start
of the deployment AWS CodeDeploy converts the percentage to the
equivalent number of instances and rounds fractional instances up.
The value parameter takes an integer.
For example, to set a minimum of 95% healthy instances, specify a type
of FLEET_PERCENT and a value of 95.
"""
params = {'deploymentConfigName': deployment_config_name, }
if minimum_healthy_hosts is not None:
params['minimumHealthyHosts'] = minimum_healthy_hosts
return self.make_request(action='CreateDeploymentConfig',
body=json.dumps(params))
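    # Illustrative usage sketch (added for clarity; the configuration name is
    # hypothetical). Per the docstring above, minimum_healthy_hosts is a dict
    # with 'type' and 'value' keys:
    #
    #     conn = CodeDeployConnection()   # credentials from the usual boto config
    #     conn.create_deployment_config(
    #         'ThreeQuartersHealthy',
    #         minimum_healthy_hosts={'type': 'FLEET_PERCENT', 'value': 75})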
def create_deployment_group(self, application_name,
deployment_group_name,
deployment_config_name=None,
ec_2_tag_filters=None,
auto_scaling_groups=None,
service_role_arn=None):
"""
Creates a new deployment group for application revisions to be
deployed to.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The name of an existing deployment group
for the specified application.
:type deployment_config_name: string
:param deployment_config_name: If specified, the deployment
configuration name must be one of the predefined values, or it can
be a custom deployment configuration:
+ CodeDeployDefault.AllAtOnce deploys an application revision to up to
all of the Amazon EC2 instances at once. The overall deployment
succeeds if the application revision deploys to at least one of the
instances. The overall deployment fails after the application
revision fails to deploy to all of the instances. For example, for
9 instances, deploy to up to all 9 instances at once. The overall
deployment succeeds if any of the 9 instances is successfully
deployed to, and it fails if all 9 instances fail to be deployed
to.
+ CodeDeployDefault.HalfAtATime deploys to up to half of the instances
at a time (with fractions rounded down). The overall deployment
succeeds if the application revision deploys to at least half of
the instances (with fractions rounded up); otherwise, the
deployment fails. For example, for 9 instances, deploy to up to 4
instances at a time. The overall deployment succeeds if 5 or more
instances are successfully deployed to; otherwise, the deployment
fails. Note that the deployment may successfully deploy to some
instances, even if the overall deployment fails.
+ CodeDeployDefault.OneAtATime deploys the application revision to only
one of the instances at a time. The overall deployment succeeds if
the application revision deploys to all of the instances. The
overall deployment fails after the application revision first fails
to deploy to any one instance. For example, for 9 instances, deploy
to one instance at a time. The overall deployment succeeds if all 9
instances are successfully deployed to, and it fails if any of one
of the 9 instances fail to be deployed to. Note that the deployment
may successfully deploy to some instances, even if the overall
deployment fails. This is the default deployment configuration if a
configuration isn't specified for either the deployment or the
deployment group.
To create a custom deployment configuration, call the create deployment
configuration operation.
:type ec_2_tag_filters: list
:param ec_2_tag_filters: The Amazon EC2 tags to filter on.
:type auto_scaling_groups: list
:param auto_scaling_groups: A list of associated Auto Scaling groups.
:type service_role_arn: string
:param service_role_arn: A service role ARN that allows AWS CodeDeploy
to act on the user's behalf when interacting with AWS services.
"""
params = {
'applicationName': application_name,
'deploymentGroupName': deployment_group_name,
}
if deployment_config_name is not None:
params['deploymentConfigName'] = deployment_config_name
if ec_2_tag_filters is not None:
params['ec2TagFilters'] = ec_2_tag_filters
if auto_scaling_groups is not None:
params['autoScalingGroups'] = auto_scaling_groups
if service_role_arn is not None:
params['serviceRoleArn'] = service_role_arn
return self.make_request(action='CreateDeploymentGroup',
body=json.dumps(params))
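    # Illustrative sketch (not part of the original boto source): one possible call
    # to create_deployment_group. The application, group, and role names below are
    # hypothetical, and the tag-filter dict shape is an assumption based on the AWS
    # CodeDeploy API rather than something defined in this module.
    #
    #   conn.create_deployment_group(
    #       application_name='SampleApp',
    #       deployment_group_name='Staging',
    #       deployment_config_name='CodeDeployDefault.OneAtATime',
    #       ec_2_tag_filters=[{'Key': 'Env', 'Value': 'staging', 'Type': 'KEY_AND_VALUE'}],
    #       service_role_arn='arn:aws:iam::123456789012:role/CodeDeployServiceRole')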
def delete_application(self, application_name):
"""
Deletes an application.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
"""
params = {'applicationName': application_name, }
return self.make_request(action='DeleteApplication',
body=json.dumps(params))
def delete_deployment_config(self, deployment_config_name):
"""
Deletes a deployment configuration.
A deployment configuration cannot be deleted if it is
currently in use. Also, predefined configurations cannot be
deleted.
:type deployment_config_name: string
:param deployment_config_name: The name of an existing deployment
configuration within the AWS user account.
"""
params = {'deploymentConfigName': deployment_config_name, }
return self.make_request(action='DeleteDeploymentConfig',
body=json.dumps(params))
def delete_deployment_group(self, application_name,
deployment_group_name):
"""
Deletes a deployment group.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The name of an existing deployment group
for the specified application.
"""
params = {
'applicationName': application_name,
'deploymentGroupName': deployment_group_name,
}
return self.make_request(action='DeleteDeploymentGroup',
body=json.dumps(params))
def get_application(self, application_name):
"""
Gets information about an application.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
"""
params = {'applicationName': application_name, }
return self.make_request(action='GetApplication',
body=json.dumps(params))
def get_application_revision(self, application_name, revision):
"""
Gets information about an application revision.
:type application_name: string
:param application_name: The name of the application that corresponds
to the revision.
:type revision: dict
:param revision: Information about the application revision to get,
including the revision's type and its location.
"""
params = {
'applicationName': application_name,
'revision': revision,
}
return self.make_request(action='GetApplicationRevision',
body=json.dumps(params))
def get_deployment(self, deployment_id):
"""
Gets information about a deployment.
:type deployment_id: string
:param deployment_id: An existing deployment ID within the AWS user
account.
"""
params = {'deploymentId': deployment_id, }
return self.make_request(action='GetDeployment',
body=json.dumps(params))
def get_deployment_config(self, deployment_config_name):
"""
Gets information about a deployment configuration.
:type deployment_config_name: string
:param deployment_config_name: The name of an existing deployment
configuration within the AWS user account.
"""
params = {'deploymentConfigName': deployment_config_name, }
return self.make_request(action='GetDeploymentConfig',
body=json.dumps(params))
def get_deployment_group(self, application_name, deployment_group_name):
"""
Gets information about a deployment group.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The name of an existing deployment group
for the specified application.
"""
params = {
'applicationName': application_name,
'deploymentGroupName': deployment_group_name,
}
return self.make_request(action='GetDeploymentGroup',
body=json.dumps(params))
def get_deployment_instance(self, deployment_id, instance_id):
"""
Gets information about an Amazon EC2 instance as part of a
deployment.
:type deployment_id: string
:param deployment_id: The unique ID of a deployment.
:type instance_id: string
:param instance_id: The unique ID of an Amazon EC2 instance in the
deployment's deployment group.
"""
params = {
'deploymentId': deployment_id,
'instanceId': instance_id,
}
return self.make_request(action='GetDeploymentInstance',
body=json.dumps(params))
def list_application_revisions(self, application_name, sort_by=None,
sort_order=None, s_3_bucket=None,
s_3_key_prefix=None, deployed=None,
next_token=None):
"""
Lists information about revisions for an application.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type sort_by: string
:param sort_by: The column name to sort the list results by:
+ registerTime: Sort the list results by when the revisions were
registered with AWS CodeDeploy.
+ firstUsedTime: Sort the list results by when the revisions were first
          used in a deployment.
+ lastUsedTime: Sort the list results by when the revisions were last
used in a deployment.
If not specified or set to null, the results will be returned in an
arbitrary order.
:type sort_order: string
:param sort_order: The order to sort the list results by:
+ ascending: Sort the list results in ascending order.
+ descending: Sort the list results in descending order.
If not specified, the results will be sorted in ascending order.
If set to null, the results will be sorted in an arbitrary order.
:type s_3_bucket: string
:param s_3_bucket: A specific Amazon S3 bucket name to limit the search
for revisions.
If set to null, then all of the user's buckets will be searched.
:type s_3_key_prefix: string
:param s_3_key_prefix: A specific key prefix for the set of Amazon S3
objects to limit the search for revisions.
:type deployed: string
:param deployed:
Whether to list revisions based on whether the revision is the target
        revision of a deployment group:
+ include: List revisions that are target revisions of a deployment
group.
+ exclude: Do not list revisions that are target revisions of a
deployment group.
+ ignore: List all revisions, regardless of whether they are target
revisions of a deployment group.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list application revisions call, which can be used to return the
next set of applications in the list.
"""
params = {'applicationName': application_name, }
if sort_by is not None:
params['sortBy'] = sort_by
if sort_order is not None:
params['sortOrder'] = sort_order
if s_3_bucket is not None:
params['s3Bucket'] = s_3_bucket
if s_3_key_prefix is not None:
params['s3KeyPrefix'] = s_3_key_prefix
if deployed is not None:
params['deployed'] = deployed
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListApplicationRevisions',
body=json.dumps(params))
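    # Illustrative sketch (assumption, not from the original source): paging through
    # every revision of a hypothetical application by following nextToken until the
    # service stops returning one. The 'revisions'/'nextToken' response keys are
    # assumed from the AWS CodeDeploy API.
    #
    #   token = None
    #   while True:
    #       page = conn.list_application_revisions('SampleApp', next_token=token)
    #       for revision in page.get('revisions', []):
    #           print(revision)
    #       token = page.get('nextToken')
    #       if not token:
    #           break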
def list_applications(self, next_token=None):
"""
Lists the applications registered within the AWS user account.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list applications call, which can be used to return the next set of
applications in the list.
"""
params = {}
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListApplications',
body=json.dumps(params))
def list_deployment_configs(self, next_token=None):
"""
Lists the deployment configurations within the AWS user
account.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list deployment configurations call, which can be used to return
the next set of deployment configurations in the list.
"""
params = {}
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListDeploymentConfigs',
body=json.dumps(params))
def list_deployment_groups(self, application_name, next_token=None):
"""
Lists the deployment groups for an application registered
within the AWS user account.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list deployment groups call, which can be used to return the next
set of deployment groups in the list.
"""
params = {'applicationName': application_name, }
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListDeploymentGroups',
body=json.dumps(params))
def list_deployment_instances(self, deployment_id, next_token=None,
instance_status_filter=None):
"""
Lists the Amazon EC2 instances for a deployment within the AWS
user account.
:type deployment_id: string
:param deployment_id: The unique ID of a deployment.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list deployment instances call, which can be used to return the
next set of deployment instances in the list.
:type instance_status_filter: list
:param instance_status_filter:
A subset of instances to list, by status:
+ Pending: Include in the resulting list those instances with pending
deployments.
+ InProgress: Include in the resulting list those instances with in-
progress deployments.
+ Succeeded: Include in the resulting list those instances with
succeeded deployments.
+ Failed: Include in the resulting list those instances with failed
deployments.
+ Skipped: Include in the resulting list those instances with skipped
deployments.
+ Unknown: Include in the resulting list those instances with
deployments in an unknown state.
"""
params = {'deploymentId': deployment_id, }
if next_token is not None:
params['nextToken'] = next_token
if instance_status_filter is not None:
params['instanceStatusFilter'] = instance_status_filter
return self.make_request(action='ListDeploymentInstances',
body=json.dumps(params))
def list_deployments(self, application_name=None,
deployment_group_name=None,
include_only_statuses=None, create_time_range=None,
next_token=None):
"""
Lists the deployments under a deployment group for an
application registered within the AWS user account.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type deployment_group_name: string
:param deployment_group_name: The name of an existing deployment group
for the specified application.
:type include_only_statuses: list
:param include_only_statuses: A subset of deployments to list, by
status:
+ Created: Include in the resulting list created deployments.
+ Queued: Include in the resulting list queued deployments.
+ In Progress: Include in the resulting list in-progress deployments.
+ Succeeded: Include in the resulting list succeeded deployments.
+ Failed: Include in the resulting list failed deployments.
+ Aborted: Include in the resulting list aborted deployments.
:type create_time_range: dict
:param create_time_range: A deployment creation start- and end-time
range for returning a subset of the list of deployments.
:type next_token: string
:param next_token: An identifier that was returned from the previous
list deployments call, which can be used to return the next set of
deployments in the list.
"""
params = {}
if application_name is not None:
params['applicationName'] = application_name
if deployment_group_name is not None:
params['deploymentGroupName'] = deployment_group_name
if include_only_statuses is not None:
params['includeOnlyStatuses'] = include_only_statuses
if create_time_range is not None:
params['createTimeRange'] = create_time_range
if next_token is not None:
params['nextToken'] = next_token
return self.make_request(action='ListDeployments',
body=json.dumps(params))
def register_application_revision(self, application_name, revision,
description=None):
"""
Registers with AWS CodeDeploy a revision for the specified
application.
:type application_name: string
:param application_name: The name of an existing AWS CodeDeploy
application within the AWS user account.
:type description: string
:param description: A comment about the revision.
:type revision: dict
:param revision: Information about the application revision to
register, including the revision's type and its location.
"""
params = {
'applicationName': application_name,
'revision': revision,
}
if description is not None:
params['description'] = description
return self.make_request(action='RegisterApplicationRevision',
body=json.dumps(params))
def stop_deployment(self, deployment_id):
"""
Attempts to stop an ongoing deployment.
:type deployment_id: string
:param deployment_id: The unique ID of a deployment.
"""
params = {'deploymentId': deployment_id, }
return self.make_request(action='StopDeployment',
body=json.dumps(params))
def update_application(self, application_name=None,
new_application_name=None):
"""
Changes an existing application's name.
:type application_name: string
:param application_name: The current name of the application that you
want to change.
:type new_application_name: string
:param new_application_name: The new name that you want to change the
application to.
"""
params = {}
if application_name is not None:
params['applicationName'] = application_name
if new_application_name is not None:
params['newApplicationName'] = new_application_name
return self.make_request(action='UpdateApplication',
body=json.dumps(params))
def update_deployment_group(self, application_name,
current_deployment_group_name,
new_deployment_group_name=None,
deployment_config_name=None,
ec_2_tag_filters=None,
auto_scaling_groups=None,
service_role_arn=None):
"""
Changes information about an existing deployment group.
:type application_name: string
:param application_name: The application name corresponding to the
deployment group to update.
:type current_deployment_group_name: string
:param current_deployment_group_name: The current name of the existing
deployment group.
:type new_deployment_group_name: string
:param new_deployment_group_name: The new name of the deployment group,
if you want to change it.
:type deployment_config_name: string
:param deployment_config_name: The replacement deployment configuration
name to use, if you want to change it.
:type ec_2_tag_filters: list
:param ec_2_tag_filters: The replacement set of Amazon EC2 tags to
filter on, if you want to change them.
:type auto_scaling_groups: list
:param auto_scaling_groups: The replacement list of Auto Scaling groups
to be included in the deployment group, if you want to change them.
:type service_role_arn: string
:param service_role_arn: A replacement service role's ARN, if you want
to change it.
"""
params = {
'applicationName': application_name,
'currentDeploymentGroupName': current_deployment_group_name,
}
if new_deployment_group_name is not None:
params['newDeploymentGroupName'] = new_deployment_group_name
if deployment_config_name is not None:
params['deploymentConfigName'] = deployment_config_name
if ec_2_tag_filters is not None:
params['ec2TagFilters'] = ec_2_tag_filters
if auto_scaling_groups is not None:
params['autoScalingGroups'] = auto_scaling_groups
if service_role_arn is not None:
params['serviceRoleArn'] = service_role_arn
return self.make_request(action='UpdateDeploymentGroup',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
| mit |
Bauble/bauble.api | bauble/routes/auth.py | 1 | 4072 | """
All routes in Bauble use HTTP basic auth.
"""
from datetime import datetime, timedelta
import os
import smtplib
import bottle
from bottle import request
import sqlalchemy as sa
import bauble
import bauble.config as config
import bauble.db as db
import bauble.email as email
from bauble import app, API_ROOT
from bauble.middleware import basic_auth
from bauble.model import User
from bauble.utils import create_unique_token
def create_access_token():
return create_unique_token(), datetime.now() + timedelta(weeks=2)
def create_password_reset_token():
return create_unique_token(), datetime.now() + timedelta(days=1)
@app.get(API_ROOT + "/login")
def login():
auth = request.auth
if not auth:
bottle.abort(401, "No Authorization header.")
username, password = auth
session = db.Session()
try:
user = session.query(User).filter(sa.func.lower(User.email) == username.lower()).first()
        if not user or user.password != password:
bottle.abort(401) # not authorized
user.access_token, user.access_token_expiration = create_access_token()
user.last_accessed = datetime.now()
session.commit()
user_json = user.json()
finally:
session.close()
return user_json
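# A possible client-side sketch (assumption, not part of this module): hitting the
# login route with HTTP basic auth using the third-party `requests` library. The
# host name and credentials are hypothetical; API_ROOT is whatever bauble is
# configured with.
#
#   import requests
#   resp = requests.get('https://bauble.example.com' + API_ROOT + '/login',
#                       auth=('user@example.com', 'secret'))
#   user = resp.json()   # includes the freshly issued access token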
@app.get(API_ROOT + "/logout")
@basic_auth
def logout():
request.user.access_token = None
request.user.access_token_expiration = None
request.session.commit()
@app.post(API_ROOT + "/forgot-password")
def forgot_password():
user_email = request.params.get('email', None)
    if not user_email or '@' not in user_email:
        bottle.abort(400, "Valid email address required")
session = None
try:
session = db.Session()
user = session.query(User)\
.filter(sa.func.lower(User.email) == user_email.lower())\
.first()
if not user:
bottle.abort(422, "Could not get a user with the requested email address")
token, expiration = create_password_reset_token()
user.password_reset_token = token
user.password_reset_token_expiration = expiration
session.commit()
finally:
if session:
session.close()
app_url = config.get("BAUBLE_APP_URL")
mappings = {'token': token, 'email': user_email, 'app_url': app_url}
try:
email.send_template('reset_password.txt', mappings, **{
'to': user_email,
'from': '[email protected]',
'subject': 'Bauble Password Reset'})
except smtplib.SMTPException as exc:
print(exc)
bottle.abort(500, 'Could not send reset password email.')
@app.post(API_ROOT + "/reset-password")
def reset_password():
session = None
user_email = request.json['email']
try:
session = db.Session()
user = session.query(User).filter(sa.func.lower(User.email) == user_email.lower()).first()
if user is None:
            print('user is None')
            # TODO: is this the correct status code?
            bottle.abort(422, 'A user could not be found with the provided email')
if request.json['token'] != user.password_reset_token or \
(request.json['token'] == user.password_reset_token and user.password_reset_token_expiration < datetime.now()):
# TODO: is this the correct status code?
bottle.abort(422, 'Invalid password reset token')
# TODO: need to set the expiration
user.password_reset_token = None
user.password_reset_token_expiration = None
user.password = request.json['password']
user.access_token, user.access_token_expiration = create_access_token()
        user.last_accessed = datetime.now()
session.commit()
user_json = user.json()
# except Exception as exc:
# print('type(exc): ', type(exc))
# print(exc)
# bottle.abort(400, "Could not get a user with the requested email address")
finally:
if session:
session.close()
return user_json
| bsd-3-clause |
lepture/terminal | setup.py | 1 | 1369 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
kwargs = {}
if sys.platform == 'win32':
kwargs['install_requires'] = ['colorama']
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import terminal
from email.utils import parseaddr
author, author_email = parseaddr(terminal.__author__)
setup(
name='terminal',
version=terminal.__version__,
author=author,
author_email=author_email,
url=terminal.__homepage__,
packages=['terminal'],
description=terminal.__doc__,
long_description=open('README.rst').read(),
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: Implementation',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
**kwargs
)
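# Typical usage (illustrative, not part of the file): build a source distribution or
# install locally with
#
#   python setup.py sdist
#   python setup.py install
#
# On win32 the install_requires hook above adds colorama; note it is only honored
# when setuptools (rather than the distutils fallback) is available.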
| bsd-3-clause |
masayukig/tempest | tempest/lib/services/volume/v3/backups_client.py | 2 | 5348 | # Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.volume import base_client
class BackupsClient(base_client.BaseClient):
"""Volume V3 Backups client"""
def create_backup(self, **kwargs):
"""Creates a backup of volume.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-a-backup
"""
post_body = json.dumps({'backup': kwargs})
resp, body = self.post('backups', post_body)
body = json.loads(body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def update_backup(self, backup_id, **kwargs):
"""Updates the specified volume backup.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#update-a-backup
"""
put_body = json.dumps({'backup': kwargs})
resp, body = self.put('backups/%s' % backup_id, put_body)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def restore_backup(self, backup_id, **kwargs):
"""Restore volume from backup.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/index.html#restore-a-backup
"""
post_body = json.dumps({'restore': kwargs})
resp, body = self.post('backups/%s/restore' % (backup_id), post_body)
body = json.loads(body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def delete_backup(self, backup_id):
"""Delete a backup of volume."""
resp, body = self.delete('backups/%s' % backup_id)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def show_backup(self, backup_id):
"""Returns the details of a single backup."""
url = "backups/%s" % backup_id
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def list_backups(self, detail=False, **params):
"""List all the tenant's backups.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-backups-for-project
https://docs.openstack.org/api-ref/block-storage/v3/index.html#list-backups-with-detail
"""
url = "backups"
if detail:
url += "/detail"
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def export_backup(self, backup_id):
"""Export backup metadata record."""
url = "backups/%s/export_record" % backup_id
resp, body = self.get(url)
body = json.loads(body)
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
def import_backup(self, **kwargs):
"""Import backup metadata record.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/index.html#import-a-backup
"""
post_body = json.dumps({'backup-record': kwargs})
resp, body = self.post("backups/import_record", post_body)
body = json.loads(body)
self.expected_success(201, resp.status)
return rest_client.ResponseBody(resp, body)
def reset_backup_status(self, backup_id, status):
"""Reset the specified backup's status."""
post_body = json.dumps({'os-reset_status': {"status": status}})
resp, body = self.post('backups/%s/action' % backup_id, post_body)
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
def is_resource_deleted(self, id):
try:
self.show_backup(id)
except lib_exc.NotFound:
return True
return False
@property
def resource_type(self):
"""Returns the primary type of resource this client works with."""
return 'backup'
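    # Illustrative usage sketch (assumption, not part of tempest itself): creating a
    # backup for a hypothetical volume, deleting it, and polling until Cinder reports
    # it gone, relying on is_resource_deleted() defined above.
    #
    #   backup = backups_client.create_backup(volume_id=volume['id'])['backup']
    #   backups_client.delete_backup(backup['id'])
    #   while not backups_client.is_resource_deleted(backup['id']):
    #       time.sleep(1)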
| apache-2.0 |
AllenDowney/SoftwareSystems | hw04/wave3/thinkdsp.py | 23 | 31996 | """This file contains code used in "Think DSP",
by Allen B. Downey, available from greenteapress.com
Copyright 2013 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import array
import math
import numpy
import random
import scipy
import scipy.stats
import struct
import subprocess
import thinkplot
from fractions import gcd
from wave import open as open_wave
import matplotlib.pyplot as pyplot
PI2 = math.pi * 2
def random_seed(x):
"""Initialize the random and numpy.random generators.
x: int seed
"""
random.seed(x)
numpy.random.seed(x)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class WavFileWriter(object):
"""Writes wav files."""
def __init__(self, filename='sound.wav', framerate=11025):
"""Opens the file and sets parameters.
filename: string
framerate: samples per second
"""
self.filename = filename
self.framerate = framerate
self.nchannels = 1
self.sampwidth = 2
self.bits = self.sampwidth * 8
self.bound = 2**(self.bits-1) - 1
self.fmt = 'h'
self.dtype = numpy.int16
self.fp = open_wave(self.filename, 'w')
self.fp.setnchannels(self.nchannels)
self.fp.setsampwidth(self.sampwidth)
self.fp.setframerate(self.framerate)
def write(self, wave):
"""Writes a wave.
wave: Wave
"""
zs = wave.quantize(self.bound, self.dtype)
self.fp.writeframes(zs.tostring())
def close(self, duration=0):
"""Closes the file.
duration: how many seconds of silence to append
"""
if duration:
self.write(rest(duration))
self.fp.close()
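# Illustrative sketch (not in the original file): writing a one-second 440 Hz sine
# to disk with WavFileWriter. The filename is arbitrary; SinSignal is defined
# further down in this module.
#
#   writer = WavFileWriter('a440.wav', framerate=11025)
#   writer.write(SinSignal(freq=440).make_wave(duration=1))
#   writer.close()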
def read_wave(filename='sound.wav'):
"""Reads a wave file.
filename: string
returns: Wave
"""
fp = open_wave(filename, 'r')
nchannels = fp.getnchannels()
nframes = fp.getnframes()
sampwidth = fp.getsampwidth()
framerate = fp.getframerate()
z_str = fp.readframes(nframes)
fp.close()
dtype_map = {1:numpy.int8, 2:numpy.int16}
assert sampwidth in dtype_map
ys = numpy.fromstring(z_str, dtype=dtype_map[sampwidth])
wave = Wave(ys, framerate)
return wave
def play_wave(filename='sound.wav', player='aplay'):
"""Plays a wave file.
filename: string
player: string name of executable that plays wav files
"""
cmd = '%s %s' % (player, filename)
popen = subprocess.Popen(cmd, shell=True)
popen.communicate()
class _SpectrumParent(object):
"""Contains code common to Spectrum and DCT.
"""
@property
def max_freq(self):
return self.framerate / 2.0
@property
def freq_res(self):
return self.max_freq / (len(self.fs) - 1)
def plot(self, low=0, high=None, **options):
"""Plots amplitude vs frequency.
low: int index to start at
high: int index to end at
"""
thinkplot.plot(self.fs[low:high], self.amps[low:high], **options)
def plot_power(self, low=0, high=None, **options):
"""Plots power vs frequency.
low: int index to start at
high: int index to end at
"""
thinkplot.plot(self.fs[low:high], self.power[low:high], **options)
def estimate_slope(self):
"""Runs linear regression on log power vs log frequency.
returns: slope, inter, r2, p, stderr
"""
x = numpy.log(self.fs[1:])
y = numpy.log(self.power[1:])
t = scipy.stats.linregress(x,y)
return t
def peaks(self):
"""Finds the highest peaks and their frequencies.
returns: sorted list of (amplitude, frequency) pairs
"""
t = zip(self.amps, self.fs)
t.sort(reverse=True)
return t
class Spectrum(_SpectrumParent):
"""Represents the spectrum of a signal."""
def __init__(self, hs, framerate):
self.hs = hs
self.framerate = framerate
n = len(hs)
self.fs = numpy.linspace(0, self.max_freq, n)
def __add__(self, other):
if other == 0:
return self
assert self.framerate == other.framerate
hs = self.hs + other.hs
return Spectrum(hs, self.framerate)
__radd__ = __add__
@property
def real(self):
"""Returns the real part of the hs (read-only property)."""
return numpy.real(self.hs)
@property
def imag(self):
"""Returns the imaginary part of the hs (read-only property)."""
return numpy.imag(self.hs)
@property
def amps(self):
"""Returns a sequence of amplitudes (read-only property)."""
return numpy.absolute(self.hs)
@property
def power(self):
"""Returns a sequence of powers (read-only property)."""
return self.amps ** 2
def low_pass(self, cutoff, factor=0):
"""Attenuate frequencies above the cutoff.
cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if self.fs[i] > cutoff:
self.hs[i] *= factor
def high_pass(self, cutoff, factor=0):
"""Attenuate frequencies below the cutoff.
cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if self.fs[i] < cutoff:
self.hs[i] *= factor
def band_stop(self, low_cutoff, high_cutoff, factor=0):
"""Attenuate frequencies between the cutoffs.
low_cutoff: frequency in Hz
high_cutoff: frequency in Hz
factor: what to multiply the magnitude by
"""
for i in xrange(len(self.hs)):
if low_cutoff < self.fs[i] < high_cutoff:
self.hs[i] = 0
def pink_filter(self, beta=1):
"""Apply a filter that would make white noise pink.
beta: exponent of the pink noise
"""
denom = self.fs ** (beta/2.0)
denom[0] = 1
self.hs /= denom
def angles(self, i):
"""Computes phase angles in radians.
returns: list of phase angles
"""
return numpy.angle(self.hs)
def make_integrated_spectrum(self):
"""Makes an integrated spectrum.
"""
cs = numpy.cumsum(self.power)
cs /= cs[-1]
return IntegratedSpectrum(cs, self.fs)
def make_wave(self):
"""Transforms to the time domain.
returns: Wave
"""
ys = numpy.fft.irfft(self.hs)
return Wave(ys, self.framerate)
class IntegratedSpectrum(object):
"""Represents the integral of a spectrum."""
def __init__(self, cs, fs):
"""Initializes an integrated spectrum:
cs: sequence of cumulative amplitudes
fs: sequence of frequences
"""
self.cs = cs
self.fs = fs
def plot_power(self, low=0, high=None, expo=False, **options):
"""Plots the integrated spectrum.
low: int index to start at
high: int index to end at
"""
cs = self.cs[low:high]
fs = self.fs[low:high]
if expo:
cs = numpy.exp(cs)
thinkplot.Plot(fs, cs, **options)
def estimate_slope(self, low=1, high=-12000):
"""Runs linear regression on log cumulative power vs log frequency.
returns: slope, inter, r2, p, stderr
"""
#print self.fs[low:high]
#print self.cs[low:high]
x = numpy.log(self.fs[low:high])
y = numpy.log(self.cs[low:high])
t = scipy.stats.linregress(x,y)
return t
class Dct(_SpectrumParent):
"""Represents the spectrum of a signal."""
def __init__(self, amps, framerate):
self.amps = amps
self.framerate = framerate
n = len(amps)
self.fs = numpy.arange(n) / float(n) * self.max_freq
def make_wave(self):
"""Transforms to the time domain.
returns: Wave
"""
ys = scipy.fftpack.dct(self.amps, type=3) / 2
return Wave(ys, self.framerate)
class Spectrogram(object):
"""Represents the spectrum of a signal."""
def __init__(self, spec_map, seg_length, window_func=None):
"""Initialize the spectrogram.
spec_map: map from float time to Spectrum
seg_length: number of samples in each segment
window_func: function that computes the window
"""
self.spec_map = spec_map
self.seg_length = seg_length
self.window_func = window_func
def any_spectrum(self):
"""Returns an arbitrary spectrum from the spectrogram."""
return self.spec_map.itervalues().next()
@property
def time_res(self):
"""Time resolution in seconds."""
spectrum = self.any_spectrum()
return float(self.seg_length) / spectrum.framerate
@property
def freq_res(self):
"""Frequency resolution in Hz."""
return self.any_spectrum().freq_res
def times(self):
"""Sorted sequence of times.
returns: sequence of float times in seconds
"""
ts = sorted(self.spec_map.iterkeys())
return ts
def frequencies(self):
"""Sequence of frequencies.
returns: sequence of float freqencies in Hz.
"""
fs = self.any_spectrum().fs
return fs
def plot(self, low=0, high=None, **options):
"""Make a pseudocolor plot.
low: index of the lowest frequency component to plot
high: index of the highest frequency component to plot
"""
ts = self.times()
fs = self.frequencies()[low:high]
# make the array
size = len(fs), len(ts)
array = numpy.zeros(size, dtype=numpy.float)
# copy amplitude from each spectrum into a column of the array
for i, t in enumerate(ts):
spectrum = self.spec_map[t]
array[:,i] = spectrum.amps[low:high]
thinkplot.pcolor(ts, fs, array, **options)
def make_wave(self):
"""Inverts the spectrogram and returns a Wave.
returns: Wave
"""
res = []
for t, spectrum in sorted(self.spec_map.iteritems()):
wave = spectrum.make_wave()
n = len(wave)
if self.window_func:
window = 1 / self.window_func(n)
wave.window(window)
i = int(round(t * wave.framerate))
start = i - n / 2
end = start + n
res.append((start, end, wave))
starts, ends, waves = zip(*res)
low = min(starts)
high = max(ends)
ys = numpy.zeros(high-low, numpy.float)
for start, end, wave in res:
ys[start:end] = wave.ys
return Wave(ys, wave.framerate)
class Wave(object):
"""Represents a discrete-time waveform.
Note: the ys attribute is a "wave array" which is a numpy
array of floats.
"""
def __init__(self, ys, framerate, start=0):
"""Initializes the wave.
ys: wave array
framerate: samples per second
"""
self.ys = ys
self.framerate = framerate
self.start = start
def __len__(self):
return len(self.ys)
@property
def duration(self):
"""Duration (property).
returns: float duration in seconds
"""
return len(self.ys) / float(self.framerate)
def __or__(self, other):
"""Concatenates two waves.
other: Wave
returns: Wave
"""
if self.framerate != other.framerate:
raise ValueError('Wave.__or__: framerates do not agree')
ys = numpy.concatenate((self.ys, other.ys))
return Wave(ys, self.framerate)
def quantize(self, bound, dtype):
"""Maps the waveform to quanta.
bound: maximum amplitude
dtype: numpy data type or string
returns: quantized signal
"""
return quantize(self.ys, bound, dtype)
def apodize(self, denom=20, duration=0.1):
"""Tapers the amplitude at the beginning and end of the signal.
Tapers either the given duration of time or the given
fraction of the total duration, whichever is less.
denom: float fraction of the segment to taper
duration: float duration of the taper in seconds
"""
self.ys = apodize(self.ys, self.framerate, denom, duration)
def hamming(self):
"""Apply a Hamming window to the wave.
"""
self.ys *= numpy.hamming(len(self.ys))
def window(self, window):
"""Apply a window to the wave.
window: sequence of multipliers, same length as self.ys
"""
self.ys *= window
def normalize(self, amp=1.0):
"""Normalizes the signal to the given amplitude.
amp: float amplitude
"""
self.ys = normalize(self.ys, amp=amp)
def unbias(self):
"""Unbiases the signal.
"""
self.ys = unbias(self.ys)
def segment(self, start=0, duration=None):
"""Extracts a segment.
start: float start time in seconds
duration: float duration in seconds
returns: Wave
"""
i = start * self.framerate
if duration is None:
j = None
else:
j = i + duration * self.framerate
ys = self.ys[i:j]
return Wave(ys, self.framerate)
def make_spectrum(self):
"""Computes the spectrum using FFT.
returns: Spectrum
"""
hs = numpy.fft.rfft(self.ys)
return Spectrum(hs, self.framerate)
def make_dct(self):
amps = scipy.fftpack.dct(self.ys, type=2)
return Dct(amps, self.framerate)
def make_spectrogram(self, seg_length, window_func=numpy.hamming):
"""Computes the spectrogram of the wave.
seg_length: number of samples in each segment
window_func: function used to compute the window
returns: Spectrogram
"""
n = len(self.ys)
window = window_func(seg_length)
start, end, step = 0, seg_length, seg_length / 2
spec_map = {}
while end < n:
ys = self.ys[start:end] * window
hs = numpy.fft.rfft(ys)
t = (start + end) / 2.0 / self.framerate
spec_map[t] = Spectrum(hs, self.framerate)
start += step
end += step
return Spectrogram(spec_map, seg_length, window_func)
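    # Illustrative sketch (assumption, not from the original source): a typical round
    # trip through make_spectrogram for a wave read from disk. The file name is
    # hypothetical and 512 samples per segment is an arbitrary choice.
    #
    #   wave = read_wave('sound.wav')
    #   spectrogram = wave.make_spectrogram(seg_length=512)
    #   spectrogram.plot(high=100)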
def plot(self, **options):
"""Plots the wave.
"""
n = len(self.ys)
ts = numpy.linspace(0, self.duration, n)
thinkplot.plot(ts, self.ys, **options)
def corr(self, other):
"""Correlation coefficient two waves.
other: Wave
returns: 2x2 covariance matrix
"""
mat = self.cov_mat(other)
corr = mat[0][1] / math.sqrt(mat[0][0] * mat[1][1])
return corr
def cov_mat(self, other):
"""Covariance matrix of two waves.
other: Wave
returns: 2x2 covariance matrix
"""
return numpy.cov(self.ys, other.ys)
def cov(self, other):
"""Covariance of two unbiased waves.
other: Wave
returns: float
"""
total = sum(self.ys * other.ys) / len(self.ys)
return total
def cos_cov(self, k):
"""Covariance with a cosine signal.
freq: freq of the cosine signal in Hz
returns: float covariance
"""
n = len(self.ys)
factor = math.pi * k / n
ys = [math.cos(factor * (i+0.5)) for i in range(n)]
total = 2 * sum(self.ys * ys)
return total
def cos_transform(self):
"""Discrete cosine transform.
returns: list of frequency, cov pairs
"""
n = len(self.ys)
res = []
for k in range(n):
cov = self.cos_cov(k)
res.append((k, cov))
return res
def write(self, filename='sound.wav'):
"""Write a wave file.
filename: string
"""
print 'Writing', filename
wfile = WavFileWriter(filename, self.framerate)
wfile.write(self)
wfile.close()
def play(self, filename='sound.wav'):
"""Plays a wave file.
filename: string
"""
self.write(filename)
play_wave(filename)
def unbias(ys):
"""Shifts a wave array so it has mean 0.
ys: wave array
returns: wave array
"""
return ys - ys.mean()
def normalize(ys, amp=1.0):
"""Normalizes a wave array so the maximum amplitude is +amp or -amp.
ys: wave array
amp: max amplitude (pos or neg) in result
returns: wave array
"""
high, low = abs(max(ys)), abs(min(ys))
return amp * ys / max(high, low)
def quantize(ys, bound, dtype):
"""Maps the waveform to quanta.
ys: wave array
bound: maximum amplitude
dtype: numpy data type of the result
returns: quantized signal
"""
if max(ys) > 1 or min(ys) < -1:
print 'Warning: normalizing before quantizing.'
ys = normalize(ys)
zs = (ys * bound).astype(dtype)
return zs
def apodize(ys, framerate, denom=20, duration=0.1):
"""Tapers the amplitude at the beginning and end of the signal.
Tapers either the given duration of time or the given
fraction of the total duration, whichever is less.
ys: wave array
framerate: int frames per second
denom: float fraction of the segment to taper
duration: float duration of the taper in seconds
returns: wave array
"""
# a fixed fraction of the segment
n = len(ys)
k1 = n / denom
# a fixed duration of time
k2 = int(duration * framerate)
k = min(k1, k2)
w1 = numpy.linspace(0, 1, k)
w2 = numpy.ones(n - 2*k)
w3 = numpy.linspace(1, 0, k)
window = numpy.concatenate((w1, w2, w3))
return ys * window
class Signal(object):
"""Represents a time-varying signal."""
def __add__(self, other):
"""Adds two signals.
other: Signal
returns: Signal
"""
if other == 0:
return self
return SumSignal(self, other)
__radd__ = __add__
@property
def period(self):
"""Period of the signal in seconds (property).
For non-periodic signals, use the default, 0.1 seconds
returns: float seconds
"""
return 0.1
def plot(self, framerate=11025):
"""Plots the signal.
framerate: samples per second
"""
duration = self.period * 3
wave = self.make_wave(duration, start=0, framerate=framerate)
wave.plot()
def make_wave(self, duration=1, start=0, framerate=11025):
"""Makes a Wave object.
duration: float seconds
start: float seconds
framerate: int frames per second
returns: Wave
"""
dt = 1.0 / framerate
ts = numpy.arange(start, duration, dt)
ys = self.evaluate(ts)
return Wave(ys, framerate=framerate, start=start)
def infer_framerate(ts):
"""Given ts, find the framerate.
Assumes that the ts are equally spaced.
ts: sequence of times in seconds
returns: frames per second
"""
dt = ts[1] - ts[0]
framerate = 1.0 / dt
return framerate
class SumSignal(Signal):
"""Represents the sum of signals."""
def __init__(self, *args):
"""Initializes the sum.
args: tuple of signals
"""
self.signals = args
@property
def period(self):
"""Period of the signal in seconds.
Note: this is not correct; it's mostly a placekeeper.
But it is correct for a harmonic sequence where all
component frequencies are multiples of the fundamental.
returns: float seconds
"""
return max(sig.period for sig in self.signals)
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
return sum(sig.evaluate(ts) for sig in self.signals)
class Sinusoid(Signal):
"""Represents a sinusoidal signal."""
def __init__(self, freq=440, amp=1.0, offset=0, func=numpy.sin):
"""Initializes a sinusoidal signal.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
func: function that maps phase to amplitude
"""
self.freq = freq
self.amp = amp
self.offset = offset
self.func = func
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
return 1.0 / self.freq
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
phases = PI2 * self.freq * ts + self.offset
ys = self.amp * self.func(phases)
return ys
def CosSignal(freq=440, amp=1.0, offset=0):
"""Makes a consine Sinusoid.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
returns: Sinusoid object
"""
return Sinusoid(freq, amp, offset, func=numpy.cos)
def SinSignal(freq=440, amp=1.0, offset=0):
"""Makes a sine Sinusoid.
freq: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
offset: float phase offset in radians
returns: Sinusoid object
"""
return Sinusoid(freq, amp, offset, func=numpy.sin)
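# Illustrative sketch (not part of the original file): mixing two sinusoids and
# rendering them to a Wave; the frequencies, amplitudes, and duration are arbitrary
# examples.
#
#   chord = CosSignal(freq=440) + SinSignal(freq=660, amp=0.5)
#   wave = chord.make_wave(duration=0.5, framerate=11025)
#   wave.apodize()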
class SquareSignal(Sinusoid):
"""Represents a square signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = self.amp * numpy.sign(unbias(frac))
return ys
class SawtoothSignal(Sinusoid):
"""Represents a sawtooth signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = normalize(unbias(frac), self.amp)
return ys
class ParabolicSignal(Sinusoid):
"""Represents a parabolic signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = frac**2
ys = normalize(unbias(ys), self.amp)
return ys
class GlottalSignal(Sinusoid):
"""Represents a periodic signal that resembles a glottal signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = frac**4 * (1-frac)
ys = normalize(unbias(ys), self.amp)
return ys
class TriangleSignal(Sinusoid):
"""Represents a triangle signal."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
cycles = self.freq * ts + self.offset / PI2
frac, _ = numpy.modf(cycles)
ys = numpy.abs(frac - 0.5)
ys = normalize(unbias(ys), self.amp)
return ys
class Chirp(Signal):
"""Represents a signal with variable frequency."""
def __init__(self, start=440, end=880, amp=1.0):
"""Initializes a linear chirp.
start: float frequency in Hz
end: float frequency in Hz
amp: float amplitude, 1.0 is nominal max
"""
self.start = start
self.end = end
self.amp = amp
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
        raise ValueError('Non-periodic signal.')
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
freqs = numpy.linspace(self.start, self.end, len(ts)-1)
return self._evaluate(ts, freqs)
def _evaluate(self, ts, freqs):
"""Helper function that evaluates the signal.
ts: float array of times
freqs: float array of frequencies during each interval
"""
#n = len(freqs)
#print freqs[::n/2]
dts = numpy.diff(ts)
dps = PI2 * freqs * dts
phases = numpy.cumsum(dps)
phases = numpy.insert(phases, 0, 0)
ys = self.amp * numpy.cos(phases)
return ys
class ExpoChirp(Chirp):
"""Represents a signal with varying frequency."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
start, end = math.log10(self.start), math.log10(self.end)
freqs = numpy.logspace(start, end, len(ts)-1)
return self._evaluate(ts, freqs)
class SilentSignal(Signal):
"""Represents silence."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
return numpy.zeros(len(ts))
class _Noise(Signal):
"""Represents a noise signal (abstract parent class)."""
def __init__(self, amp=1.0):
"""Initializes a white noise signal.
amp: float amplitude, 1.0 is nominal max
"""
self.amp = amp
@property
def period(self):
"""Period of the signal in seconds.
returns: float seconds
"""
        raise ValueError('Non-periodic signal.')
class UncorrelatedUniformNoise(_Noise):
"""Represents uncorrelated uniform noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
ys = numpy.random.uniform(-self.amp, self.amp, len(ts))
return ys
class UncorrelatedGaussianNoise(_Noise):
"""Represents uncorrelated gaussian noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
ts: float array of times
returns: float wave array
"""
ys = numpy.random.normal(0, 1, len(ts))
ys = normalize(ys, self.amp)
return ys
class BrownianNoise(_Noise):
"""Represents Brownian noise, aka red noise."""
def evaluate(self, ts):
"""Evaluates the signal at the given times.
Computes Brownian noise by taking the cumulative sum of
a uniform random series.
ts: float array of times
returns: float wave array
"""
#dys = numpy.random.normal(0, 1, len(ts))
dys = numpy.random.uniform(-1, 1, len(ts))
#ys = numpy.cumsum(dys)
ys = scipy.integrate.cumtrapz(dys, ts)
ys = normalize(unbias(ys), self.amp)
return ys
class PinkNoise(_Noise):
"""Represents Brownian noise, aka red noise."""
def __init__(self, amp=1.0, beta=1.0):
"""Initializes a pink noise signal.
amp: float amplitude, 1.0 is nominal max
"""
self.amp = amp
self.beta = beta
def make_wave(self, duration=1, start=0, framerate=11025):
"""Makes a Wave object.
duration: float seconds
start: float seconds
framerate: int frames per second
returns: Wave
"""
signal = UncorrelatedUniformNoise()
wave = signal.make_wave(duration, start, framerate)
spectrum = wave.make_spectrum()
spectrum.pink_filter(beta=self.beta)
wave2 = spectrum.make_wave()
wave2.unbias()
wave2.normalize(self.amp)
return wave2
def rest(duration):
"""Makes a rest of the given duration.
duration: float seconds
returns: Wave
"""
signal = SilentSignal()
wave = signal.make_wave(duration)
return wave
def make_note(midi_num, duration, sig_cons=CosSignal, framerate=11025):
"""Make a MIDI note with the given duration.
midi_num: int MIDI note number
duration: float seconds
sig_cons: Signal constructor function
framerate: int frames per second
returns: Wave
"""
freq = midi_to_freq(midi_num)
signal = sig_cons(freq)
wave = signal.make_wave(duration, framerate=framerate)
wave.apodize()
return wave
def make_chord(midi_nums, duration, sig_cons=CosSignal, framerate=11025):
"""Make a chord with the given duration.
midi_nums: sequence of int MIDI note numbers
duration: float seconds
sig_cons: Signal constructor function
framerate: int frames per second
returns: Wave
"""
freqs = [midi_to_freq(num) for num in midi_nums]
signal = sum(sig_cons(freq) for freq in freqs)
wave = signal.make_wave(duration, framerate=framerate)
wave.apodize()
return wave
def midi_to_freq(midi_num):
"""Converts MIDI note number to frequency.
midi_num: int MIDI note number
returns: float frequency in Hz
"""
x = (midi_num - 69) / 12.0
freq = 440.0 * 2**x
return freq
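# Worked example (for illustration only): MIDI note 69 is concert A, so
# midi_to_freq(69) == 440.0; an octave down, midi_to_freq(57) == 220.0; and
# middle C, midi_to_freq(60), is 440 * 2 ** (-9 / 12.0), roughly 261.63 Hz.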
def sin_wave(freq, duration=1, offset=0):
"""Makes a sine wave with the given parameters.
freq: float cycles per second
duration: float seconds
offset: float radians
returns: Wave
"""
signal = SinSignal(freq, offset=offset)
wave = signal.make_wave(duration)
return wave
def cos_wave(freq, duration=1, offset=0):
"""Makes a cosine wave with the given parameters.
freq: float cycles per second
duration: float seconds
offset: float radians
returns: Wave
"""
signal = CosSignal(freq, offset=offset)
wave = signal.make_wave(duration)
return wave
def mag(a):
"""Computes the magnitude of a numpy array.
a: numpy array
returns: float
"""
return numpy.sqrt(numpy.dot(a, a))
def main():
cos_basis = cos_wave(440)
sin_basis = sin_wave(440)
wave = cos_wave(440, offset=math.pi/2)
cos_cov = cos_basis.cov(wave)
sin_cov = sin_basis.cov(wave)
print cos_cov, sin_cov, mag((cos_cov, sin_cov))
return
wfile = WavFileWriter()
for sig_cons in [SinSignal, TriangleSignal, SawtoothSignal,
GlottalSignal, ParabolicSignal, SquareSignal]:
print sig_cons
sig = sig_cons(440)
wave = sig.make_wave(1)
wave.apodize()
wfile.write(wave)
wfile.close()
return
signal = GlottalSignal(440)
signal.plot()
pyplot.show()
return
wfile = WavFileWriter()
for m in range(60, 0, -1):
wfile.write(make_note(m, 0.25))
wfile.close()
return
wave1 = make_note(69, 1)
wave2 = make_chord([69, 72, 76], 1)
wave = wave1 | wave2
wfile = WavFileWriter()
wfile.write(wave)
wfile.close()
return
sig1 = CosSignal(freq=440)
sig2 = CosSignal(freq=523.25)
sig3 = CosSignal(freq=660)
sig4 = CosSignal(freq=880)
sig5 = CosSignal(freq=987)
sig = sig1 + sig2 + sig3 + sig4
#wave = Wave(sig, duration=0.02)
#wave.plot()
wave = sig.make_wave(duration=1)
#wave.normalize()
wfile = WavFileWriter(wave)
wfile.write()
wfile.close()
if __name__ == '__main__':
main()
| gpl-3.0 |
magfest/mivs | mivs/models.py | 1 | 16621 | from mivs import *
def href(url):
return ('http://' + url) if url and not url.startswith('http') else url
class ReviewMixin:
@property
def video_reviews(self):
return [r for r in self.reviews if r.video_status != c.PENDING]
@property
def game_reviews(self):
return [r for r in self.reviews if r.game_status != c.PENDING]
@Session.model_mixin
class SessionMixin:
def logged_in_studio(self):
try:
return self.indie_studio(cherrypy.session['studio_id'])
except:
raise HTTPRedirect('../mivs_applications/studio')
def logged_in_judge(self):
judge = self.admin_attendee().admin_account.judge
if judge:
return judge
else:
raise HTTPRedirect('../accounts/homepage?message={}', 'You have been given judge access but not had a judge entry created for you - please contact a MIVS admin to correct this.')
def code_for(self, game):
if game.unlimited_code:
return game.unlimited_code
else:
for code in self.logged_in_judge().codes:
if code.game == game:
return code
def delete_screenshot(self, screenshot):
self.delete(screenshot)
try:
os.remove(screenshot.filepath)
except:
pass
self.commit()
def indie_judges(self):
return self.query(IndieJudge).join(IndieJudge.admin_account).join(AdminAccount.attendee).order_by(Attendee.full_name).all()
def indie_games(self):
return self.query(IndieGame).options(joinedload(IndieGame.studio), joinedload(IndieGame.reviews)).order_by('name').all()
@Session.model_mixin
class AdminAccount:
judge = relationship('IndieJudge', uselist=False, backref='admin_account')
@Session.model_mixin
class Group:
studio = relationship('IndieStudio', uselist=False, backref='group')
class IndieJudge(MagModel, ReviewMixin):
admin_id = Column(UUID, ForeignKey('admin_account.id'))
genres = Column(MultiChoice(c.MIVS_INDIE_JUDGE_GENRE_OPTS))
platforms = Column(MultiChoice(c.MIVS_INDIE_PLATFORM_OPTS))
platforms_text = Column(UnicodeText)
staff_notes = Column(UnicodeText)
codes = relationship('IndieGameCode', backref='judge')
reviews = relationship('IndieGameReview', backref='judge')
email_model_name = 'judge'
@property
def judging_complete(self):
return len(self.reviews) == len(self.game_reviews)
@property
def mivs_all_genres(self):
return c.MIVS_ALL_GENRES in self.genres_ints
@property
def attendee(self):
return self.admin_account.attendee
@property
def full_name(self):
return self.attendee.full_name
@property
def email(self):
return self.attendee.email
class IndieStudio(MagModel):
group_id = Column(UUID, ForeignKey('group.id'), nullable=True)
name = Column(UnicodeText, unique=True)
address = Column(UnicodeText)
website = Column(UnicodeText)
twitter = Column(UnicodeText)
facebook = Column(UnicodeText)
status = Column(Choice(c.MIVS_STUDIO_STATUS_OPTS), default=c.NEW, admin_only=True)
staff_notes = Column(UnicodeText, admin_only=True)
registered = Column(UTCDateTime, server_default=utcnow())
games = relationship('IndieGame', backref='studio', order_by='IndieGame.title')
developers = relationship('IndieDeveloper', backref='studio', order_by='IndieDeveloper.last_name')
email_model_name = 'studio'
@property
def confirm_deadline(self):
return sorted([g for g in self.games if g.accepted], key=lambda g: g.accepted)[0].accepted\
+ timedelta(days=c.MIVS_CONFIRM_DEADLINE)
@property
def after_confirm_deadline(self):
return self.confirm_deadline < localized_now()
@property
def website_href(self):
return href(self.website)
@property
def email(self):
return [dev.email for dev in self.developers if dev.primary_contact]
@property
def primary_contact(self):
return [dev for dev in self.developers if dev.primary_contact][0]
@property
def submitted_games(self):
return [g for g in self.games if g.submitted]
@property
def comped_badges(self):
return c.MIVS_INDIE_BADGE_COMPS * len([g for g in self.games if g.status == c.ACCEPTED])
@property
def unclaimed_badges(self):
return max(0, self.comped_badges - len([d for d in self.developers if not d.matching_attendee]))
class IndieDeveloper(MagModel):
studio_id = Column(UUID, ForeignKey('indie_studio.id'))
primary_contact = Column(Boolean, default=False) # just means they receive emails
first_name = Column(UnicodeText)
last_name = Column(UnicodeText)
email = Column(UnicodeText)
cellphone = Column(UnicodeText)
@property
def full_name(self):
return self.first_name + ' ' + self.last_name
@property
def matching_attendee(self):
return self.session.query(Attendee).filter(
func.lower(Attendee.first_name) == self.first_name.lower(),
func.lower(Attendee.last_name) == self.last_name.lower(),
func.lower(Attendee.email) == self.email.lower()
).first()
class IndieGame(MagModel, ReviewMixin):
studio_id = Column(UUID, ForeignKey('indie_studio.id'))
title = Column(UnicodeText)
brief_description = Column(UnicodeText) # 140 max
genres = Column(MultiChoice(c.MIVS_INDIE_GENRE_OPTS))
platforms = Column(MultiChoice(c.MIVS_INDIE_PLATFORM_OPTS))
platforms_text = Column(UnicodeText)
description = Column(UnicodeText) # 500 max
how_to_play = Column(UnicodeText) # 1000 max
link_to_video = Column(UnicodeText)
link_to_game = Column(UnicodeText)
password_to_game = Column(UnicodeText)
code_type = Column(Choice(c.MIVS_CODE_TYPE_OPTS), default=c.NO_CODE)
code_instructions = Column(UnicodeText)
build_status = Column(Choice(c.MIVS_BUILD_STATUS_OPTS), default=c.PRE_ALPHA)
build_notes = Column(UnicodeText) # 500 max
shown_events = Column(UnicodeText)
video_submitted = Column(Boolean, default=False)
submitted = Column(Boolean, default=False)
agreed_liability = Column(Boolean, default=False)
agreed_showtimes = Column(Boolean, default=False)
agreed_reminder1 = Column(Boolean, default=False)
agreed_reminder2 = Column(Boolean, default=False)
alumni_years = Column(MultiChoice(c.PREV_MIVS_YEAR_OPTS))
alumni_update = Column(UnicodeText)
link_to_promo_video = Column(UnicodeText)
link_to_webpage = Column(UnicodeText)
twitter = Column(UnicodeText)
facebook = Column(UnicodeText)
other_social_media = Column(UnicodeText)
tournament_at_event = Column(Boolean, default=False)
tournament_prizes = Column(UnicodeText)
has_multiplayer = Column(Boolean, default=False)
player_count = Column(UnicodeText)
multiplayer_game_length = Column(Integer, nullable=True) # Length in minutes
leaderboard_challenge = Column(Boolean, default=False)
status = Column(Choice(c.MIVS_GAME_STATUS_OPTS), default=c.NEW, admin_only=True)
judge_notes = Column(UnicodeText, admin_only=True)
registered = Column(UTCDateTime, server_default=utcnow())
waitlisted = Column(UTCDateTime, nullable=True)
accepted = Column(UTCDateTime, nullable=True)
codes = relationship('IndieGameCode', backref='game')
reviews = relationship('IndieGameReview', backref='game')
images = relationship(
'IndieGameImage', backref='game', order_by='IndieGameImage.id')
email_model_name = 'game'
@presave_adjustment
def accepted_time(self):
if self.status == c.ACCEPTED and not self.accepted:
self.accepted = datetime.now(UTC)
@presave_adjustment
def waitlisted_time(self):
if self.status == c.WAITLISTED and not self.waitlisted:
self.waitlisted = datetime.now(UTC)
@property
def email(self):
return self.studio.email
@property
def reviews_to_email(self):
return [review for review in self.reviews if review.send_to_studio]
@property
def video_href(self):
return href(self.link_to_video)
@property
def href(self):
return href(self.link_to_game)
@property
def screenshots(self):
return [img for img in self.images if img.is_screenshot]
@property
def best_screenshots(self):
return [img for img in self.images if img.is_screenshot and img.use_in_promo]
def best_screenshot_downloads(self, count=2):
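# Rank images best-first: promo-worthy screenshots, then other screenshots,
# then promo-only images (the ascending sort below is reversed).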
all_images = reversed(sorted(
self.images,
key=lambda img: (
img.is_screenshot and img.use_in_promo,
img.is_screenshot,
img.use_in_promo)))
screenshots = []
for i, screenshot in enumerate(all_images):
if os.path.exists(screenshot.filepath):
screenshots.append(screenshot)
if len(screenshots) >= count:
break
return screenshots
def best_screenshot_download_filenames(self, count=2):
nonchars = re.compile(r'[\W]+')
best_screenshots = self.best_screenshot_downloads(count)
screenshots = []
for i, screenshot in enumerate(best_screenshots):
if os.path.exists(screenshot.filepath):
name = '_'.join([s for s in self.title.lower().split() if s])
name = nonchars.sub('', name)
filename = '{}_{}.{}'.format(name, len(screenshots) + 1, screenshot.extension.lower())
screenshots.append(filename)
if len(screenshots) >= count:
break
return screenshots + ([''] * (count - len(screenshots)))
@property
def promo_image(self):
return next(iter([img for img in self.images if not img.is_screenshot]), None)
@property
def missing_steps(self):
steps = []
if not self.link_to_game:
steps.append('You have not yet included a link to where the judges can access your game')
if self.code_type != c.NO_CODE and self.link_to_game:
if not self.codes:
steps.append('You have not yet attached any codes to this game for our judges to use')
elif not self.unlimited_code and len(self.codes) < c.MIVS_CODES_REQUIRED:
steps.append('You have not attached the {} codes you must provide for our judges'.format(c.MIVS_CODES_REQUIRED))
if not self.agreed_showtimes:
steps.append('You must agree to the showtimes detailed on the game form')
if not self.agreed_liability:
steps.append('You must check the box that agrees to our liability waiver')
return steps
@property
def video_broken(self):
for r in self.reviews:
if r.video_status == c.BAD_LINK:
return True
@property
def unlimited_code(self):
for code in self.codes:
if code.unlimited_use:
return code
@property
def video_submittable(self):
return bool(self.link_to_video)
@property
def submittable(self):
return not self.missing_steps
@property
def scores(self):
return [r.game_score for r in self.reviews if r.game_score]
@property
def score_sum(self):
return sum(self.scores, 0)
@property
def average_score(self):
return (self.score_sum / len(self.scores)) if self.scores else 0
@property
def has_issues(self):
return any(r.has_issues for r in self.reviews)
@property
def confirmed(self):
return self.status == c.ACCEPTED and self.studio and self.studio.group_id
class IndieGameImage(MagModel):
game_id = Column(UUID, ForeignKey('indie_game.id'))
filename = Column(UnicodeText)
content_type = Column(UnicodeText)
extension = Column(UnicodeText)
description = Column(UnicodeText)
use_in_promo = Column(Boolean, default=False)
is_screenshot = Column(Boolean, default=True)
@property
def url(self):
return '../mivs_applications/view_image?id={}'.format(self.id)
@property
def filepath(self):
return os.path.join(c.MIVS_GAME_IMAGE_DIR, str(self.id))
class IndieGameCode(MagModel):
game_id = Column(UUID, ForeignKey('indie_game.id'))
judge_id = Column(UUID, ForeignKey('indie_judge.id'), nullable=True)
code = Column(UnicodeText)
unlimited_use = Column(Boolean, default=False)
judge_notes = Column(UnicodeText, admin_only=True)
@property
def type_label(self):
return 'Unlimited-Use' if self.unlimited_use else 'Single-Person'
class IndieGameReview(MagModel):
game_id = Column(UUID, ForeignKey('indie_game.id'))
judge_id = Column(UUID, ForeignKey('indie_judge.id'))
video_status = Column(Choice(c.MIVS_VIDEO_REVIEW_STATUS_OPTS), default=c.PENDING)
game_status = Column(Choice(c.MIVS_GAME_REVIEW_STATUS_OPTS), default=c.PENDING)
game_content_bad = Column(Boolean, default=False)
video_score = Column(Choice(c.MIVS_VIDEO_REVIEW_OPTS), default=c.PENDING)
game_score = Column(Integer, default=0) # 0 = not reviewed, 1-10 score (10 is best)
video_review = Column(UnicodeText)
game_review = Column(UnicodeText)
developer_response = Column(UnicodeText)
staff_notes = Column(UnicodeText)
send_to_studio = Column(Boolean, default=False)
__table_args__ = (UniqueConstraint('game_id', 'judge_id', name='review_game_judge_uniq'),)
@presave_adjustment
def no_score_if_broken(self):
if self.has_video_issues:
self.video_score = c.PENDING
if self.has_game_issues:
self.game_score = 0
@property
def has_video_issues(self):
return self.video_status in c.MIVS_PROBLEM_STATUSES
@property
def has_game_issues(self):
if self.game_status != c.COULD_NOT_PLAY:
return self.game_status in c.MIVS_PROBLEM_STATUSES
@property
def has_issues(self):
return self.has_video_issues or self.has_game_issues
@on_startup
def add_applicant_restriction():
"""
We use convenience functions for our form handling, e.g. to instantiate an
attendee from an id or from form data we use the session.attendee() method.
This method runs on startup and overrides the methods which are used for the
game application forms to add a new "applicant" parameter. If truthy, this
triggers three additional behaviors:
1) We check that there is currently a logged in studio, and redirect to the
initial application form if there is not.
2) We check that the item being edited belongs to the currently-logged-in
studio and raise an exception if it does not. This check is bypassed for
new things which have not yet been saved to the database.
3) If the model is one with a "studio" relationship, we set that to the
currently-logged-in studio.
We do not perform these kinds of checks for indie judges, for two reasons:
1) We're less concerned about judges abusively editing each other's reviews.
2) There are probably some legitimate use cases for one judge to be able to
edit another's reviews, e.g. to correct typos or reset a review's status
after a link has been fixed, etc.
"""
def override_getter(method_name):
orig_getter = getattr(Session.SessionMixin, method_name)
@wraps(orig_getter)
def with_applicant(self, *args, **kwargs):
applicant = kwargs.pop('applicant', False)
instance = orig_getter(self, *args, **kwargs)
if applicant:
studio = self.logged_in_studio()
if hasattr(instance.__class__, 'game'):
assert instance.is_new or studio == instance.game.studio
else:
assert instance.is_new or studio == instance.studio
instance.studio = studio
return instance
setattr(Session.SessionMixin, method_name, with_applicant)
for name in ['indie_developer', 'indie_game', 'indie_game_code', 'indie_game_image']:
override_getter(name)
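# Hypothetical usage sketch (not part of the original module): a page handler
# relying on the ``applicant`` parameter added by the overrides above. The
# handler name and its parameters are illustrative only.
#
#     def game(self, session, id, **params):
#         game = session.indie_game(id, applicant=True)
#         # applicant=True requires a logged-in studio, asserts that an existing
#         # game belongs to that studio, and sets game.studio on new records.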
| agpl-3.0 |
SQbQxeKd3JHD8/simple_ConTeXt | scripts/log.py | 2 | 1073 | from typing import Any, Dict, List, Optional
from . import cite
from . import utilities
def parse(
text: str, script: str, opts: Dict[str, Any], timeout: float = 5,
) -> Dict[str, List[str]]:
result = cite.parse_common_luatex(
text, script, opts, input_as_stdin=True, timeout=timeout,
)
return do_format(result)
def do_format(data: Optional[dict]) -> Dict[str, List[str]]:
result = {"main": [], "errors": []} # type: Dict[str, List[str]]
if not isinstance(data, list):
return result
errors = []
for entry in data:
if not isinstance(entry, list) or not entry:
continue
class_ = entry[1]
if class_.endswith("error"):
errors.append(entry)
else:
result["main"].append(entry)
result["errors"] = utilities.deduplicate_list(errors)
return result
def compile_errors(errors: List[List[str]]) -> str:
result = ""
for err in errors:
if len(err) > 2:
result += "".join(" - line {}, {}: {}\n".format(*err))
return result
| mit |
ValFadeev/ansible-modules-core | cloud/linode/linode.py | 142 | 18004 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: linode
short_description: create / delete / stop / restart an instance in Linode Public Cloud
description:
- creates / deletes a Linode Public Cloud instance and optionally waits for it to be 'running'.
version_added: "1.3"
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'active', 'started', 'absent', 'deleted', 'stopped', 'restarted']
default: present
api_key:
description:
- Linode API key
default: null
name:
description:
- Name to give the instance (alphanumeric, dashes, underscore)
- To keep sanity on the Linode Web Console, name is prepended with LinodeID_
default: null
type: string
linode_id:
description:
- Unique ID of a linode server
aliases: lid
default: null
type: integer
plan:
description:
- plan to use for the instance (Linode plan)
default: null
type: integer
payment_term:
description:
- payment term to use for the instance (payment term in months)
default: 1
type: integer
choices: [1, 12, 24]
password:
description:
- root password to apply to a new server (auto generated if missing)
default: null
type: string
ssh_pub_key:
description:
- SSH public key applied to root user
default: null
type: string
swap:
description:
- swap size in MB
default: 512
type: integer
distribution:
description:
- distribution to use for the instance (Linode Distribution)
default: null
type: integer
datacenter:
description:
- datacenter to create an instance in (Linode Datacenter)
default: null
type: integer
wait:
description:
- wait for the instance to be in state 'running' before returning
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
requirements:
- "python >= 2.6"
- "linode-python"
- "pycurl"
author: "Vincent Viallet (@zbal)"
notes:
- LINODE_API_KEY env variable can be used instead
'''
EXAMPLES = '''
# Create a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
plan: 1
datacenter: 2
distribution: 99
password: 'superSecureRootPassword'
ssh_pub_key: 'ssh-rsa qwerty'
swap: 768
wait: yes
wait_timeout: 600
state: present
# Ensure a running server (create if missing)
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
plan: 1
datacenter: 2
distribution: 99
password: 'superSecureRootPassword'
ssh_pub_key: 'ssh-rsa qwerty'
swap: 768
wait: yes
wait_timeout: 600
state: present
# Delete a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: absent
# Stop a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: stopped
# Reboot a server
- local_action:
module: linode
api_key: 'longStringFromLinodeApi'
name: linode-test1
linode_id: 12345678
state: restarted
'''
import time
import os
try:
import pycurl
HAS_PYCURL = True
except ImportError:
HAS_PYCURL = False
try:
from linode import api as linode_api
HAS_LINODE = True
except ImportError:
HAS_LINODE = False
def randompass():
'''
Generate a long random password that complies with Linode requirements
'''
# Linode API currently requires the following:
# It must contain at least two of these four character classes:
# lower case letters - upper case letters - numbers - punctuation
# we play it safe :)
import random
import string
# as of python 2.4, this reseeds the PRNG from urandom
random.seed()
lower = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
upper = ''.join(random.choice(string.ascii_uppercase) for x in range(6))
number = ''.join(random.choice(string.digits) for x in range(6))
punct = ''.join(random.choice(string.punctuation) for x in range(6))
p = lower + upper + number + punct
return ''.join(random.sample(p, len(p)))
def getInstanceDetails(api, server):
'''
Return the details of an instance, populating IPs, etc.
'''
instance = {'id': server['LINODEID'],
'name': server['LABEL'],
'public': [],
'private': []}
# Populate with ips
for ip in api.linode_ip_list(LinodeId=server['LINODEID']):
if ip['ISPUBLIC'] and 'ipv4' not in instance:
instance['ipv4'] = ip['IPADDRESS']
instance['fqdn'] = ip['RDNS_NAME']
if ip['ISPUBLIC']:
instance['public'].append({'ipv4': ip['IPADDRESS'],
'fqdn': ip['RDNS_NAME'],
'ip_id': ip['IPADDRESSID']})
else:
instance['private'].append({'ipv4': ip['IPADDRESS'],
'fqdn': ip['RDNS_NAME'],
'ip_id': ip['IPADDRESSID']})
return instance
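# The dictionary returned by getInstanceDetails() has this shape (all values
# below are illustrative, not real Linode data):
#     {'id': 12345678, 'name': 'linode-test1',
#      'ipv4': '203.0.113.10', 'fqdn': 'li1234-10.members.linode.com',
#      'public': [{'ipv4': '203.0.113.10', 'fqdn': '...', 'ip_id': 111}],
#      'private': [{'ipv4': '192.168.133.10', 'fqdn': '...', 'ip_id': 112}]}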
def linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id,
payment_term, password, ssh_pub_key, swap, wait, wait_timeout):
instances = []
changed = False
new_server = False
servers = []
disks = []
configs = []
jobs = []
# See if we can match an existing server details with the provided linode_id
if linode_id:
# For the moment we only consider linode_id as criteria for match
# Later we can use more (size, name, etc.) and update existing
servers = api.linode_list(LinodeId=linode_id)
# Attempt to fetch details about disks and configs only if servers are
# found with linode_id
if servers:
disks = api.linode_disk_list(LinodeId=linode_id)
configs = api.linode_config_list(LinodeId=linode_id)
# Act on the state
if state in ('active', 'present', 'started'):
# TODO: validate all the plan / distribution / datacenter are valid
# Multi step process/validation:
# - need linode_id (entity)
# - need disk_id for linode_id - create disk from distrib
# - need config_id for linode_id - create config (need kernel)
# Any create step triggers a job that need to be waited for.
if not servers:
for arg in ('name', 'plan', 'distribution', 'datacenter'):
if not eval(arg):
module.fail_json(msg='%s is required for active state' % arg)
# Create linode entity
new_server = True
try:
res = api.linode_create(DatacenterID=datacenter, PlanID=plan,
PaymentTerm=payment_term)
linode_id = res['LinodeID']
# Update linode Label to match name
api.linode_update(LinodeId=linode_id, Label='%s_%s' % (linode_id, name))
# Save server
servers = api.linode_list(LinodeId=linode_id)
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
if not disks:
for arg in ('name', 'linode_id', 'distribution'):
if not eval(arg):
module.fail_json(msg='%s is required for active state' % arg)
# Create disks (1 from distrib, 1 for SWAP)
new_server = True
try:
if not password:
# Password is required on creation, if not provided generate one
password = randompass()
if not swap:
swap = 512
# Create data disk
size = servers[0]['TOTALHD'] - swap
if ssh_pub_key:
res = api.linode_disk_createfromdistribution(
LinodeId=linode_id, DistributionID=distribution,
rootPass=password, rootSSHKey=ssh_pub_key,
Label='%s data disk (lid: %s)' % (name, linode_id), Size=size)
else:
res = api.linode_disk_createfromdistribution(
LinodeId=linode_id, DistributionID=distribution, rootPass=password,
Label='%s data disk (lid: %s)' % (name, linode_id), Size=size)
jobs.append(res['JobID'])
# Create SWAP disk
res = api.linode_disk_create(LinodeId=linode_id, Type='swap',
Label='%s swap disk (lid: %s)' % (name, linode_id),
Size=swap)
jobs.append(res['JobID'])
except Exception, e:
# TODO: destroy linode ?
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
if not configs:
for arg in ('name', 'linode_id', 'distribution'):
if not eval(arg):
module.fail_json(msg='%s is required for active state' % arg)
# Check architecture
for distrib in api.avail_distributions():
if distrib['DISTRIBUTIONID'] != distribution:
continue
arch = '32'
if distrib['IS64BIT']:
arch = '64'
break
# Get latest kernel matching arch
for kernel in api.avail_kernels():
if not kernel['LABEL'].startswith('Latest %s' % arch):
continue
kernel_id = kernel['KERNELID']
break
# Get disk list
disks_id = []
for disk in api.linode_disk_list(LinodeId=linode_id):
if disk['TYPE'] == 'ext3':
disks_id.insert(0, str(disk['DISKID']))
continue
disks_id.append(str(disk['DISKID']))
# Trick to get the 9 items in the list
while len(disks_id) < 9:
disks_id.append('')
disks_list = ','.join(disks_id)
# Create config
new_server = True
try:
api.linode_config_create(LinodeId=linode_id, KernelId=kernel_id,
Disklist=disks_list, Label='%s config' % name)
configs = api.linode_config_list(LinodeId=linode_id)
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
# Start / Ensure servers are running
for server in servers:
# Refresh server state
server = api.linode_list(LinodeId=server['LINODEID'])[0]
# Ensure existing servers are up and running, boot if necessary
if server['STATUS'] != 1:
res = api.linode_boot(LinodeId=linode_id)
jobs.append(res['JobID'])
changed = True
# wait here until the instances are up
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time():
# refresh the server details
server = api.linode_list(LinodeId=server['LINODEID'])[0]
# status:
# -2: Boot failed
# 1: Running
if server['STATUS'] in (-2, 1):
break
time.sleep(5)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = 'Timeout waiting on %s (lid: %s)' %
(server['LABEL'], server['LINODEID']))
# Get a fresh copy of the server details
server = api.linode_list(LinodeId=server['LINODEID'])[0]
if server['STATUS'] == -2:
module.fail_json(msg = '%s (lid: %s) failed to boot' %
(server['LABEL'], server['LINODEID']))
# From now on we know the task is a success
# Build instance report
instance = getInstanceDetails(api, server)
# depending on wait flag select the status
if wait:
instance['status'] = 'Running'
else:
instance['status'] = 'Starting'
# Return the root password if this is a new box and no SSH key
# has been provided
if new_server and not ssh_pub_key:
instance['password'] = password
instances.append(instance)
elif state in ('stopped',):
for arg in ('name', 'linode_id'):
if not eval(arg):
module.fail_json(msg='%s is required for active state' % arg)
if not servers:
module.fail_json(msg = 'Server %s (lid: %s) not found' % (name, linode_id))
for server in servers:
instance = getInstanceDetails(api, server)
if server['STATUS'] != 2:
try:
res = api.linode_shutdown(LinodeId=linode_id)
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
instance['status'] = 'Stopping'
changed = True
else:
instance['status'] = 'Stopped'
instances.append(instance)
elif state in ('restarted',):
for arg in ('name', 'linode_id'):
if not eval(arg):
module.fail_json(msg='%s is required for active state' % arg)
if not servers:
module.fail_json(msg = 'Server %s (lid: %s) not found' % (name, linode_id))
for server in servers:
instance = getInstanceDetails(api, server)
try:
res = api.linode_reboot(LinodeId=server['LINODEID'])
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
instance['status'] = 'Restarting'
changed = True
instances.append(instance)
elif state in ('absent', 'deleted'):
for server in servers:
instance = getInstanceDetails(api, server)
try:
api.linode_delete(LinodeId=server['LINODEID'], skipChecks=True)
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
instance['status'] = 'Deleting'
changed = True
instances.append(instance)
# Ease parsing if only 1 instance
if len(instances) == 1:
module.exit_json(changed=changed, instance=instances[0])
module.exit_json(changed=changed, instances=instances)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['active', 'present', 'started',
'deleted', 'absent', 'stopped',
'restarted']),
api_key = dict(),
name = dict(type='str'),
plan = dict(type='int'),
distribution = dict(type='int'),
datacenter = dict(type='int'),
linode_id = dict(type='int', aliases=['lid']),
payment_term = dict(type='int', default=1, choices=[1, 12, 24]),
password = dict(type='str'),
ssh_pub_key = dict(type='str'),
swap = dict(type='int', default=512),
wait = dict(type='bool', default=True),
wait_timeout = dict(default=300),
)
)
if not HAS_PYCURL:
module.fail_json(msg='pycurl required for this module')
if not HAS_LINODE:
module.fail_json(msg='linode-python required for this module')
state = module.params.get('state')
api_key = module.params.get('api_key')
name = module.params.get('name')
plan = module.params.get('plan')
distribution = module.params.get('distribution')
datacenter = module.params.get('datacenter')
linode_id = module.params.get('linode_id')
payment_term = module.params.get('payment_term')
password = module.params.get('password')
ssh_pub_key = module.params.get('ssh_pub_key')
swap = module.params.get('swap')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
# Setup the api_key
if not api_key:
try:
api_key = os.environ['LINODE_API_KEY']
except KeyError, e:
module.fail_json(msg = 'Unable to load %s' % e.message)
# setup the auth
try:
api = linode_api.Api(api_key)
api.test_echo()
except Exception, e:
module.fail_json(msg = '%s' % e.value[0]['ERRORMESSAGE'])
linodeServers(module, api, state, name, plan, distribution, datacenter, linode_id,
payment_term, password, ssh_pub_key, swap, wait, wait_timeout)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
sjsrey/pysal_core | pysal_core/io/IOHandlers/mtx.py | 2 | 6874 | import os.path
import scipy.io as sio
from .. import FileIO
from ...weights.weights import W, WSP
from ...weights.util import full, full2W
from warnings import warn
__author__ = "Myunghwa Hwang <[email protected]>"
__all__ = ["MtxIO"]
class MtxIO(FileIO.FileIO):
"""
Opens, reads, and writes weights file objects in Matrix Market MTX format.
The Matrix Market MTX format is used to facilitate the exchange of matrix data.
In PySAL, it is being tested as a new file format for delivering
the weights information of a spatial weights matrix.
Although the MTX format supports both full and sparse matrices with different
data types, it is assumed that spatial weights files in the mtx format always
use the sparse (or coordinate) format with real data values.
For now, no additional assumption (e.g., symmetry) is made of the structure
of a weights matrix.
With the above assumptions,
the structure of a MTX file containing a spatial weights matrix
can be defined as follows:
%%MatrixMarket matrix coordinate real general <--- header 1 (constant)
% Comments starts <---
% .... | 0 or more comment lines
% Comments ends <---
M N L <--- header 2, rows, columns, entries
I1 J1 A(I1,J1) <---
... | L entry lines
IL JL A(IL,JL) <---
In the MTX format, the index for rows or columns starts with 1.
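For example, a 3x3 row-standardized weights matrix with four nonzero
entries could be stored as follows (an illustrative sketch, not one of
the PySAL example files):
%%MatrixMarket matrix coordinate real general
3 3 4
1 2 1.0
2 1 0.5
2 3 0.5
3 2 1.0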
PySAL uses mtx io tools in scipy.
Thus, it is subject to all limits that scipy currently has.
Reengineering might be required, since scipy currently reads
the entire file into memory.
References
----------
MTX format specification
http://math.nist.gov/MatrixMarket/formats.html
scipy matlab io
http://docs.scipy.org/doc/scipy/reference/tutorial/io.html
"""
FORMATS = ['mtx']
MODES = ['r', 'w']
def __init__(self, *args, **kwargs):
FileIO.FileIO.__init__(self, *args, **kwargs)
self.file = open(self.dataPath, self.mode + 'b')
def read(self, n=-1, sparse=False):
"""
sparse: boolean
if true, return pysal WSP object
if false, return pysal W object
"""
self._sparse = sparse
self._complain_ifclosed(self.closed)
return self._read()
def seek(self, pos):
if pos == 0:
self.file.seek(0)
self.pos = 0
def _read(self):
"""Reads MatrixMarket mtx file
Returns a pysal.weights.weights.W or pysal.weights.weights.WSP object
Examples
--------
Type 'dir(w)' at the interpreter to see what methods are supported.
Open a MatrixMarket mtx file and read it into a pysal weights object
>>> f = pysal.open(pysal.examples.get_path('wmat.mtx'),'r')
>>> w = f.read()
Get the number of observations from the header
>>> w.n
49
Get the mean number of neighbors
>>> w.mean_neighbors
4.7346938775510203
Get neighbor weights for a single observation
>>> w[1]
{2: 0.33329999999999999, 5: 0.33329999999999999, 6: 0.33329999999999999}
>>> f.close()
>>> f = pysal.open(pysal.examples.get_path('wmat.mtx'),'r')
>>> wsp = f.read(sparse=True)
Get the number of observations from the header
>>> wsp.n
49
Get row from the weights matrix. Note that the first row in the sparse
matrix (the 0th row) corresponds to ID 1 from the original mtx file
read in.
>>> print wsp.sparse[0].todense()
[[ 0. 0.3333 0. 0. 0.3333 0.3333 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 0. ]]
"""
if self.pos > 0:
raise StopIteration
mtx = sio.mmread(self.file)
ids = range(1, mtx.shape[0] + 1) # matrix market indexes start at one
wsp = WSP(mtx, ids)
if self._sparse:
w = wsp
else:
w = wsp.to_W()
self.pos += 1
return w
def write(self, obj):
"""
Parameters
----------
.write(weightsObject)
accepts a weights object
Returns
-------
a MatrixMarket mtx file
write a weights object to the opened mtx file.
Examples
--------
>>> import tempfile, pysal, os
>>> testfile = pysal.open(pysal.examples.get_path('wmat.mtx'),'r')
>>> w = testfile.read()
Create a temporary file for this example
>>> f = tempfile.NamedTemporaryFile(suffix='.mtx')
Reassign to new var
>>> fname = f.name
Close the temporary named file
>>> f.close()
Open the new file in write mode
>>> o = pysal.open(fname,'w')
Write the Weights object into the open file
>>> o.write(w)
>>> o.close()
Read in the newly created mtx file
>>> wnew = pysal.open(fname,'r').read()
Compare values from old to new
>>> wnew.pct_nonzero == w.pct_nonzero
True
Clean up temporary file created for this example
>>> os.remove(fname)
Go to the beginning of the test file
>>> testfile.seek(0)
Create a sparse weights instance from the test file
>>> wsp = testfile.read(sparse=True)
Open the new file in write mode
>>> o = pysal.open(fname,'w')
Write the sparse weights object into the open file
>>> o.write(wsp)
>>> o.close()
Read in the newly created mtx file
>>> wsp_new = pysal.open(fname,'r').read(sparse=True)
Compare values from old to new
>>> wsp_new.s0 == wsp.s0
True
Clean up temporary file created for this example
>>> os.remove(fname)
"""
self._complain_ifclosed(self.closed)
if issubclass(type(obj), W) or issubclass(type(obj), WSP):
w = obj.sparse
sio.mmwrite(self.file, w, comment='Generated by PySAL',
field='real', precision=7)
self.pos += 1
else:
raise TypeError("Expected a pysal weights object, got: %s" % (
type(obj)))
def close(self):
self.file.close()
FileIO.FileIO.close(self)
| bsd-3-clause |
sahmed95/sympy | sympy/simplify/ratsimp.py | 49 | 7542 | from __future__ import print_function, division
from sympy.core import symbols, Add, Dummy
from sympy.core.compatibility import combinations_with_replacement
from sympy.core.numbers import Rational
from sympy.polys import cancel, ComputationFailed, parallel_poly_from_expr, reduced, Poly
from sympy.polys.monomials import Monomial, monomial_div
from sympy.polys.polyerrors import DomainError, PolificationFailed
from sympy.utilities.misc import debug
def ratsimp(expr):
"""
Put an expression over a common denominator, cancel and reduce.
Examples
========
>>> from sympy import ratsimp
>>> from sympy.abc import x, y
>>> ratsimp(1/x + 1/y)
(x + y)/(x*y)
"""
f, g = cancel(expr).as_numer_denom()
try:
Q, r = reduced(f, [g], field=True, expand=False)
except ComputationFailed:
return f/g
return Add(*Q) + cancel(r/g)
def ratsimpmodprime(expr, G, *gens, **args):
"""
Simplifies a rational expression ``expr`` modulo the prime ideal
generated by ``G``. ``G`` should be a Groebner basis of the
ideal.
>>> from sympy.simplify.ratsimp import ratsimpmodprime
>>> from sympy.abc import x, y
>>> eq = (x + y**5 + y)/(x - y)
>>> ratsimpmodprime(eq, [x*y**5 - x - y], x, y, order='lex')
(x**2 + x*y + x + y)/(x**2 - x*y)
If ``polynomial`` is False, the algorithm computes a rational
simplification which minimizes the sum of the total degrees of
the numerator and the denominator.
If ``polynomial`` is True, this function just brings numerator and
denominator into a canonical form. This is much faster, but has
potentially worse results.
References
==========
M. Monagan, R. Pearce, Rational Simplification Modulo a Polynomial
Ideal,
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.163.6984
(specifically, the second algorithm)
"""
from sympy import solve
quick = args.pop('quick', True)
polynomial = args.pop('polynomial', False)
debug('ratsimpmodprime', expr)
# usual preparation of polynomials:
num, denom = cancel(expr).as_numer_denom()
try:
polys, opt = parallel_poly_from_expr([num, denom] + G, *gens, **args)
except PolificationFailed:
return expr
domain = opt.domain
if domain.has_assoc_Field:
opt.domain = domain.get_field()
else:
raise DomainError(
"can't compute rational simplification over %s" % domain)
# compute only once
leading_monomials = [g.LM(opt.order) for g in polys[2:]]
tested = set()
def staircase(n):
"""
Compute all monomials with degree less than ``n`` that are
not divisible by any element of ``leading_monomials``.
"""
if n == 0:
return [1]
S = []
for mi in combinations_with_replacement(range(len(opt.gens)), n):
m = [0]*len(opt.gens)
for i in mi:
m[i] += 1
if all([monomial_div(m, lmg) is None for lmg in
leading_monomials]):
S.append(m)
return [Monomial(s).as_expr(*opt.gens) for s in S] + staircase(n - 1)
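# Illustrative example: with gens (x, y) and a single leading monomial
# x*y**5, staircase(2) returns x**2, x*y, y**2, x, y and 1, since no
# monomial of total degree <= 2 is divisible by x*y**5.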
def _ratsimpmodprime(a, b, allsol, N=0, D=0):
"""
Computes a rational simplification of ``a/b`` which minimizes
the sum of the total degrees of the numerator and the denominator.
The algorithm proceeds by looking at ``a * d - b * c`` modulo
the ideal generated by ``G`` for some ``c`` and ``d`` with degree
less than ``a`` and ``b`` respectively.
The coefficients of ``c`` and ``d`` are indeterminates and thus
the coefficients of the normalform of ``a * d - b * c`` are
linear polynomials in these indeterminates.
If these linear polynomials, considered as system of
equations, have a nontrivial solution, then `\frac{a}{b}
\equiv \frac{c}{d}` modulo the ideal generated by ``G``. So,
by construction, the degree of ``c`` and ``d`` is less than
the degree of ``a`` and ``b``, so a simpler representation
has been found.
After a simpler representation has been found, the algorithm
tries to reduce the degree of the numerator and denominator
and returns the result afterwards.
As an extension, if quick=False, we look at all possible degrees such
that the total degree is less than *or equal to* the best current
solution. We retain a list of all solutions of minimal degree, and try
to find the best one at the end.
"""
c, d = a, b
steps = 0
maxdeg = a.total_degree() + b.total_degree()
if quick:
bound = maxdeg - 1
else:
bound = maxdeg
while N + D <= bound:
if (N, D) in tested:
break
tested.add((N, D))
M1 = staircase(N)
M2 = staircase(D)
debug('%s / %s: %s, %s' % (N, D, M1, M2))
Cs = symbols("c:%d" % len(M1), cls=Dummy)
Ds = symbols("d:%d" % len(M2), cls=Dummy)
ng = Cs + Ds
c_hat = Poly(
sum([Cs[i] * M1[i] for i in range(len(M1))]), opt.gens + ng)
d_hat = Poly(
sum([Ds[i] * M2[i] for i in range(len(M2))]), opt.gens + ng)
r = reduced(a * d_hat - b * c_hat, G, opt.gens + ng,
order=opt.order, polys=True)[1]
S = Poly(r, gens=opt.gens).coeffs()
sol = solve(S, Cs + Ds, particular=True, quick=True)
if sol and not all([s == 0 for s in sol.values()]):
c = c_hat.subs(sol)
d = d_hat.subs(sol)
# The "free" variables occuring before as parameters
# might still be in the substituted c, d, so set them
# to the value chosen before:
c = c.subs(dict(list(zip(Cs + Ds, [1] * (len(Cs) + len(Ds))))))
d = d.subs(dict(list(zip(Cs + Ds, [1] * (len(Cs) + len(Ds))))))
c = Poly(c, opt.gens)
d = Poly(d, opt.gens)
if d == 0:
raise ValueError('Ideal not prime?')
allsol.append((c_hat, d_hat, S, Cs + Ds))
if N + D != maxdeg:
allsol = [allsol[-1]]
break
steps += 1
N += 1
D += 1
if steps > 0:
c, d, allsol = _ratsimpmodprime(c, d, allsol, N, D - steps)
c, d, allsol = _ratsimpmodprime(c, d, allsol, N - steps, D)
return c, d, allsol
# preprocessing. this improves performance a bit when deg(num)
# and deg(denom) are large:
num = reduced(num, G, opt.gens, order=opt.order)[1]
denom = reduced(denom, G, opt.gens, order=opt.order)[1]
if polynomial:
return (num/denom).cancel()
c, d, allsol = _ratsimpmodprime(
Poly(num, opt.gens), Poly(denom, opt.gens), [])
if not quick and allsol:
debug('Looking for best minimal solution. Got: %s' % len(allsol))
newsol = []
for c_hat, d_hat, S, ng in allsol:
sol = solve(S, ng, particular=True, quick=False)
newsol.append((c_hat.subs(sol), d_hat.subs(sol)))
c, d = min(newsol, key=lambda x: len(x[0].terms()) + len(x[1].terms()))
if not domain.has_Field:
cn, c = c.clear_denoms(convert=True)
dn, d = d.clear_denoms(convert=True)
r = Rational(cn, dn)
return (c*r.q)/(d*r.p)
| bsd-3-clause |
studio666/cjdns | node_build/dependencies/libuv/build/gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
if is_attrib:
data = data.replace(
"\r", "
").replace(
"\n", "
").replace(
"\t", "	")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
writer.write("\"")
if self.childNodes:
writer.write(">%s" % newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % newl)
class XmlFix(object):
"""Object to manage temporary patching of xml.dom.minidom."""
def __init__(self):
# Preserve current xml.dom.minidom functions.
self.write_data = xml.dom.minidom._write_data
self.writexml = xml.dom.minidom.Element.writexml
# Inject replacement versions of a function and a method.
xml.dom.minidom._write_data = _Replacement_write_data
xml.dom.minidom.Element.writexml = _Replacement_writexml
def Cleanup(self):
if self.write_data:
xml.dom.minidom._write_data = self.write_data
xml.dom.minidom.Element.writexml = self.writexml
self.write_data = None
def __del__(self):
self.Cleanup()
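# Minimal usage sketch (assumed, not part of the original module): patch
# minidom while serializing a document, then restore the stock methods.
#
#     fix = XmlFix()
#     try:
#         doc.writexml(writer, indent='', addindent='  ', newl='\n')
#     finally:
#         fix.Cleanup()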
| gpl-3.0 |
allisony/pyspeckit | pyspeckit/spectrum/models/utils/ammonia_offset_calculation.py | 7 | 2955 | from __future__ import print_function
import re
import numpy as np
from astropy import units as u
from astropy import constants
from astroquery.splatalogue import Splatalogue, utils
# Query splatalogue, keeping all of the line strength columns
# Both Lovas and CDMS/JPL can be used
nh3 = Splatalogue.query_lines(20*u.GHz, 40*u.GHz, chemical_name=' NH3 ',
show_upper_degeneracy=True,
line_strengths=['ls1','ls2','ls3','ls4'])
numbers = {1:'one',
2:'two',
3:'three',
4:'four',
5:'five',
6:'six',
7:'seven',
8:'eight',
9:'nine',}
tbls = {}
for line in (1,2,3,4,5,6,7,8,9):
reline = re.compile('^{n}\({n}\).*-{n}'.format(n=line))
tbl = utils.minimize_table(nh3[np.array([bool(reline.search(x))
if bool(x) else False
for x in
nh3['Resolved QNs']],
dtype='bool')],
columns=['Species', 'Chemical Name', 'Resolved QNs',
'Freq-GHz', 'Meas Freq-GHz',
'Log<sub>10</sub> (A<sub>ij</sub>)',
'CDMS/JPL Intensity',
'Linelist',
'E_U (K)', 'Upper State Degeneracy'])
if len(tbl) == 0:
pass
# Select only TopModel lines from CDMS/JPL
tbls[line] = tbl[tbl['Linelist'] == 'TopModel']
for par in ('tau_wts','voff_lines','aval','freq'):
print(par)
for line in (1,2,3,4,5,6,7,8): # 9 not available
tbl = tbls[line]
degeneracyline = tbl['Upper State Degeneracy']
intensityline = 10**tbl['CDMSJPL_Intensity']
main = np.argmax(intensityline)
centerline = tbl['Freq'][main]
voff_linesline = np.array((centerline-tbl['Freq'])/centerline) * constants.c
aval = (10**tbl['log10_Aij']).sum()
weightline = intensityline/intensityline.sum()
if par == 'freq':
print("'{n}{n}': {f},".format(n=numbers[line], f=centerline))
elif par == 'voff_lines':
print("'{n}{n}': [{v}],".format(n=numbers[line],
v=", ".join(str(x)
for x in voff_linesline.to(u.km/u.s).value)))
elif par == 'tau_wts':
#print "'{n}{n}': {d},".format(n=numbers[line], d=np.array(degeneracyline))
print("'{n}{n}': [{d}],".format(n=numbers[line],
d=", ".join(str(x) for x in weightline)))
elif par == 'aval':
print("'{n}{n}': {d:e},".format(n=numbers[line], d=aval))
| mit |
dusenberrymw/systemml | src/main/python/systemml/converters.py | 8 | 12296 | #-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
__all__ = [ 'getNumCols', 'convertToMatrixBlock', 'convert_caffemodel', 'convert_lmdb_to_jpeg', 'convertToNumPyArr', 'convertToPandasDF', 'SUPPORTED_TYPES' , 'convertToLabeledDF', 'convertImageToNumPyArr', 'getDatasetMean']
import numpy as np
import pandas as pd
import os
import math
from pyspark.context import SparkContext
from scipy.sparse import coo_matrix, spmatrix, csr_matrix
from .classloader import *
SUPPORTED_TYPES = (np.ndarray, pd.DataFrame, spmatrix)
DATASET_MEAN = {'VGG_ILSVRC_19_2014':[103.939, 116.779, 123.68]}
def getNumCols(numPyArr):
if numPyArr.ndim == 1:
return 1
else:
return numPyArr.shape[1]
def get_pretty_str(key, value):
return '\t"' + key + '": ' + str(value) + ',\n'
def save_tensor_csv(tensor, file_path, shouldTranspose):
w = tensor.reshape(tensor.shape[0], -1)
if shouldTranspose:
w = w.T
np.savetxt(file_path, w, delimiter=',')
with open(file_path + '.mtd', 'w') as file:
file.write('{\n\t"data_type": "matrix",\n\t"value_type": "double",\n')
file.write(get_pretty_str('rows', w.shape[0]))
file.write(get_pretty_str('cols', w.shape[1]))
file.write(get_pretty_str('nnz', np.count_nonzero(w)))
file.write('\t"format": "csv",\n\t"description": {\n\t\t"author": "SystemML"\n\t}\n}\n')
def convert_caffemodel(sc, deploy_file, caffemodel_file, output_dir, format="binary", is_caffe_installed=False):
"""
Saves the weights and bias in the caffemodel file to output_dir in the specified format.
This method does not require caffe to be installed.
Parameters
----------
sc: SparkContext
SparkContext
deploy_file: string
Path to the input network file
caffemodel_file: string
Path to the input caffemodel file
output_dir: string
Path to the output directory
format: string
Format of the weights and bias (can be binary, csv or text)
is_caffe_installed: bool
True if caffe is installed
"""
if is_caffe_installed:
if format != 'csv':
raise ValueError('The format ' + str(format) + ' is not supported when caffe is installed. Hint: Please specify format=csv')
import caffe
net = caffe.Net(deploy_file, caffemodel_file, caffe.TEST)
for layerName in net.params.keys():
num_parameters = len(net.params[layerName])
if num_parameters == 0:
continue
elif num_parameters == 2:
# Weights and Biases
layerType = net.layers[list(net._layer_names).index(layerName)].type
shouldTranspose = True if layerType == 'InnerProduct' else False
save_tensor_csv(net.params[layerName][0].data, os.path.join(output_dir, layerName + '_weight.mtx'), shouldTranspose)
save_tensor_csv(net.params[layerName][1].data, os.path.join(output_dir, layerName + '_bias.mtx'), shouldTranspose)
elif num_parameters == 1:
# Only Weight
layerType = net.layers[list(net._layer_names).index(layerName)].type
shouldTranspose = True if layerType == 'InnerProduct' else False
save_tensor_csv(net.params[layerName][0].data, os.path.join(output_dir, layerName + '_weight.mtx'), shouldTranspose)
else:
raise ValueError('Unsupported number of parameters:' + str(num_parameters))
else:
createJavaObject(sc, 'dummy')
utilObj = sc._jvm.org.apache.sysml.api.dl.Utils()
utilObj.saveCaffeModelFile(sc._jsc, deploy_file, caffemodel_file, output_dir, format)
def convert_lmdb_to_jpeg(lmdb_img_file, output_dir):
"""
Saves the images in the lmdb file as jpeg in the output_dir. This method requires caffe to be installed along with lmdb and cv2 package.
To install cv2 package, do `pip install opencv-python`.
Parameters
----------
lmdb_img_file: string
Path to the input lmdb file
output_dir: string
Output directory for images (local filesystem)
"""
import lmdb, caffe, cv2
lmdb_cursor = lmdb.open(lmdb_img_file, readonly=True).begin().cursor()
datum = caffe.proto.caffe_pb2.Datum()
i = 1
for _, value in lmdb_cursor:
datum.ParseFromString(value)
data = caffe.io.datum_to_array(datum)
output_file_path = os.path.join(output_dir, 'file_' + str(i) + '.jpg')
image = np.transpose(data, (1,2,0)) # CxHxW to HxWxC in cv2
cv2.imwrite(output_file_path, image)
i = i + 1
def convertToLabeledDF(sparkSession, X, y=None):
from pyspark.ml.feature import VectorAssembler
if y is not None:
pd1 = pd.DataFrame(X)
pd2 = pd.DataFrame(y, columns=['label'])
pdf = pd.concat([pd1, pd2], axis=1)
inputColumns = ['C' + str(i) for i in pd1.columns]
outputColumns = inputColumns + ['label']
else:
pdf = pd.DataFrame(X)
inputColumns = ['C' + str(i) for i in pdf.columns]
outputColumns = inputColumns
assembler = VectorAssembler(inputCols=inputColumns, outputCol='features')
out = assembler.transform(sparkSession.createDataFrame(pdf, outputColumns))
if y is not None:
return out.select('features', 'label')
else:
return out.select('features')
def _convertSPMatrixToMB(sc, src):
src = coo_matrix(src, dtype=np.float64)
numRows = src.shape[0]
numCols = src.shape[1]
data = src.data
row = src.row.astype(np.int32)
col = src.col.astype(np.int32)
nnz = len(src.col)
buf1 = bytearray(data.tostring())
buf2 = bytearray(row.tostring())
buf3 = bytearray(col.tostring())
createJavaObject(sc, 'dummy')
return sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertSciPyCOOToMB(buf1, buf2, buf3, numRows, numCols, nnz)
def _convertDenseMatrixToMB(sc, src):
numCols = getNumCols(src)
numRows = src.shape[0]
arr = src.ravel().astype(np.float64)
buf = bytearray(arr.tostring())
createJavaObject(sc, 'dummy')
return sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertPy4JArrayToMB(buf, numRows, numCols)
def _copyRowBlock(i, sc, ret, src, numRowsPerBlock, rlen, clen):
rowIndex = int(i / numRowsPerBlock)
tmp = src[i:min(i+numRowsPerBlock, rlen),]
mb = _convertSPMatrixToMB(sc, tmp) if isinstance(src, spmatrix) else _convertDenseMatrixToMB(sc, tmp)
sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.copyRowBlocks(mb, rowIndex, ret, numRowsPerBlock, rlen, clen)
return i
def convertToMatrixBlock(sc, src, maxSizeBlockInMB=8):
if not isinstance(sc, SparkContext):
raise TypeError('sc needs to be of type SparkContext')
isSparse = True if isinstance(src, spmatrix) else False
src = np.asarray(src, dtype=np.float64) if not isSparse else src
if len(src.shape) != 2:
src_type = str(type(src).__name__)
raise TypeError('Expected 2-dimensional ' + src_type + ', instead passed ' + str(len(src.shape)) + '-dimensional ' + src_type)
# Ignoring sparsity for computing numRowsPerBlock for now
numRowsPerBlock = int(math.ceil((maxSizeBlockInMB*1000000) / (src.shape[1]*8)))
multiBlockTransfer = False if numRowsPerBlock >= src.shape[0] else True
if not multiBlockTransfer:
return _convertSPMatrixToMB(sc, src) if isSparse else _convertDenseMatrixToMB(sc, src)
else:
# Since coo_matrix does not have range indexing
src = csr_matrix(src) if isSparse else src
rlen = int(src.shape[0])
clen = int(src.shape[1])
ret = sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.allocateDenseOrSparse(rlen, clen, isSparse)
[ _copyRowBlock(i, sc, ret, src, numRowsPerBlock, rlen, clen) for i in range(0, src.shape[0], numRowsPerBlock) ]
sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.postProcessAfterCopying(ret)
return ret
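# Worked example of the block-size arithmetic above (illustrative numbers):
# with the default maxSizeBlockInMB=8 and a 1,000,000 x 1,000 float64 matrix,
# numRowsPerBlock = ceil(8*1000000 / (1000*8)) = 1000, so the data is copied
# in 1,000 row blocks of roughly 8 MB each.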
def convertToNumPyArr(sc, mb):
if isinstance(sc, SparkContext):
numRows = mb.getNumRows()
numCols = mb.getNumColumns()
createJavaObject(sc, 'dummy')
buf = sc._jvm.org.apache.sysml.runtime.instructions.spark.utils.RDDConverterUtilsExt.convertMBtoPy4JDenseArr(mb)
return np.frombuffer(buf, count=numRows*numCols, dtype=np.float64).reshape((numRows, numCols))
else:
raise TypeError('sc needs to be of type SparkContext') # TODO: We can generalize this by creating py4j gateway ourselves
# Returns the mean of a model if defined otherwise None
def getDatasetMean(dataset_name):
"""
Parameters
----------
dataset_name: Name of the dataset used to train the model. This is an artificial name derived from the training dataset.
Returns
-------
mean: Mean value of the model if it is defined in DATASET_MEAN, else None.
"""
try:
mean = DATASET_MEAN[dataset_name.upper()]
except:
mean = None
return mean
# Example usage: convertImageToNumPyArr(im, img_shape=(3, 224, 224), add_rotated_images=True, add_mirrored_images=True)
# The above call returns a numpy array of shape (6, 50176) in NCHW format
def convertImageToNumPyArr(im, img_shape=None, add_rotated_images=False, add_mirrored_images=False,
color_mode = 'RGB', mean=None):
## Input Parameters
# color_mode: VGG models expect image data in BGR format, unlike most other models which expect RGB;
# set color_mode to 'BGR' to have the image data converted accordingly.
# mean: per-channel mean values subtracted from every pixel. Defaults to None, in which case no mean is subtracted.
if img_shape is not None:
num_channels = img_shape[0]
size = (img_shape[1], img_shape[2])
else:
num_channels = 1 if im.mode == 'L' else 3
size = None
if num_channels != 1 and num_channels != 3:
raise ValueError('Expected the number of channels to be either 1 or 3')
from PIL import Image
if size is not None:
im = im.resize(size, Image.LANCZOS)
expected_mode = 'L' if num_channels == 1 else 'RGB'
if expected_mode != im.mode:
im = im.convert(expected_mode)
def _im2NumPy(im):
if expected_mode == 'L':
return np.asarray(im.getdata()).reshape((1, -1))
else:
im = (np.array(im).astype(np.float))
# (H,W,C) -> (C,H,W)
im = im.transpose(2, 0, 1)
# RGB -> BGR (channels are on axis 0 after the transpose above)
if color_mode == 'BGR':
im = im[::-1, :, :]
# Subtract the per-channel mean
if mean is not None:
for c in range(3):
im[c, :, :] = im[c, :, :] - mean[c]
# (C,H,W) --> (1, C*H*W)
return im.reshape((1, -1))
ret = _im2NumPy(im)
if add_rotated_images:
ret = np.vstack((ret, _im2NumPy(im.rotate(90)), _im2NumPy(im.rotate(180)), _im2NumPy(im.rotate(270)) ))
if add_mirrored_images:
ret = np.vstack((ret, _im2NumPy(im.transpose(Image.FLIP_LEFT_RIGHT)), _im2NumPy(im.transpose(Image.FLIP_TOP_BOTTOM))))
return ret
def convertToPandasDF(X):
if not isinstance(X, pd.DataFrame):
return pd.DataFrame(X, columns=['C' + str(i) for i in range(getNumCols(X))])
return X
| apache-2.0 |
jcass77/mopidy | mopidy/mpd/protocol/music_db.py | 2 | 17104 | from __future__ import absolute_import, unicode_literals
import functools
import itertools
from mopidy.models import Track
from mopidy.mpd import exceptions, protocol, translator
_SEARCH_MAPPING = {
'album': 'album',
'albumartist': 'albumartist',
'any': 'any',
'artist': 'artist',
'comment': 'comment',
'composer': 'composer',
'date': 'date',
'file': 'uri',
'filename': 'uri',
'genre': 'genre',
'performer': 'performer',
'title': 'track_name',
'track': 'track_no'}
_LIST_MAPPING = {
'album': 'album',
'albumartist': 'albumartist',
'artist': 'artist',
'composer': 'composer',
'date': 'date',
'genre': 'genre',
'performer': 'performer'}
def _query_from_mpd_search_parameters(parameters, mapping):
query = {}
parameters = list(parameters)
while parameters:
# TODO: does it matter that this is now case insensitive
field = mapping.get(parameters.pop(0).lower())
if not field:
raise exceptions.MpdArgError('incorrect arguments')
if not parameters:
raise ValueError
value = parameters.pop(0)
if value.strip():
query.setdefault(field, []).append(value)
return query
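# For example (illustrative), the MPD arguments
# ('artist', 'ABBA', 'album', 'Gold: Greatest Hits') are mapped through
# _SEARCH_MAPPING into {'artist': ['ABBA'], 'album': ['Gold: Greatest Hits']}.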
def _get_field(field, search_results):
return list(itertools.chain(*[getattr(r, field) for r in search_results]))
_get_albums = functools.partial(_get_field, 'albums')
_get_artists = functools.partial(_get_field, 'artists')
_get_tracks = functools.partial(_get_field, 'tracks')
def _album_as_track(album):
return Track(
uri=album.uri,
name='Album: ' + album.name,
artists=album.artists,
album=album,
date=album.date)
def _artist_as_track(artist):
return Track(
uri=artist.uri,
name='Artist: ' + artist.name,
artists=[artist])
@protocol.commands.add('count')
def count(context, *args):
"""
*musicpd.org, music database section:*
``count {TAG} {NEEDLE}``
Counts the number of songs and their total playtime in the db
matching ``TAG`` exactly.
*GMPC:*
- does not add quotes around the tag argument.
- use multiple tag-needle pairs to make more specific searches.
"""
try:
query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING)
except ValueError:
raise exceptions.MpdArgError('incorrect arguments')
results = context.core.library.find_exact(**query).get()
result_tracks = _get_tracks(results)
return [
('songs', len(result_tracks)),
('playtime', sum(track.length for track in result_tracks) / 1000),
]
@protocol.commands.add('find')
def find(context, *args):
"""
*musicpd.org, music database section:*
``find {TYPE} {WHAT}``
Finds songs in the db that are exactly ``WHAT``. ``TYPE`` can be any
tag supported by MPD, or one of the two special parameters - ``file``
to search by full path (relative to database root), and ``any`` to
match against all available tags. ``WHAT`` is what to find.
*GMPC:*
- does not add quotes around the field argument.
- also uses ``find album "[ALBUM]" artist "[ARTIST]"`` to list album
tracks.
*ncmpc:*
- does not add quotes around the field argument.
- capitalizes the type argument.
*ncmpcpp:*
- also uses the search type "date".
- uses "file" instead of "filename".
"""
try:
query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING)
except ValueError:
return
results = context.core.library.find_exact(**query).get()
result_tracks = []
if ('artist' not in query and
'albumartist' not in query and
'composer' not in query and
'performer' not in query):
result_tracks += [_artist_as_track(a) for a in _get_artists(results)]
if 'album' not in query:
result_tracks += [_album_as_track(a) for a in _get_albums(results)]
result_tracks += _get_tracks(results)
return translator.tracks_to_mpd_format(result_tracks)
@protocol.commands.add('findadd')
def findadd(context, *args):
"""
*musicpd.org, music database section:*
``findadd {TYPE} {WHAT}``
Finds songs in the db that are exactly ``WHAT`` and adds them to
current playlist. Parameters have the same meaning as for ``find``.
"""
try:
query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING)
except ValueError:
return
results = context.core.library.find_exact(**query).get()
context.core.tracklist.add(_get_tracks(results))
@protocol.commands.add('list')
def list_(context, *args):
"""
*musicpd.org, music database section:*
``list {TYPE} [ARTIST]``
Lists all tags of the specified type. ``TYPE`` should be ``album``,
``artist``, ``albumartist``, ``date``, or ``genre``.
``ARTIST`` is an optional parameter when type is ``album``,
``date``, or ``genre``. This filters the result list by an artist.
*Clarifications:*
The musicpd.org documentation for ``list`` is far from complete. The
command also supports the following variant:
``list {TYPE} {QUERY}``
Where ``QUERY`` applies to all ``TYPE``. ``QUERY`` is one or more pairs
of a field name and a value. If the ``QUERY`` consists of more than one
pair, the pairs are AND-ed together to find the result. Examples of
valid queries and what they should return:
``list "artist" "artist" "ABBA"``
List artists where the artist name is "ABBA". Response::
Artist: ABBA
OK
``list "album" "artist" "ABBA"``
Lists albums where the artist name is "ABBA". Response::
Album: More ABBA Gold: More ABBA Hits
Album: Absolute More Christmas
Album: Gold: Greatest Hits
OK
``list "artist" "album" "Gold: Greatest Hits"``
Lists artists where the album name is "Gold: Greatest Hits".
Response::
Artist: ABBA
OK
``list "artist" "artist" "ABBA" "artist" "TLC"``
Lists artists where the artist name is "ABBA" *and* "TLC". Should
never match anything. Response::
OK
``list "date" "artist" "ABBA"``
Lists dates where artist name is "ABBA". Response::
Date:
Date: 1992
Date: 1993
OK
``list "date" "artist" "ABBA" "album" "Gold: Greatest Hits"``
Lists dates where artist name is "ABBA" and album name is "Gold:
Greatest Hits". Response::
Date: 1992
OK
``list "genre" "artist" "The Rolling Stones"``
Lists genres where artist name is "The Rolling Stones". Response::
Genre:
Genre: Rock
OK
*GMPC:*
- does not add quotes around the field argument.
*ncmpc:*
- does not add quotes around the field argument.
- capitalizes the field argument.
"""
parameters = list(args)
if not parameters:
raise exceptions.MpdArgError('incorrect arguments')
field = parameters.pop(0).lower()
if field not in _LIST_MAPPING:
raise exceptions.MpdArgError('incorrect arguments')
if len(parameters) == 1:
if field != 'album':
raise exceptions.MpdArgError('should be "Album" for 3 arguments')
return _list_album(context, {'artist': parameters})
try:
query = _query_from_mpd_search_parameters(parameters, _LIST_MAPPING)
except exceptions.MpdArgError as e:
e.message = 'not able to parse args'
raise
except ValueError:
return
if field == 'artist':
return _list_artist(context, query)
if field == 'albumartist':
return _list_albumartist(context, query)
elif field == 'album':
return _list_album(context, query)
elif field == 'composer':
return _list_composer(context, query)
elif field == 'performer':
return _list_performer(context, query)
elif field == 'date':
return _list_date(context, query)
elif field == 'genre':
return _list_genre(context, query)
def _list_artist(context, query):
artists = set()
results = context.core.library.find_exact(**query).get()
for track in _get_tracks(results):
for artist in track.artists:
if artist.name:
artists.add(('Artist', artist.name))
return artists
def _list_albumartist(context, query):
albumartists = set()
results = context.core.library.find_exact(**query).get()
for track in _get_tracks(results):
if track.album:
for artist in track.album.artists:
if artist.name:
albumartists.add(('AlbumArtist', artist.name))
return albumartists
def _list_album(context, query):
albums = set()
results = context.core.library.find_exact(**query).get()
for track in _get_tracks(results):
if track.album and track.album.name:
albums.add(('Album', track.album.name))
return albums
def _list_composer(context, query):
composers = set()
results = context.core.library.find_exact(**query).get()
for track in _get_tracks(results):
for composer in track.composers:
if composer.name:
composers.add(('Composer', composer.name))
return composers
def _list_performer(context, query):
performers = set()
results = context.core.library.find_exact(**query).get()
for track in _get_tracks(results):
for performer in track.performers:
if performer.name:
performers.add(('Performer', performer.name))
return performers
def _list_date(context, query):
dates = set()
results = context.core.library.find_exact(**query).get()
for track in _get_tracks(results):
if track.date:
dates.add(('Date', track.date))
return dates
def _list_genre(context, query):
genres = set()
results = context.core.library.find_exact(**query).get()
for track in _get_tracks(results):
if track.genre:
genres.add(('Genre', track.genre))
return genres
@protocol.commands.add('listall')
def listall(context, uri=None):
"""
*musicpd.org, music database section:*
``listall [URI]``
Lists all songs and directories in ``URI``.
"""
result = []
for path, track_ref in context.browse(uri, lookup=False):
if not track_ref:
result.append(('directory', path))
else:
result.append(('file', track_ref.uri))
if not result:
raise exceptions.MpdNoExistError('Not found')
return result
@protocol.commands.add('listallinfo')
def listallinfo(context, uri=None):
"""
*musicpd.org, music database section:*
``listallinfo [URI]``
Same as ``listall``, except it also returns metadata info in the
same format as ``lsinfo``.
"""
result = []
for path, lookup_future in context.browse(uri):
if not lookup_future:
result.append(('directory', path))
else:
for track in lookup_future.get():
result.extend(translator.track_to_mpd_format(track))
return result
@protocol.commands.add('lsinfo')
def lsinfo(context, uri=None):
"""
*musicpd.org, music database section:*
``lsinfo [URI]``
Lists the contents of the directory ``URI``.
When listing the root directory, this currently returns the list of
stored playlists. This behavior is deprecated; use
``listplaylists`` instead.
    MPD returns the same result, including both playlists and the files and
    directories located at the root level, for ``lsinfo``, ``lsinfo ""``, and
    ``lsinfo "/"`` alike.
"""
result = []
for path, lookup_future in context.browse(uri, recursive=False):
if not lookup_future:
result.append(('directory', path.lstrip('/')))
else:
tracks = lookup_future.get()
if tracks:
result.extend(translator.track_to_mpd_format(tracks[0]))
if uri in (None, '', '/'):
result.extend(protocol.stored_playlists.listplaylists(context))
return result
@protocol.commands.add('rescan')
def rescan(context, uri=None):
"""
*musicpd.org, music database section:*
``rescan [URI]``
Same as ``update``, but also rescans unmodified files.
"""
return {'updating_db': 0} # TODO
@protocol.commands.add('search')
def search(context, *args):
"""
*musicpd.org, music database section:*
``search {TYPE} {WHAT} [...]``
Searches for any song that contains ``WHAT``. Parameters have the same
meaning as for ``find``, except that search is not case sensitive.
*GMPC:*
- does not add quotes around the field argument.
- uses the undocumented field ``any``.
- searches for multiple words like this::
search any "foo" any "bar" any "baz"
*ncmpc:*
- does not add quotes around the field argument.
- capitalizes the field argument.
*ncmpcpp:*
- also uses the search type "date".
- uses "file" instead of "filename".
"""
try:
query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING)
except ValueError:
return
results = context.core.library.search(**query).get()
artists = [_artist_as_track(a) for a in _get_artists(results)]
albums = [_album_as_track(a) for a in _get_albums(results)]
tracks = _get_tracks(results)
return translator.tracks_to_mpd_format(artists + albums + tracks)
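# Rough usage sketch (not part of the original module): a client line like
# ``search any "foo"`` reaches this handler as ``search(context, 'any', 'foo')``
# and, assuming ``_SEARCH_MAPPING`` passes ``any`` through unchanged, becomes
# ``context.core.library.search(any=['foo'])`` before the matching artists,
# albums and tracks are flattened into MPD's track format.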
@protocol.commands.add('searchadd')
def searchadd(context, *args):
"""
*musicpd.org, music database section:*
``searchadd {TYPE} {WHAT} [...]``
Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds
them to current playlist.
Parameters have the same meaning as for ``find``, except that search is
not case sensitive.
"""
try:
query = _query_from_mpd_search_parameters(args, _SEARCH_MAPPING)
except ValueError:
return
results = context.core.library.search(**query).get()
context.core.tracklist.add(_get_tracks(results))
@protocol.commands.add('searchaddpl')
def searchaddpl(context, *args):
"""
*musicpd.org, music database section:*
``searchaddpl {NAME} {TYPE} {WHAT} [...]``
Searches for any song that contains ``WHAT`` in tag ``TYPE`` and adds
them to the playlist named ``NAME``.
If a playlist by that name doesn't exist it is created.
Parameters have the same meaning as for ``find``, except that search is
not case sensitive.
"""
parameters = list(args)
if not parameters:
raise exceptions.MpdArgError('incorrect arguments')
playlist_name = parameters.pop(0)
try:
query = _query_from_mpd_search_parameters(parameters, _SEARCH_MAPPING)
except ValueError:
return
results = context.core.library.search(**query).get()
playlist = context.lookup_playlist_from_name(playlist_name)
if not playlist:
playlist = context.core.playlists.create(playlist_name).get()
tracks = list(playlist.tracks) + _get_tracks(results)
playlist = playlist.copy(tracks=tracks)
context.core.playlists.save(playlist)
@protocol.commands.add('update')
def update(context, uri=None):
"""
*musicpd.org, music database section:*
``update [URI]``
Updates the music database: find new files, remove deleted files,
update modified files.
``URI`` is a particular directory or song/file to update. If you do
not specify it, everything is updated.
Prints ``updating_db: JOBID`` where ``JOBID`` is a positive number
identifying the update job. You can read the current job id in the
``status`` response.
"""
return {'updating_db': 0} # TODO
# TODO: add at least reflection tests before adding NotImplemented version
# @protocol.commands.add('readcomments')
def readcomments(context, uri):
"""
*musicpd.org, music database section:*
``readcomments [URI]``
Read "comments" (i.e. key-value pairs) from the file specified by
"URI". This "URI" can be a path relative to the music directory or a
URL in the form "file:///foo/bar.ogg".
This command may be used to list metadata of remote files (e.g. URI
beginning with "http://" or "smb://").
The response consists of lines in the form "KEY: VALUE". Comments with
suspicious characters (e.g. newlines) are ignored silently.
The meaning of these depends on the codec, and not all decoder plugins
support it. For example, on Ogg files, this lists the Vorbis comments.
"""
pass
| apache-2.0 |
nicolasnoble/grpc | examples/python/interceptors/default_value/default_value_client_interceptor.py | 18 | 2058 | # Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interceptor that adds headers to outgoing requests."""
import grpc
class _ConcreteValue(grpc.Future):
def __init__(self, result):
self._result = result
def cancel(self):
return False
def cancelled(self):
return False
def running(self):
return False
def done(self):
return True
def result(self, timeout=None):
return self._result
def exception(self, timeout=None):
return None
def traceback(self, timeout=None):
return None
def add_done_callback(self, fn):
fn(self._result)
class DefaultValueClientInterceptor(grpc.UnaryUnaryClientInterceptor,
grpc.StreamUnaryClientInterceptor):
def __init__(self, value):
self._default = _ConcreteValue(value)
def _intercept_call(self, continuation, client_call_details,
request_or_iterator):
response = continuation(client_call_details, request_or_iterator)
return self._default if response.exception() else response
def intercept_unary_unary(self, continuation, client_call_details, request):
return self._intercept_call(continuation, client_call_details, request)
def intercept_stream_unary(self, continuation, client_call_details,
request_iterator):
return self._intercept_call(continuation, client_call_details,
request_iterator)
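# Usage sketch (not part of the original example; ``my_pb2``/``my_pb2_grpc``
# and the channel target are placeholders for generated protobuf modules and a
# running server):
#
#   default_value = my_pb2.Response(message='unavailable')
#   interceptor = DefaultValueClientInterceptor(default_value)
#   channel = grpc.intercept_channel(
#       grpc.insecure_channel('localhost:50051'), interceptor)
#   stub = my_pb2_grpc.MyServiceStub(channel)  # failed RPCs now yield default_value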
| apache-2.0 |
takeshineshiro/heat | heat/objects/event.py | 7 | 3074 | # Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Event object
"""
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
from heat.db import api as db_api
from heat.objects import fields as heat_fields
class Event(base.VersionedObject, base.VersionedObjectDictCompat):
fields = {
'id': fields.IntegerField(),
'stack_id': fields.StringField(),
'uuid': fields.StringField(),
'resource_action': fields.StringField(nullable=True),
'resource_status': fields.StringField(nullable=True),
'resource_name': fields.StringField(nullable=True),
'physical_resource_id': fields.StringField(nullable=True),
'resource_status_reason': fields.StringField(nullable=True),
'resource_type': fields.StringField(nullable=True),
'resource_properties': heat_fields.JsonField(nullable=True),
'created_at': fields.DateTimeField(read_only=True),
'updated_at': fields.DateTimeField(nullable=True),
}
@staticmethod
def _from_db_object(context, event, db_event):
for field in event.fields:
event[field] = db_event[field]
event._context = context
event.obj_reset_changes()
return event
@classmethod
def get_by_id(cls, context, event_id):
db_event = db_api.event_get(context, event_id)
return cls._from_db_object(context, cls(context), db_event)
@classmethod
def get_all(cls, context):
return [cls._from_db_object(context, cls(), db_event)
for db_event in db_api.event_get_all(context)]
@classmethod
def get_all_by_tenant(cls, context, **kwargs):
return [cls._from_db_object(context, cls(), db_event)
for db_event in db_api.event_get_all_by_tenant(context,
**kwargs)]
@classmethod
def get_all_by_stack(cls, context, stack_id, **kwargs):
return [cls._from_db_object(context, cls(), db_event)
for db_event in db_api.event_get_all_by_stack(context,
stack_id,
**kwargs)]
@classmethod
def count_all_by_stack(cls, context, stack_id):
return db_api.event_count_all_by_stack(context, stack_id)
@classmethod
def create(cls, context, values):
return cls._from_db_object(context, cls(),
db_api.event_create(context, values))
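# Hypothetical usage sketch (not part of the original module); ``context`` is a
# request context and ``values`` a dict matching the fields declared above:
#
#   event = Event.create(context, values)
#   stack_events = Event.get_all_by_stack(context, stack_id)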
| apache-2.0 |
capone212/crashtec | src/crashtec/symbolsmngr/bindownloader.py | 1 | 6478 | '''
Created on 27.03.2013
HB! :)
@author: capone
'''
# TODO: Style!!! rewrite it all
import urllib
import os
import socket
import urlparse
import logging
from crashtec.db.provider import routines as dbroutines
from crashtec.db.provider import filter
from crashtec.config import symbolsmngrconfig
from crashtec.utils.exceptions import CtGeneralError
from crashtec.utils.exceptions import CtCriticalError
import unzipBinaries
import dbmodel
import definitions
_logger = logging.getLogger('symbolsmngr')
# Strips dangerous info (like credentials)
def safe_log_url(url):
return url
class Cache(object):
def __init__(self, instance_name):
self.agent_name = instance_name
    # Returns the appropriate directory path if the specified url is in the
    # cache, returns None otherwise.
def lookup_binary_path(self, binary_url):
d = dbmodel
f = filter.FieldFilterFactory
stripped_url = self.strip_url(binary_url)
cursor = dbroutines.select_from(d.SYMBOLS_TABLE, db_filter=(
(f(d.SYMBOLS_URL) == stripped_url) &
(f(d.SYMBOLS_AGENT_ID) == self.agent_name))
)
record = cursor.fetch_one()
if record:
return record[d.SYMBOLS_LOCAL_DIR]
# Makes new record in cache
# Throws on error.
def register_binary(self, url, binary_dirrectory):
stripped_url = self.strip_url(url)
record = dbroutines.Record()
d = dbmodel
record[d.SYMBOLS_TRANSACTION_ID] = definitions.EMPTY_TRANSACTION
record[d.SYMBOLS_URL] = stripped_url
record[d.SYMBOLS_AGENT_ID] = self.agent_name
record[d.SYMBOLS_LOCAL_DIR] = binary_dirrectory
dbroutines.create_new_record(d.SYMBOLS_TABLE, record)
    # Strips all non-persistent info (like the server address) from the url.
def strip_url(self, binary_url):
parsed_url = urlparse.urlparse(binary_url)
if (not parsed_url):
raise CtCriticalError("Could not parse url: %s" %
safe_log_url(binary_url))
return parsed_url.path
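# Illustration (not part of the original module): ``strip_url`` keeps only the
# path component of the url, e.g.
#   'http://symbols.example.com/builds/1.2/app.zip' -> '/builds/1.2/app.zip'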
class StorageProvider(object):
    # Returns a directory where the binary may be placed.
    # The path is unique for the passed url
    # and guaranteed to be empty (at least the first time).
    # Throws on errors.
def __init__(self, config = symbolsmngrconfig):
self.config = config
def create_place_for_binary(self, binary_url):
parsed_url = urlparse.urlparse(binary_url)
if (not parsed_url):
raise CtCriticalError("Could not parse url: %s" %
safe_log_url(binary_url))
dirrectory = os.path.normpath(self.config.BINARY_LOCAL_ROOT +
parsed_url.path)
try:
if (not os.path.exists(dirrectory)):
os.makedirs(dirrectory)
except OSError as err:
raise CtGeneralError("Error while creating directory: %s" % err)
return dirrectory
class HttpDownloader(object):
# Downloads specified url to destination folder.
# Returns downloaded file path, throws on errors.
def download_binary(self, url, dest_folder):
self.reset_state()
time_out = socket.getdefaulttimeout()
parsed_url = urlparse.urlparse(url)
file_name = os.path.join(dest_folder, os.path.basename(parsed_url.path))
try:
# TODO: make it configurable
socket.setdefaulttimeout(10)
urllib.urlretrieve(url, file_name, self.reportHook);
except Exception as exc:
raise CtGeneralError("Failed to download %s error: %s" % (url, exc))
finally:
socket.setdefaulttimeout(time_out)
return file_name
def reset_state(self):
self._percents = 0;
def reportHook(self, downloaded, blockSize, totalFileSize):
blocks_amount = totalFileSize / blockSize
if (blocks_amount == 0):
return
percents = (downloaded * 100) / blocks_amount
# report every X percent downloaded
REPORT_EACH_PERCENT = 10
percents = (percents / REPORT_EACH_PERCENT) * REPORT_EACH_PERCENT;
if (percents != self._percents):
_logger.info("Downloaded %s%%", percents)
self._percents = percents
class ZipUnpacker(object):
# Unpacks specified binary package and returns destination folder.
# Throws on errors.
def unpack(self, package_file, destination):
_logger.info("Unzipping binary %s ..." % package_file)
binary_dirrectory = unzipBinaries.unzipBinary(package_file, destination)
if (not binary_dirrectory):
raise CtGeneralError("Can't extract zip file %s" % package_file)
return binary_dirrectory
class BinaryDownloader(object):
def __init__(self, cache, storage, downloader, unpacker):
self.cache = cache
self.storage = storage
self.downloader = downloader
self.unpacker = unpacker
# Downloads binaries from url, unpack them and return
# destination directory.
def download_and_unpack(self, url):
# Lookup cache for binary first.
cached_path = self.cache.lookup_binary_path(url)
if cached_path:
_logger.info("Detected binary dir: %s", cached_path)
return cached_path
_logger.debug("Start processing binary url : %s", safe_log_url(url))
destination_folder = self.storage.create_place_for_binary(url)
package_file = self.downloader.download_binary(url, destination_folder)
unpacked_binaries_folder = self.unpacker.unpack(package_file,
destination_folder)
self.drop_package_file(package_file)
self.cache.register_binary(url, unpacked_binaries_folder)
_logger.debug("Processing binary url finished : %s", safe_log_url(url))
return unpacked_binaries_folder
# Feel free to override it in subclasses
def drop_package_file(self, package_file):
# Delete package_file file
os.remove(package_file)
def craete_default_downloader(instance_name):
return BinaryDownloader(Cache(instance_name),
StorageProvider(),
HttpDownloader(),
ZipUnpacker())
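# Usage sketch (illustrative only; the agent name and URL are placeholders for
# a real agent instance and a reachable zip archive of binaries):
#
#   downloader = craete_default_downloader('symbols-agent-01')
#   binary_dir = downloader.download_and_unpack(
#       'http://symbols.example.com/builds/1.2/app.zip')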
| gpl-3.0 |
kinverarity1/pyexperiment | tests/test_logger.py | 3 | 5395 | """Tests the state module of pyexperiment
Written by Peter Duerr
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import unittest
import io
import logging
import tempfile
import os
import time
import re
from pyexperiment import log
from pyexperiment import Logger
from pyexperiment.utils.stdout_redirector import stdout_redirector
class TestLogger(unittest.TestCase):
"""Test basic logging
"""
def setUp(self):
"""Setup test fixture
"""
self.log_stream = io.StringIO()
Logger.CONSOLE_STREAM_HANDLER = logging.StreamHandler(self.log_stream)
log.reset_instance()
def tearDown(self):
"""Teardown test fixture
"""
Logger.CONSOLE_STREAM_HANDLER = logging.StreamHandler()
log.close()
log.reset_instance()
def test_fatal_console_logging(self):
"""Test the most basic console logging at the fatal level
"""
log.initialize(console_level=logging.INFO)
log.fatal("Test")
log.close()
# Something should be logged
self.assertNotEqual(len(self.log_stream.getvalue()), 0)
log.initialize(console_level=logging.DEBUG)
log.fatal("Test")
log.close()
# Something should be logged
self.assertNotEqual(len(self.log_stream.getvalue()), 0)
def test_info_console_logging(self):
"""Test the most basic console logging at the fatal level
"""
log.initialize(console_level=logging.FATAL)
log.info("Test")
log.close()
        # Nothing should be logged at the FATAL-only threshold
self.assertEqual(len(self.log_stream.getvalue()), 0)
log.initialize(console_level=logging.DEBUG)
log.info("Test")
log.close()
# Something should be logged
self.assertNotEqual(len(self.log_stream.getvalue()), 0)
def test_pre_init_logger(self):
"""Test that logging before initializing the logger works
"""
log.fatal("Test")
# Nothing should be logged yet
self.assertEqual(len(self.log_stream.getvalue()), 0)
log.initialize()
# Something should be logged here
self.assertNotEqual(len(self.log_stream.getvalue()), 0)
def test_file_logger_writes_to_file(self):
"""Test logging to file writes something to the log file
"""
with tempfile.NamedTemporaryFile() as temp:
log.initialize(filename=temp.name, no_backups=0)
log.fatal("Test")
log.close()
# Make sure file exists
self.assertTrue(os.path.isfile(temp.name))
lines = temp.readlines()
# There should be exactly one line in the file now
self.assertEqual(len(lines), 1)
def test_timing_logger_logs(self):
"""Test timing code logs a message
"""
# Nothing should be logged yet
self.assertEqual(len(self.log_stream.getvalue()), 0)
log.initialize()
# Still, nothing should be logged yet
self.assertEqual(len(self.log_stream.getvalue()), 0)
with log.timed(level=logging.FATAL):
_ = 1 + 1
log.close()
# Something should be logged
self.assertNotEqual(len(self.log_stream.getvalue()), 0)
def test_print_timings_prints(self):
"""Test timing code and printing really prints a message
"""
buf = io.StringIO()
# Nothing should be logged yet
self.assertEqual(len(self.log_stream.getvalue()), 0)
log.initialize()
# Still, nothing should be logged yet
self.assertEqual(len(self.log_stream.getvalue()), 0)
with log.timed(level=logging.FATAL):
_ = 1 + 1
with stdout_redirector(buf):
log.print_timings()
# Something should be printed
self.assertNotEqual(len(buf.getvalue()), 0)
log.close()
# Something should be logged
self.assertNotEqual(len(self.log_stream.getvalue()), 0)
def test_print_timings_correct(self):
"""Test timing is about right
"""
buf = io.StringIO()
# Nothing should be logged yet
self.assertEqual(len(self.log_stream.getvalue()), 0)
log.initialize()
# Still, nothing should be logged yet
self.assertEqual(len(self.log_stream.getvalue()), 0)
for _ in range(3):
with log.timed("Foo", level=logging.FATAL):
time.sleep(0.01)
with stdout_redirector(buf):
log.print_timings()
# Should print correct stats
self.assertRegexpMatches(buf.getvalue(), r'\'Foo\'')
self.assertRegexpMatches(buf.getvalue(), r'3 times')
self.assertRegexpMatches(buf.getvalue(), r'total = 0.03')
self.assertRegexpMatches(buf.getvalue(), r'median = 0.01')
log.close()
# Correct timings should be logged three times
self.assertRegexpMatches(self.log_stream.getvalue(), r'Foo')
self.assertEqual(len(re.findall(r'Foo',
self.log_stream.getvalue())), 3)
self.assertRegexpMatches(self.log_stream.getvalue(), r'took 0.01')
self.assertEqual(len(re.findall(r'took 0.01',
self.log_stream.getvalue())), 3)
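# These tests are normally collected by a test runner; a direct invocation
# (not part of the original file) would be, for example:
#
#   python -m unittest tests.test_logger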
| mit |
piiswrong/mxnet | cpp-package/scripts/lint.py | 41 | 7350 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=protected-access, unused-variable, locally-disabled, redefined-variable-type
"""Lint helper to generate lint summary of source.
Copyright by Contributors
"""
import codecs
import sys
import re
import os
import cpplint
from cpplint import _cpplint_state
from pylint import epylint
CXX_SUFFIX = set(['cc', 'c', 'cpp', 'h', 'cu', 'hpp'])
PYTHON_SUFFIX = set(['py'])
class LintHelper(object):
"""Class to help runing the lint and records summary"""
@staticmethod
def _print_summary_map(strm, result_map, ftype):
"""Print summary of certain result map."""
if len(result_map) == 0:
return 0
npass = len([x for k, x in result_map.iteritems() if len(x) == 0])
strm.write('=====%d/%d %s files passed check=====\n' % (npass, len(result_map), ftype))
for fname, emap in result_map.iteritems():
if len(emap) == 0:
continue
strm.write('%s: %d Errors of %d Categories map=%s\n' % (
fname, sum(emap.values()), len(emap), str(emap)))
return len(result_map) - npass
def __init__(self):
self.project_name = None
self.cpp_header_map = {}
self.cpp_src_map = {}
self.python_map = {}
pylint_disable = ['superfluous-parens',
'too-many-instance-attributes',
'too-few-public-methods']
# setup pylint
self.pylint_opts = ['--extension-pkg-whitelist=numpy',
'--disable=' + ','.join(pylint_disable)]
self.pylint_cats = set(['error', 'warning', 'convention', 'refactor'])
# setup cpp lint
cpplint_args = ['.', '--extensions=' + (','.join(CXX_SUFFIX))]
_ = cpplint.ParseArguments(cpplint_args)
cpplint._SetFilters(','.join(['-build/c++11',
'-build/namespaces',
'-build/include',
'-build/header_guard',
'+build/include_what_you_use',
'+build/include_order']))
cpplint._SetCountingStyle('toplevel')
cpplint._line_length = 100
def process_cpp(self, path, suffix):
"""Process a cpp file."""
_cpplint_state.ResetErrorCounts()
cpplint.ProcessFile(str(path), _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
errors = _cpplint_state.errors_by_category.copy()
if suffix == 'h':
self.cpp_header_map[str(path)] = errors
else:
self.cpp_src_map[str(path)] = errors
def process_python(self, path):
"""Process a python file."""
(pylint_stdout, pylint_stderr) = epylint.py_run(
' '.join([str(path)] + self.pylint_opts), return_std=True)
emap = {}
print pylint_stderr.read()
for line in pylint_stdout:
sys.stderr.write(line)
key = line.split(':')[-1].split('(')[0].strip()
if key not in self.pylint_cats:
continue
if key not in emap:
emap[key] = 1
else:
emap[key] += 1
sys.stderr.write('\n')
self.python_map[str(path)] = emap
def print_summary(self, strm):
"""Print summary of lint."""
nerr = 0
nerr += LintHelper._print_summary_map(strm, self.cpp_header_map, 'cpp-header')
        nerr += LintHelper._print_summary_map(strm, self.cpp_src_map, 'cpp-source')
nerr += LintHelper._print_summary_map(strm, self.python_map, 'python')
if nerr == 0:
strm.write('All passed!\n')
else:
strm.write('%d files failed lint\n' % nerr)
return nerr
# singleton helper for lint check
_HELPER = LintHelper()
def get_header_guard_dmlc(filename):
"""Get Header Guard Convention for DMLC Projects.
For headers in include, directly use the path
For headers in src, use project name plus path
Examples: with project-name = dmlc
        include/dmlc/timer.h -> DMLC_TIMER_H_
src/io/libsvm_parser.h -> DMLC_IO_LIBSVM_PARSER_H_
"""
fileinfo = cpplint.FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
inc_list = ['include', 'api', 'wrapper']
if file_path_from_root.find('src/') != -1 and _HELPER.project_name is not None:
idx = file_path_from_root.find('src/')
file_path_from_root = _HELPER.project_name + file_path_from_root[idx + 3:]
else:
for spath in inc_list:
prefix = spath + os.sep
if file_path_from_root.startswith(prefix):
file_path_from_root = re.sub('^' + prefix, '', file_path_from_root)
break
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
cpplint.GetHeaderGuardCPPVariable = get_header_guard_dmlc
def process(fname, allow_type):
"""Process a file."""
fname = str(fname)
# HACK: ignore op.h which is automatically generated
if fname.endswith('op.h'):
return
arr = fname.rsplit('.', 1)
if fname.find('#') != -1 or arr[-1] not in allow_type:
return
if arr[-1] in CXX_SUFFIX:
_HELPER.process_cpp(fname, arr[-1])
if arr[-1] in PYTHON_SUFFIX:
_HELPER.process_python(fname)
def main():
"""Main entry function."""
if len(sys.argv) < 3:
print('Usage: <project-name> <filetype> <list-of-path to traverse>')
print('\tfiletype can be python/cpp/all')
exit(-1)
_HELPER.project_name = sys.argv[1]
file_type = sys.argv[2]
allow_type = []
if file_type == 'python' or file_type == 'all':
allow_type += [x for x in PYTHON_SUFFIX]
if file_type == 'cpp' or file_type == 'all':
allow_type += [x for x in CXX_SUFFIX]
allow_type = set(allow_type)
if os.name != 'nt':
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
for path in sys.argv[3:]:
if os.path.isfile(path):
process(path, allow_type)
else:
for root, dirs, files in os.walk(path):
for name in files:
process(os.path.join(root, name), allow_type)
nerr = _HELPER.print_summary(sys.stderr)
sys.exit(nerr > 0)
if __name__ == '__main__':
main()
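# Invocation sketch (project name and paths are placeholders), matching the
# usage string printed by main():
#
#   python lint.py mxnet all include src cpp-package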
| apache-2.0 |