| repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (class label, 991 values) | size (string, 4-7 chars) | content (string, 666 chars-1M) | license (class label, 15 values) |
|---|---|---|---|---|---|
timmyArch/timmyPaste
|
controller/ui.py
|
1
|
1498
|
from controller.base import *
class UI(FlaskView):
def index(self):
return render_template('index.haml')
def get(self, key=None):
try:
flash=None
if key == 'new':
return render_template('new.haml')
elif key:
return self.__show(key)
except CodeNotFound:
flash="Couldn't find syntax element. Redirect back!"
return render_template('new.haml', flash=flash)
@route('/<key>/raw')
def raw(self, key):
return Response(Code.find(key).code, mimetype="text/plain")
def post(self):
try:
hide = (True,False)[bool(request.form.get('hide') == 'true')]
return redirect('/'+Code.new(request.form.get('code'), hide))
        except Exception:
            return render_template('new.haml', flash="""
            Error while creating the paste.
            Please retry.""")
def __show(self, key):
keylist=key.split('.')
        ckey = keylist if len(keylist) > 1 else (key, 'txt')
a = Code.find(ckey[0])
try:
hcode = a.highlight('.'+ckey[1])
flash=False
        except Exception:
hcode = a.highlight('.txt')
flash="""
            Sorry, but that lexer doesn't exist. Please enter only a filename
            suffix like .rb or .py
"""
return render_template('show.haml',
key=ckey[0],
flash=flash,
code=hcode)
|
mit
|
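The controller/ui.py entry above pulls its helpers in through a wildcard import, so the view's registration is not visible in the file itself. A minimal wiring sketch, assuming controller.base re-exports Flask-Classy's FlaskView/route plus Flask's render_template, Response, redirect and request (the app module below is illustrative, not part of the repository):

```python
# Hypothetical wiring for the UI FlaskView above (assumes Flask-Classy).
from flask import Flask
from controller.ui import UI

app = Flask(__name__)
UI.register(app, route_base='/')  # exposes index(), get(<key>), post() and /<key>/raw

if __name__ == '__main__':
    app.run(debug=True)
```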
belimawr/django-filer
|
filer/utils/files.py
|
12
|
4505
|
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys
from django.utils.text import get_valid_filename as get_valid_filename_django
from django.template.defaultfilters import slugify as slugify_django
from django.http.multipartparser import ChunkIter, exhaust, \
StopFutureHandlers, SkipFile, StopUpload
from unidecode import unidecode
class UploadException(Exception):
pass
def handle_upload(request):
if not request.method == "POST":
raise UploadException("AJAX request not valid: must be POST")
if request.is_ajax():
# the file is stored raw in the request
is_raw = True
filename = request.GET.get('qqfile', False) or request.GET.get('filename', False) or ''
try:
content_length = int(request.META['CONTENT_LENGTH'])
        except (KeyError, IndexError, TypeError, ValueError):
content_length = None
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise UploadException("Invalid content length: %r" % content_length)
upload_handlers = request.upload_handlers
for handler in upload_handlers:
handler.handle_raw_input(request,
request.META,
content_length,
None,
None)
pass
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
chunk_size = min([2 ** 31 - 4] + possible_sizes)
stream = ChunkIter(request, chunk_size)
counters = [0] * len(upload_handlers)
try:
for handler in upload_handlers:
try:
handler.new_file(None, filename,
None, content_length, None)
except StopFutureHandlers:
break
for chunk in stream:
for i, handler in enumerate(upload_handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile:
# Just use up the rest of this file...
exhaust(stream)
except StopUpload as e:
if not e.connection_reset:
exhaust(request)
else:
# Make sure that the request data is all fed
exhaust(request)
# Signal that the upload has completed.
for handler in upload_handlers:
retval = handler.upload_complete()
if retval:
break
for i, handler in enumerate(upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
upload = file_obj
break
else:
if len(request.FILES) == 1:
# FILES is a dictionary in Django but Ajax Upload gives the uploaded file an
# ID based on a random number, so it cannot be guessed here in the code.
# Rather than editing Ajax Upload to pass the ID in the querystring, note that
# each upload is a separate request so FILES should only have one entry.
# Thus, we can just grab the first (and only) value in the dict.
is_raw = False
upload = list(request.FILES.values())[0]
filename = upload.name
else:
raise UploadException("AJAX request not valid: Bad Upload")
return upload, filename, is_raw
if sys.version_info < (3, ):
def slugify(string):
return slugify_django(unidecode(unicode(string)))
else:
def slugify(string):
return slugify_django(unidecode(string))
def get_valid_filename(s):
"""
like the regular get_valid_filename, but also slugifies away
umlauts and stuff.
"""
s = get_valid_filename_django(s)
filename, ext = os.path.splitext(s)
filename = slugify(filename)
ext = slugify(ext)
if ext:
return "%s.%s" % (filename, ext)
else:
return "%s" % (filename,)
|
bsd-3-clause
|
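A minimal usage sketch for the filename helpers above, assuming a Django project with django-filer importable; the input filename and expected output are illustrative only:

```python
# get_valid_filename() combines Django's filename sanitizer with unidecode-based
# slugification, so umlauts and accents are transliterated away.
from filer.utils.files import get_valid_filename

print(get_valid_filename('Über Cool Café.JPG'))
# expected to print something like 'uber_cool_cafe.jpg'
```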
onyxfish/csvkit
|
tests/test_utilities/test_csvjoin.py
|
3
|
3634
|
#!/usr/bin/env python
import sys
try:
from mock import patch
except ImportError:
from unittest.mock import patch
from csvkit.utilities.csvjoin import CSVJoin, launch_new_instance
from tests.utils import CSVKitTestCase, EmptyFileTests
class TestCSVJoin(CSVKitTestCase, EmptyFileTests):
Utility = CSVJoin
default_args = ['examples/dummy.csv', '-']
def test_launch_new_instance(self):
with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/join_a.csv', 'examples/join_b.csv']):
launch_new_instance()
def test_sequential(self):
output = self.get_output_as_io(['examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 4)
def test_inner(self):
output = self.get_output_as_io(['-c', 'a', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 3)
def test_left(self):
output = self.get_output_as_io(['-c', 'a', '--left', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 5)
def test_right(self):
output = self.get_output_as_io(['-c', 'a', '--right', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 4)
def test_outer(self):
output = self.get_output_as_io(['-c', 'a', '--outer', 'examples/join_a.csv', 'examples/join_b.csv'])
self.assertEqual(len(output.readlines()), 6)
def test_left_short_columns(self):
output = self.get_output_as_io(['-c', 'a', 'examples/join_a_short.csv', 'examples/join_b.csv'])
with open('examples/join_short.csv') as f:
self.assertEqual(output.readlines(), f.readlines())
def test_single(self):
self.assertRows(['examples/dummy.csv', '--no-inference'], [
['a', 'b', 'c'],
['1', '2', '3'],
])
def test_no_blanks(self):
self.assertRows(['examples/blanks.csv', 'examples/blanks.csv'], [
['a', 'b', 'c', 'd', 'e', 'f', 'a2', 'b2', 'c2', 'd2', 'e2', 'f2'],
['', '', '', '', '', '', '', '', '', '', '', ''],
])
def test_blanks(self):
self.assertRows(['--blanks', 'examples/blanks.csv', 'examples/blanks.csv'], [
['a', 'b', 'c', 'd', 'e', 'f', 'a2', 'b2', 'c2', 'd2', 'e2', 'f2'],
['', 'NA', 'N/A', 'NONE', 'NULL', '.', '', 'NA', 'N/A', 'NONE', 'NULL', '.'],
])
def test_no_header_row(self):
output = self.get_output_as_io(['-c', '1', '--no-header-row', 'examples/join_a.csv', 'examples/join_no_header_row.csv'])
self.assertEqual(len(output.readlines()), 3)
def test_no_inference(self):
self.assertRows(['--no-inference', 'examples/join_a.csv', 'examples/join_short.csv'], [
['a', 'b', 'c', 'a2', 'b2', 'c2', 'b2_2', 'c2_2'],
['1', 'b', 'c', '1', 'b', '', 'b', 'c'],
['2', 'b', 'c', '1', 'b', '', 'b', 'c'],
['3', 'b', 'c', '', '', '', '', ''],
])
def test_sniff_limit_no_limit(self):
self.assertRows(['examples/join_a.csv', 'examples/sniff_limit.csv'], [
['a', 'b', 'c', 'a2', 'b2', 'c2'],
['1', 'b', 'c', 'True', '2', '3'],
['2', 'b', 'c', '', '', ''],
['3', 'b', 'c', '', '', ''],
])
def test_sniff_limit_zero_limit(self):
self.assertRows(['--snifflimit', '0', 'examples/join_a.csv', 'examples/sniff_limit.csv'], [
['a', 'b', 'c', 'a;b;c'],
['1', 'b', 'c', '1;2;3'],
['2', 'b', 'c', ''],
['3', 'b', 'c', ''],
])
|
mit
|
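The same inner join exercised by test_inner above can be reproduced outside the test harness by patching sys.argv and calling launch_new_instance, exactly as test_launch_new_instance does (the fixture paths are the ones shipped with csvkit's test suite):

```python
# Standalone sketch mirroring test_launch_new_instance/test_inner above:
# inner-join the two example fixtures on column "a" and print the result to stdout.
import sys
try:
    from mock import patch
except ImportError:
    from unittest.mock import patch
from csvkit.utilities.csvjoin import launch_new_instance

with patch.object(sys, 'argv',
                  ['csvjoin', '-c', 'a', 'examples/join_a.csv', 'examples/join_b.csv']):
    launch_new_instance()
```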
benoit-pierre/plover
|
test/test_formatting.py
|
2
|
38867
|
# Copyright (c) 2013 Hesky Fisher
# See LICENSE.txt for details.
"""Unit tests for formatting.py."""
import inspect
import pytest
from plover import formatting
from plover.formatting import Case
from plover_build_utils.testing import CaptureOutput, parametrize
def action(**kwargs):
# Support using something like `text_and_word='stuff'`
# as a shortcut for `text='stuff', word='stuff'`.
for k, v in list(kwargs.items()):
if '_and_' in k:
del kwargs[k]
for k in k.split('_and_'):
kwargs[k] = v
return formatting._Action(**kwargs)
class MockTranslation:
def __init__(self, rtfcre=tuple(), english=None, formatting=None):
self.rtfcre = rtfcre
self.english = english
self.formatting = formatting
def __str__(self):
return str(self.__dict__)
def translation(**kwargs):
return MockTranslation(**kwargs)
STARTING_STROKE_TESTS = (
lambda:
(True, True, [], [translation(rtfcre=('S'), english='hello')], None,
([action(prev_attach=True, text='Hello', trailing_space=' ', word='hello')],),
[('s', 'Hello')]),
lambda:
(False, False, [], [translation(rtfcre=('S'), english='hello')], None,
([action(text_and_word='hello', trailing_space=' ')],),
[('s', ' hello')]),
lambda:
(True, False, [], [translation(rtfcre=('S'), english='hello')], None,
([action(text='Hello', word='hello', trailing_space=' ')],),
[('s', ' Hello')]),
lambda:
(False, True, [], [translation(rtfcre=('S'), english='hello')], None,
([action(text_and_word='hello', prev_attach=True, trailing_space=' ')],),
[('s', 'hello')]),
)
@parametrize(STARTING_STROKE_TESTS)
def test_starting_stroke(capitalized, attached, undo, do, prev,
expected_formats, expected_outputs):
output = CaptureOutput()
formatter = formatting.Formatter()
formatter.set_output(output)
formatter.start_capitalized = capitalized
formatter.start_attached = attached
formatter.format(undo, do, prev)
for i, t in enumerate(do):
assert t.formatting == expected_formats[i]
assert output.instructions == expected_outputs
FORMATTER_TESTS = (
lambda:
([translation(formatting=[action(text_and_word='hello', trailing_space=' ')])],
[],
None,
(),
[('b', 6)]),
lambda:
([],
[translation(rtfcre=('S'), english='hello')],
[translation(rtfcre=('T'), english='a', formatting=[action(text_and_word='f', trailing_space=' ')])]
,
([action(text_and_word='hello', trailing_space=' ')],),
[('s', ' hello')]),
lambda:
([], [translation(rtfcre=('S'), english='hello')], None,
([action(text_and_word='hello', trailing_space=' ')],),
[('s', ' hello')]),
lambda:
([], [translation(rtfcre=('ST-T',))], None,
([action(text_and_word='ST-T', trailing_space=' ')],),
[('s', ' ST-T')]),
lambda:
([],
[translation(rtfcre=('ST-T',))],
[translation(formatting=[action(text_and_word='hi', trailing_space=' ')])],
([action(text_and_word='ST-T', trailing_space=' ')],),
[('s', ' ST-T')]),
lambda:
([translation(formatting=[action(text_and_word='test', trailing_space=' ')])],
[translation(english='rest')],
[translation(formatting=[action(next_case=Case.CAP_FIRST_WORD, trailing_space=' ')])],
([action(text='Rest', word='rest', trailing_space=' ')],),
[('b', 4), ('s', 'Rest')]),
lambda:
([translation(formatting=[action(text_and_word='dare'),
action(prev_attach=True, text='ing', word='daring', prev_replace='e')])],
[translation(english='rest')],
[translation(formatting=[action(next_case=Case.CAP_FIRST_WORD,
trailing_space=' ')])],
([action(text='Rest', word='rest', trailing_space=' ')],),
[('b', 6), ('s', 'Rest')]),
lambda:
([translation(formatting=[action(text_and_word='drive', trailing_space=' ')])],
[translation(english='driving')],
None,
([action(text_and_word='driving', trailing_space=' ')],),
[('b', 1), ('s', 'ing')]),
lambda:
([translation(formatting=[action(text_and_word='drive', trailing_space=' ')])],
[translation(english='{#c}driving')],
None,
([action(combo='c'), action(text_and_word='driving', trailing_space=' ')],),
[('b', 6), ('c', 'c'), ('s', ' driving')]),
lambda:
([translation(formatting=[action(text_and_word='drive', trailing_space=' ')])],
[translation(english='{PLOVER:c}driving')],
None,
([action(command='c'), action(text_and_word='driving', trailing_space=' ')],),
[('b', 6), ('e', 'c'), ('s', ' driving')]),
lambda:
([],
[translation(english='{PloveR:CMD}')],
None,
([action(command='CMD')],),
[('e', 'CMD')]),
lambda:
([],
[translation(english='{:coMManD:Cmd}')],
None,
([action(command='Cmd')],),
[('e', 'Cmd')]),
lambda:
([],
[translation(rtfcre=('1',))],
None,
([action(text_and_word='1', trailing_space=' ', glue=True)],),
[('s', ' 1')]),
lambda:
([],
[translation(rtfcre=('1',))],
[translation(formatting=[action(text_and_word='hi', trailing_space=' ')])],
([action(text_and_word='1', trailing_space=' ', glue=True)],),
[('s', ' 1')]),
lambda:
([],
[translation(rtfcre=('1',))],
[translation(formatting=[action(text_and_word='hi', trailing_space=' ', glue=True)])],
([action(prev_attach=True, text='1', trailing_space=' ', word='hi1', glue=True)],),
[('s', '1')]),
lambda:
([],
[translation(rtfcre=('1-9',))],
[translation(formatting=[action(text_and_word='hi', trailing_space=' ', glue=True)])],
([action(prev_attach=True, text='19', trailing_space=' ', word='hi19', glue=True)],),
[('s', '19')]),
lambda:
([],
[translation(rtfcre=('ST-PL',))],
[translation(formatting=[action(text_and_word='hi', trailing_space=' ')])],
([action(text_and_word='ST-PL', trailing_space=' ')],),
[('s', ' ST-PL')]),
lambda:
([],
[translation(rtfcre=('ST-PL',))],
None,
([action(text_and_word='ST-PL', trailing_space=' ')],),
[('s', ' ST-PL')]),
)
@parametrize(FORMATTER_TESTS)
def test_formatter(undo, do, prev, expected_formats, expected_outputs):
output = CaptureOutput()
# Add some initial blank text so
# undoing with no previous state
# does not assert.
output.text = ' ' * 128
formatter = formatting.Formatter()
formatter.set_output(output)
formatter.format(undo, do, prev)
for i, t in enumerate(do):
assert t.formatting == expected_formats[i]
assert output.instructions == expected_outputs
def test_action():
assert action(word='test') != action(word='test', next_attach=True)
assert action(text='test') == action(text='test')
assert action(text='test', word='test').copy_state() == action(word='test')
TRANSLATION_TO_ACTIONS_TESTS = (
lambda:
('test', action(),
[action(text_and_word='test', trailing_space=' ')]),
lambda:
('{^^}', action(),
[action(prev_attach=True, text_and_word='', next_attach=True, orthography=False)]),
lambda:
('1-9', action(),
[action(text_and_word='1-9', trailing_space=' ')]),
lambda:
('32', action(),
[action(text_and_word='32', trailing_space=' ', glue=True)]),
lambda:
('', action(text_and_word='test', next_attach=True),
[action(prev_attach=True, word='test', next_attach=True)]),
lambda:
(' ', action(text_and_word='test', next_attach=True),
[action(prev_attach=True, word='test', next_attach=True)]),
lambda:
('{^} {.} hello {.} {#ALT_L(Grave)}{^ ^}', action(),
[action(prev_attach=True, text_and_word='', next_attach=True, orthography=False),
action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
action(text='Hello', word='hello', trailing_space=' '),
action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
action(word='.', trailing_space=' ', combo='ALT_L(Grave)', next_case=Case.CAP_FIRST_WORD),
action(prev_attach=True, text=' ', word='', next_attach=True)
]),
lambda:
('{-|}{>}{&a}{>}{&b}', action(),
[action(next_case=Case.CAP_FIRST_WORD),
action(next_case=Case.LOWER_FIRST_CHAR),
action(text_and_word='a', trailing_space=' ', glue=True),
action(next_case=Case.LOWER_FIRST_CHAR, word='a', trailing_space=' ', glue=True),
action(prev_attach=True, text='b', word='ab', trailing_space=' ', glue=True),
]),
lambda:
('{-|}{>}{&a}{>}{&b}', action(),
[action(next_case=Case.CAP_FIRST_WORD),
action(next_case=Case.LOWER_FIRST_CHAR),
action(text_and_word='a', trailing_space=' ', glue=True),
action(next_case=Case.LOWER_FIRST_CHAR, word='a', trailing_space=' ', glue=True),
action(prev_attach=True, text='b', word='ab', trailing_space=' ', glue=True),
]),
lambda:
('{-|} equip {^s}', action(),
[action(next_case=Case.CAP_FIRST_WORD),
action(text='Equip', word='equip', trailing_space=' '),
action(prev_attach=True, text='s', trailing_space=' ', word='equips'),
]),
lambda:
('{-|} equip {^ed}', action(),
[action(next_case=Case.CAP_FIRST_WORD),
action(text='Equip', word='equip', trailing_space=' '),
action(prev_attach=True, text='ped', trailing_space=' ', word='equipped'),
]),
lambda:
('{>} Equip', action(),
[action(next_case=Case.LOWER_FIRST_CHAR),
action(text='equip', word='Equip', trailing_space=' ')
]),
lambda:
('{>} equip', action(),
[action(next_case=Case.LOWER_FIRST_CHAR),
action(text_and_word='equip', trailing_space=' ')
]),
lambda:
('{<} equip', action(),
[action(next_case=Case.UPPER_FIRST_WORD),
action(text='EQUIP', word='equip', trailing_space=' ', upper_carry=True)
]),
lambda:
('{<} EQUIP', action(),
[action(next_case=Case.UPPER_FIRST_WORD),
action(text_and_word='EQUIP', trailing_space=' ', upper_carry=True)
]),
lambda:
('{<} equip {^ed}', action(),
[action(next_case=Case.UPPER_FIRST_WORD),
action(text='EQUIP', word='equip', trailing_space=' ', upper_carry=True),
action(prev_attach=True, text='PED', trailing_space=' ', word='equipped', upper_carry=True)
]),
lambda:
('equip {*-|}', action(),
[action(text_and_word='equip', trailing_space=' '),
action(prev_attach=True, text='Equip', trailing_space=' ', word_and_prev_replace='equip'),
]),
lambda:
('equip {^ed} {*-|}', action(),
[action(text_and_word='equip', trailing_space=' '),
action(prev_attach=True, text='ped', trailing_space=' ', word='equipped'),
action(prev_attach=True, text='Equipped', trailing_space=' ', word_and_prev_replace='equipped'),
]),
lambda:
('Equip {*>}', action(),
[action(text_and_word='Equip', trailing_space=' '),
action(prev_attach=True, text='equip', trailing_space=' ', word_and_prev_replace='Equip'),
]),
lambda:
('Equip {^ed} {*>}', action(),
[action(text_and_word='Equip', trailing_space=' '),
action(prev_attach=True, text='ped', trailing_space=' ', word='Equipped'),
action(prev_attach=True, text='equipped', trailing_space=' ', word_and_prev_replace='Equipped'),
]),
lambda:
('equip {*<}', action(),
[action(text_and_word='equip', trailing_space=' '),
action(prev_attach=True, text='EQUIP', trailing_space=' ', word_and_prev_replace='equip'),
]),
lambda:
('equip {^ed} {*<}', action(),
[action(text_and_word='equip', trailing_space=' '),
action(prev_attach=True, text='ped', trailing_space=' ', word='equipped'),
action(prev_attach=True, text='EQUIPPED', trailing_space=' ', word_and_prev_replace='equipped'),
]),
lambda:
('notanumber {*($c)}', action(),
[action(text_and_word='notanumber', trailing_space=' '),
action(word='notanumber', trailing_space=' '),
]),
lambda:
('0 {*($c)}', action(),
[action(text_and_word='0', trailing_space=' '),
action(prev_attach=True, text='$0', word='0', trailing_space=' ', prev_replace='0'),
]),
lambda:
('0.00 {*($c)}', action(),
[action(text_and_word='0.00', trailing_space=' '),
action(prev_attach=True, text='$0.00', word='0.00', trailing_space=' ', prev_replace='0.00'),
]),
lambda:
('1234 {*($c)}', action(),
[action(text_and_word='1234', trailing_space=' '),
action(prev_attach=True, text='$1,234', word='1,234', trailing_space=' ', prev_replace='1234'),
]),
lambda:
('1234567 {*($c)}', action(),
[action(text_and_word='1234567', trailing_space=' '),
action(prev_attach=True, text='$1,234,567', word='1,234,567', trailing_space=' ', prev_replace='1234567'),
]),
lambda:
('1234.5 {*($c)}', action(),
[action(text_and_word='1234.5', trailing_space=' '),
action(prev_attach=True, text='$1,234.50', word='1,234.50', trailing_space=' ', prev_replace='1234.5'),
]),
lambda:
('1234.56 {*($c)}', action(),
[action(text_and_word='1234.56', trailing_space=' '),
action(prev_attach=True, text='$1,234.56', word='1,234.56', trailing_space=' ', prev_replace='1234.56'),
]),
lambda:
('1234.567 {*($c)}', action(),
[action(text_and_word='1234.567', trailing_space=' '),
action(prev_attach=True, text='$1,234.57', word='1,234.57', trailing_space=' ', prev_replace='1234.567'),
]),
lambda:
('equip {^} {^ed}', action(),
[action(text_and_word='equip', trailing_space=' '),
action(prev_attach=True, text='', word='equip', next_attach=True, orthography=False),
action(prev_attach=True, text='ed', trailing_space=' ', word='equiped'),
]),
lambda:
('{prefix^} test {^ing}', action(),
[action(text_and_word='prefix', next_attach=True),
action(prev_attach=True, text_and_word='test', trailing_space=' '),
action(prev_attach=True, text='ing', trailing_space=' ', word='testing'),
]),
lambda:
('{two prefix^} test {^ing}', action(),
[action(text='two prefix', word='prefix', next_attach=True),
action(prev_attach=True, text_and_word='test', trailing_space=' '),
action(prev_attach=True, text='ing', trailing_space=' ', word='testing'),
]),
lambda:
('{-|}{^|~|^}', action(),
[action(next_case=Case.CAP_FIRST_WORD),
action(prev_attach=True, text_and_word='|~|', next_attach=True),
]),
lambda:
('{-|}{~|\'^}cause', action(),
[action(next_case=Case.CAP_FIRST_WORD),
action(text_and_word='\'', next_attach=True, next_case=Case.CAP_FIRST_WORD),
action(prev_attach=True, text='Cause', trailing_space=' ', word='cause'),
]),
lambda:
('{.}{~|\'^}cuz', action(),
[action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
action(text_and_word='\'', next_attach=True, next_case=Case.CAP_FIRST_WORD),
action(prev_attach=True, text='Cuz', trailing_space=' ', word='cuz'),
]),
lambda:
('{.}{~|\'^}cause', action(),
[action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
action(text_and_word='\'', next_attach=True, next_case=Case.CAP_FIRST_WORD),
action(prev_attach=True, text='Cause', trailing_space=' ', word='cause'),
]),
lambda:
('{.}{^~|\"}heyyo', action(),
[action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
action(prev_attach=True, text_and_word='"', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
action(text='Heyyo', trailing_space=' ', word='heyyo'),
]),
lambda:
('{.}{^~|^}zshrc', action(),
[action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
action(prev_attach=True, text_and_word='', next_attach=True, next_case=Case.CAP_FIRST_WORD),
action(prev_attach=True, text='Zshrc', trailing_space=' ', word='zshrc')]),
lambda:
('{.}', action(),
[action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD)]
),
lambda:
('{,}', action(),
[action(prev_attach=True, text_and_word=',', trailing_space=' ')]
),
lambda:
('test{prefix^}', action(),
[action(text_and_word='test', trailing_space=' '),
action(text_and_word='prefix', next_attach=True),
]),
lambda:
('{prefix^}{prefix^}', action(),
[action(text_and_word='prefix', next_attach=True),
action(prev_attach=True, text='prefix', word='prefixprefix', next_attach=True),
]),
lambda:
('{prefix^}{^ing}', action(),
[action(text_and_word='prefix', next_attach=True),
action(prev_attach=True, text='ing', trailing_space=' ', word='prefixing'),
]),
lambda:
('{prefix^}cancel{^ing}', action(),
[action(text_and_word='prefix', next_attach=True),
action(prev_attach=True, text='cancel', trailing_space=' ', word='cancel'),
action(prev_attach=True, text='ing', trailing_space=' ', word='canceling'),
]),
)
@parametrize(TRANSLATION_TO_ACTIONS_TESTS)
def test_translation_to_actions(translation, last_action, expected):
ctx = formatting._Context([], action())
ctx.translated(last_action)
assert formatting._translation_to_actions(translation, ctx) == expected
RAW_TO_ACTIONS_TESTS = (
lambda:
('2-6', action(),
[action(glue=True, text_and_word='26', trailing_space=' ')]),
lambda:
('2', action(),
[action(glue=True, text_and_word='2', trailing_space=' ')]),
lambda:
('-8', action(),
[action(glue=True, text_and_word='8', trailing_space=' ')]),
lambda:
('-68', action(),
[action(glue=True, text_and_word='68', trailing_space=' ')]),
lambda:
('S-T', action(),
[action(text_and_word='S-T', trailing_space=' ')]),
)
@parametrize(RAW_TO_ACTIONS_TESTS)
def test_raw_to_actions(stroke, last_action, expected):
ctx = formatting._Context([], action())
ctx.translated(last_action)
assert formatting._raw_to_actions(stroke, ctx) == expected
ATOM_TO_ACTION_TESTS = (
lambda:
('{^ed}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text='ed', trailing_space=' ', word='tested')),
lambda:
('{^ed}', action(text_and_word='carry', trailing_space=' '),
action(prev_attach=True, text='ied', trailing_space=' ', prev_replace='y', word='carried')),
lambda:
('{^er}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text='er', trailing_space=' ', word='tester')),
lambda:
('{^er}', action(text_and_word='carry', trailing_space=' '),
action(prev_attach=True, text='ier', trailing_space=' ', prev_replace='y', word='carrier')),
lambda:
('{^ing}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text='ing', trailing_space=' ', word='testing')),
lambda:
('{^ing}', action(text_and_word='begin', trailing_space=' '),
action(prev_attach=True, text='ning', trailing_space=' ', word='beginning')),
lambda:
('{^ing}', action(text_and_word='parade', trailing_space=' '),
action(prev_attach=True, text='ing', trailing_space=' ', prev_replace='e', word='parading')),
lambda:
('{^s}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text='s', trailing_space=' ', word='tests')),
lambda:
('{,}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text_and_word=',', trailing_space=' ')),
lambda:
('{:}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text_and_word=':', trailing_space=' ')),
lambda:
('{;}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text_and_word=';', trailing_space=' ')),
lambda:
('{.}', action(prev_attach=True, word='test', trailing_space=' '),
action(prev_attach=True, text_and_word='.', trailing_space=' ', next_case=Case.CAP_FIRST_WORD)),
lambda:
('{?}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text_and_word='?', trailing_space=' ', next_case=Case.CAP_FIRST_WORD)),
lambda:
('{!}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text_and_word='!', trailing_space=' ', next_case=Case.CAP_FIRST_WORD)),
lambda:
('{-|}', action(text_and_word='test', trailing_space=' '),
action(next_case=Case.CAP_FIRST_WORD, word='test', trailing_space=' ')),
lambda:
('{>}', action(text_and_word='test', trailing_space=' '),
action(next_case=Case.LOWER_FIRST_CHAR, word='test', trailing_space=' ')),
lambda:
('{<}', action(text_and_word='test', trailing_space=' '),
action(next_case=Case.UPPER_FIRST_WORD, word='test', trailing_space=' ')),
lambda:
('{*-|}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text='Test', trailing_space=' ', word_and_prev_replace='test')),
lambda:
('{*>}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text_and_word_and_prev_replace='test',
trailing_space=' ')),
lambda:
('{*<}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text='TEST', word_and_prev_replace='test', trailing_space=' ')),
lambda:
('{PLOVER:test_command}', action(text_and_word='test', trailing_space=' '),
action(word='test', command='test_command', trailing_space=' ')),
lambda:
('{&glue_text}', action(text_and_word='test', trailing_space=' '),
action(text_and_word='glue_text', trailing_space=' ', glue=True)),
lambda:
('{&glue_text}', action(text_and_word='test', trailing_space=' ', glue=True),
action(prev_attach=True, text='glue_text', trailing_space=' ', word='testglue_text', glue=True)),
lambda:
('{&glue_text}', action(text_and_word='test', next_attach=True),
action(prev_attach=True, text='glue_text', trailing_space=' ', word='glue_text', glue=True)),
lambda:
('{^attach_text}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text='attach_text', trailing_space=' ', word='testattach_text')),
lambda:
('{^attach_text^}', action(text_and_word='test', trailing_space=' '),
action(prev_attach=True, text='attach_text', word='testattach_text', next_attach=True)),
lambda:
('{attach_text^}', action(text_and_word='test', trailing_space=' '),
action(text_and_word='attach_text', next_attach=True)),
lambda:
('{#ALT_L(A)}', action(text_and_word='test', trailing_space=' '),
action(combo='ALT_L(A)', trailing_space=' ', word='test')),
lambda:
('text', action(text_and_word='test', trailing_space=' '),
action(text_and_word='text', trailing_space=' ')),
lambda:
('text', action(text_and_word='test', trailing_space=' ', glue=True),
action(text_and_word='text', trailing_space=' ')),
lambda:
('text', action(text_and_word='test', next_attach=True),
action(prev_attach=True, text_and_word='text', trailing_space=' ')),
lambda:
('text', action(text_and_word='test', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
action(text='Text', trailing_space=' ', word='text')),
lambda:
('some text', action(text_and_word='test', trailing_space=' '),
action(text='some text', trailing_space=' ', word='text')),
lambda:
('some text',
action(text_and_word='test', trailing_space=' ',
case=Case.TITLE,
space_char=''),
action(text='SomeText', word='text',
case=Case.TITLE,
space_char='')),
lambda:
('some text',
action(text_and_word='test', trailing_space=' ', # This is camel case
case=Case.TITLE,
space_char='', next_case=Case.LOWER_FIRST_CHAR),
action(text='someText', word='text',
case=Case.TITLE,
space_char='')),
lambda:
('some text', action(text_and_word='test', trailing_space=' ', space_char='_'),
action(text='some_text', trailing_space='_', word='text', space_char='_')),
lambda:
('some text', action(text_and_word='test', trailing_space=' ', case=Case.UPPER),
action(text='SOME TEXT', trailing_space=' ', word='text', case=Case.UPPER)),
lambda:
('sOme TexT', action(text_and_word='test', trailing_space=' ', case=Case.LOWER),
action(text='some text', trailing_space=' ', word='TexT', case=Case.LOWER)),
lambda:
('sOme TexT', action(text_and_word='test', trailing_space=' ', case=Case.TITLE),
action(text='Some Text', trailing_space=' ', word='TexT', case=Case.TITLE)),
lambda:
('{MODE:CAPS}', action(text_and_word='test', trailing_space=' '),
action(word='test', trailing_space=' ', case=Case.UPPER)),
lambda:
('{MODE:LOWER}', action(text_and_word='test', trailing_space=' '),
action(word='test', trailing_space=' ', case=Case.LOWER)),
)
@parametrize(ATOM_TO_ACTION_TESTS)
def test_atom_to_action(atom, last_action, expected):
ctx = formatting._Context((), last_action)
ctx.translated(last_action)
assert formatting._atom_to_action(atom, ctx) == expected
CHANGE_MODE_TESTS = (
# Invalid modes.
lambda:
('', action(),
ValueError),
lambda:
('ABCD', action(),
ValueError),
# CAPS: Uppercase
lambda:
('CAPS', action(),
action(case=Case.UPPER)),
# LOWER: Lowercase
lambda:
('LOWER', action(),
action(case=Case.LOWER)),
# TITLE: Titlecase
lambda:
('TITLE', action(),
action(case=Case.TITLE)),
# CAMEL: Titlecase without space
lambda:
('CAMEL', action(),
action(case=Case.TITLE, space_char='',
next_case=Case.LOWER_FIRST_CHAR)),
# SNAKE: Underscore space
lambda:
('SNAKE', action(),
action(space_char='_')),
# RESET_SPACE: Default space
lambda:
('RESET_SPACE', action(space_char='ABCD'),
action()),
# RESET_CASE: No case
lambda:
('RESET_CASE', action(case=Case.UPPER),
action()),
# SET_SPACE:xy: Set space to xy
lambda:
('SET_SPACE:', action(space_char='test'),
action(space_char='')),
lambda:
('SET_SPACE:-', action(space_char='test'),
action(space_char='-')),
lambda:
('SET_SPACE:123 45', action(space_char='test'),
action(space_char='123 45')),
# RESET: No case, default space
)
@parametrize(CHANGE_MODE_TESTS)
def test_meta_mode(meta, last_action, expected):
atom = '{MODE:' + meta + '}'
ctx = formatting._Context((), action())
ctx.translated(last_action)
if inspect.isclass(expected):
with pytest.raises(expected):
formatting._atom_to_action(atom, ctx)
else:
assert formatting._atom_to_action(atom, ctx) == expected
last_action_normal = action()
last_action_capitalized = action(next_case=Case.CAP_FIRST_WORD)
last_action_attached = action(next_attach=True)
META_CARRY_CAPITALIZE_TESTS = (
# Test word handling and space handling, standard.
lambda:
('~|*', last_action_normal,
(action(word='*', text='*', trailing_space=' '))),
# With attach flags:
lambda:
('~|*^', last_action_normal,
(action(word='*', text='*', next_attach=True))),
lambda:
('^~|*', last_action_normal,
(action(word='*', text='*', trailing_space=' ', prev_attach=True))),
lambda:
('^~|*^', last_action_normal,
(action(word='*', text='*', prev_attach=True, next_attach=True))),
# Should 'do nothing'.
lambda:
('~|', last_action_capitalized,
(last_action_capitalized)),
# Should lose 'next_attach' flag.
lambda:
('~|', last_action_attached,
(action(prev_attach=True))),
# Verify capitalize carry.
lambda:
('^~|^', last_action_capitalized,
(action(next_case=Case.CAP_FIRST_WORD, text_and_word='', prev_attach=True, next_attach=True))),
lambda:
('^~|aset^', last_action_capitalized,
(action(next_case=Case.CAP_FIRST_WORD, prev_attach=True, next_attach=True, text='Aset', word='aset'))),
lambda:
('~|aset', last_action_capitalized,
(action(next_case=Case.CAP_FIRST_WORD, text='Aset', trailing_space=' ', word='aset'))),
# Verify 'next_attach' flag overriding.
lambda:
('~|aset', last_action_attached,
(action(prev_attach=True, text_and_word='aset', trailing_space=' '))),
lambda:
('~|aset^', last_action_attached,
(action(prev_attach=True, text_and_word='aset', next_attach=True))),
)
@parametrize(META_CARRY_CAPITALIZE_TESTS)
def test_meta_carry_capitalize(meta, last_action, expected):
ctx = formatting._Context((), action())
ctx.translated(last_action)
assert formatting._atom_to_action('{' + meta + '}', ctx) == expected
def _apply_case_tests():
test = ' some test '
test2 = 'test Me'
test3 = ' SOME TEST '
return (
# INVALID
lambda: (test, '', False, ValueError),
lambda: (test, 'TEST', False, ValueError),
# NO-OP
lambda: (test, None, False, test),
lambda: (test, None, True, test),
# TITLE
lambda: (test, Case.TITLE, False, ' Some Test '),
# TITLE will not affect appended output
lambda: (test, Case.TITLE, True, ' some test '),
lambda: (test2, Case.TITLE, True, 'test Me'),
# LOWER
lambda: (test, Case.LOWER, False, ' some test '),
lambda: (test3, Case.LOWER, False, ' some test '),
lambda: (test2, Case.LOWER, True, 'test me'),
# UPPER
lambda: (test.upper(), Case.UPPER, False, ' SOME TEST '),
lambda: (test3, Case.UPPER, False, ' SOME TEST '),
lambda: (test2, Case.UPPER, True, 'TEST ME'),
)
@parametrize(_apply_case_tests())
def test_apply_case(input_text, case, appended, expected):
if inspect.isclass(expected):
with pytest.raises(expected):
formatting.apply_mode_case(input_text, case, appended)
else:
assert formatting.apply_mode_case(input_text, case, appended) == expected
def _apply_space_char_tests():
test = ' some text '
test2 = "don't"
return (
lambda: (test, '_', '_some_text_'),
lambda: (test, '', 'sometext'),
lambda: (test2, '_', test2),
lambda: (test2, '', test2),
)
@parametrize(_apply_space_char_tests())
def test_apply_space_char(text, space_char, expected):
assert formatting.apply_mode_space_char(text, space_char) == expected
@parametrize((
lambda: ('', None),
lambda: ('{abc}', 'abc'),
lambda: ('abc', None),
))
def test_get_meta(atom, meta):
assert formatting._get_meta(atom) == meta
@parametrize((
lambda: ('abc', '{&abc}'),
lambda: ('1', '{&1}'),
))
def test_glue_translation(s, expected):
assert formatting._glue_translation(s) == expected
@parametrize((
lambda: ('', ''),
lambda: ('abc', 'abc'),
lambda: (r'\{', '{'),
lambda: (r'\}', '}'),
lambda: (r'\{abc\}}{', '{abc}}{'),
))
def test_unescape_atom(atom, text):
assert formatting._unescape_atom(atom) == text
@parametrize((
lambda: ('', ''),
lambda: ('abc', 'Abc'),
lambda: ('ABC', 'ABC'),
))
def test_capitalize_first_word(s, expected):
assert formatting.capitalize_first_word(s) == expected
RIGHTMOST_WORD_TESTS = (
lambda: ('', ''),
lambda: ('\n', ''),
lambda: ('\t', ''),
lambda: ('abc', 'abc'),
lambda: ('a word', 'word'),
lambda: ('word.', '.'),
lambda: ('word ', ''),
lambda: ('word\n', ''),
lambda: ('word\t', ''),
lambda: (' word', 'word'),
lambda: ('\nword', 'word'),
lambda: ('\tword', 'word'),
)
@parametrize(RIGHTMOST_WORD_TESTS)
def test_rightmost_word(s, expected):
assert formatting.rightmost_word(s) == expected
REPLACE_TESTS = (
# Check that 'prev_replace' does not unconditionally erase
# the previous character if it does not match.
lambda:
([
translation(english='{MODE:SET_SPACE:}'),
translation(english='foobar'),
translation(english='{^}{#Return}{^}{-|}'),
], [('s', 'foobar'), ('c', 'Return')]),
# Check 'prev_replace' correctly takes into account
# the previous translation.
lambda:
([
translation(english='test '),
translation(english='{^,}'),
], [('s', 'test '), ('b', 1), ('s', ', ')]),
# While the previous translation must be taken into account,
# any meta-command must not be fired again.
lambda:
([
translation(english='{#Return}'),
translation(english='test'),
], [('c', 'Return'), ('s', 'test ')]),
)
@parametrize(REPLACE_TESTS)
def test_replace(translations, expected_instructions):
output = CaptureOutput()
formatter = formatting.Formatter()
formatter.set_output(output)
formatter.set_space_placement('After Output')
prev = []
for t in translations:
formatter.format([], [t], prev)
prev.append(t)
assert output.instructions == expected_instructions
def test_undo_replace():
# Undoing a replace....
output = CaptureOutput()
formatter = formatting.Formatter()
formatter.set_output(output)
formatter.set_space_placement('After Output')
prev = [translation(english='test')]
formatter.format([], prev, None)
undo = translation(english='{^,}')
formatter.format([], [undo], prev)
# Undo.
formatter.format([undo], [], prev)
assert output.instructions == [
('s', 'test '), ('b', 1), ('s', ', '), ('b', 2), ('s', ' '),
]
OUTPUT_OPTIMISATION_TESTS = (
# No change.
lambda:
([
translation(english='noop'),
], [
translation(english='noop'),
], [('s', ' noop')]),
# Append only.
lambda:
([
translation(english='test'),
], [
translation(english='testing'),
], [('s', ' test'), ('s', 'ing')]),
# Chained meta-commands.
lambda:
([
translation(english='{#a}'),
], [
translation(english='{#a}{#b}'),
], [('c', 'a'), ('c', 'b')]),
)
@parametrize(OUTPUT_OPTIMISATION_TESTS)
def test_output_optimization(undo, do, expected_instructions):
output = CaptureOutput()
formatter = formatting.Formatter()
formatter.set_output(output)
formatter.format([], undo, None)
formatter.format(undo, do, None)
assert output.instructions == expected_instructions
class TestRetroFormatter:
def setup_method(self):
self.formatter = formatting.Formatter()
self.translations = []
self.retro_formatter = formatting.RetroFormatter(self.translations)
def format(self, text):
t = translation(english=text)
self.formatter.format([], [t], self.translations)
self.translations.append(t)
return t
ITER_LAST_ACTIONS_TESTS = (
lambda:
(['Luca', 'mela'],
[action(text_and_word='mela', trailing_space=' '),
action(text_and_word='Luca', trailing_space=' ')]),
lambda:
(['{Luca^}', '{^mela}'],
[action(prev_attach=True, text='mela', trailing_space=' ', word='Lucamela'),
action(text_and_word='Luca', next_attach=True)]),
lambda:
(['Luca', '{^ ^}', 'mela'],
[action(text_and_word='mela', trailing_space=' ', prev_attach=True),
action(text=' ', word='', prev_attach=True, next_attach=True),
action(text_and_word='Luca', trailing_space=' ')]),
lambda:
(['Luca', '{-|}', 'mela'],
[action(text='Mela', trailing_space=' ', word='mela'),
action(word='Luca', trailing_space=' ', next_case=Case.CAP_FIRST_WORD),
action(text_and_word='Luca', trailing_space=' ')]),
)
@parametrize(ITER_LAST_ACTIONS_TESTS)
def test_iter_last_actions(self, translation_list, action_list):
for t in translation_list:
self.format(t)
assert list(self.retro_formatter.iter_last_actions()) == action_list
ITER_LAST_FRAGMENTS_TESTS = (
lambda:
(False,
['Luca', 'mela'],
['mela', 'Luca ']),
lambda:
(False,
['Luca{^ ^}mela'],
['mela', 'Luca ']),
lambda:
(False,
['Luca, mela.'],
['mela.', 'Luca, ']),
lambda:
(False,
['Luca{-|}mela'],
['Mela', 'Luca ']),
lambda:
(True,
['Luca{-|}mela'],
['Mela', 'Luca ']),
)
@parametrize(ITER_LAST_FRAGMENTS_TESTS)
def test_iter_last_fragments(self, spaces_after, translation_list, fragment_list):
if spaces_after:
self.formatter.set_space_placement('After Output')
for t in translation_list:
self.format(t)
assert list(self.retro_formatter.iter_last_fragments()) == fragment_list
ITER_LAST_WORDS_TESTS = (
lambda:
(False,
['Luca', 'mela'],
['mela', 'Luca ']),
lambda:
(False,
['Luca{^ ^}mela'],
['mela', 'Luca ']),
lambda:
(False,
['Luca, mela.'],
['.', 'mela', ', ', 'Luca']),
lambda:
(False,
['Luca{-|}mela'],
['Mela', 'Luca ']),
lambda:
(True,
['Luca{-|}mela'],
['Mela', 'Luca ']),
)
@parametrize(ITER_LAST_WORDS_TESTS)
def test_iter_last_words(self, spaces_after, translation_list, word_list):
if spaces_after:
self.formatter.set_space_placement('After Output')
for t in translation_list:
self.format(t)
assert list(self.retro_formatter.iter_last_words()) == word_list
LAST_TEXT_TESTS = (
lambda:
(False,
['Luca{-|}mela'],
3,
'ela'),
lambda:
(False,
['Luca{-|}mela'],
5,
' Mela'),
lambda:
(False,
['Luca{-|}mela'],
12,
'Luca Mela'),
lambda:
(False,
['Luca{-|}mela'],
20,
'Luca Mela'),
lambda:
(True,
['Luca{-|}mela'],
6,
'a Mela'),
)
@parametrize(LAST_TEXT_TESTS)
def test_last_text(self, spaces_after, translation_list, count, text):
if spaces_after:
self.formatter.set_space_placement('After Output')
for t in translation_list:
self.format(t)
assert self.retro_formatter.last_text(count) == text
|
gpl-2.0
|
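Most rows in the test tables above rely on the action() helper's `_and_` shortcut. A compressed illustration of what it expands to, assuming the helpers defined in test_formatting.py are in scope (this restates the helper's own comment rather than adding a new test):

```python
# action(text_and_word='hello', ...) is shorthand for
# action(text='hello', word='hello', ...), so both calls build equal _Action objects.
a = action(text_and_word='hello', trailing_space=' ')
b = action(text='hello', word='hello', trailing_space=' ')
assert a == b
```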
navodissa/python-flask
|
flask/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/lxmletree.py
|
355
|
6215
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
from lxml import etree
from ..treebuilders.etree import tag_regexp
from gettext import gettext
_ = gettext
from . import _base
from .. import ihatexml
def ensure_str(s):
if s is None:
return None
elif isinstance(s, text_type):
return s
else:
return s.decode("utf-8", "strict")
class Root(object):
def __init__(self, et):
self.elementtree = et
self.children = []
if et.docinfo.internalDTD:
self.children.append(Doctype(self,
ensure_str(et.docinfo.root_name),
ensure_str(et.docinfo.public_id),
ensure_str(et.docinfo.system_url)))
root = et.getroot()
node = root
while node.getprevious() is not None:
node = node.getprevious()
while node is not None:
self.children.append(node)
node = node.getnext()
self.text = None
self.tail = None
def __getitem__(self, key):
return self.children[key]
def getnext(self):
return None
def __len__(self):
return 1
class Doctype(object):
def __init__(self, root_node, name, public_id, system_id):
self.root_node = root_node
self.name = name
self.public_id = public_id
self.system_id = system_id
self.text = None
self.tail = None
def getnext(self):
return self.root_node.children[1]
class FragmentRoot(Root):
def __init__(self, children):
self.children = [FragmentWrapper(self, child) for child in children]
self.text = self.tail = None
def getnext(self):
return None
class FragmentWrapper(object):
def __init__(self, fragment_root, obj):
self.root_node = fragment_root
self.obj = obj
if hasattr(self.obj, 'text'):
self.text = ensure_str(self.obj.text)
else:
self.text = None
if hasattr(self.obj, 'tail'):
self.tail = ensure_str(self.obj.tail)
else:
self.tail = None
self.isstring = isinstance(obj, str) or isinstance(obj, bytes)
# Support for bytes here is Py2
if self.isstring:
self.obj = ensure_str(self.obj)
def __getattr__(self, name):
return getattr(self.obj, name)
def getnext(self):
siblings = self.root_node.children
idx = siblings.index(self)
if idx < len(siblings) - 1:
return siblings[idx + 1]
else:
return None
def __getitem__(self, key):
return self.obj[key]
def __bool__(self):
return bool(self.obj)
def getparent(self):
return None
def __str__(self):
return str(self.obj)
def __unicode__(self):
return str(self.obj)
def __len__(self):
return len(self.obj)
class TreeWalker(_base.NonRecursiveTreeWalker):
def __init__(self, tree):
if hasattr(tree, "getroot"):
tree = Root(tree)
elif isinstance(tree, list):
tree = FragmentRoot(tree)
_base.NonRecursiveTreeWalker.__init__(self, tree)
self.filter = ihatexml.InfosetFilter()
def getNodeDetails(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
return _base.TEXT, ensure_str(getattr(node, key))
elif isinstance(node, Root):
return (_base.DOCUMENT,)
elif isinstance(node, Doctype):
return _base.DOCTYPE, node.name, node.public_id, node.system_id
elif isinstance(node, FragmentWrapper) and node.isstring:
return _base.TEXT, node.obj
elif node.tag == etree.Comment:
return _base.COMMENT, ensure_str(node.text)
elif node.tag == etree.Entity:
return _base.ENTITY, ensure_str(node.text)[1:-1] # strip &;
else:
# This is assumed to be an ordinary element
match = tag_regexp.match(ensure_str(node.tag))
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = ensure_str(node.tag)
attrs = {}
for name, value in list(node.attrib.items()):
name = ensure_str(name)
value = ensure_str(value)
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (_base.ELEMENT, namespace, self.filter.fromXmlName(tag),
attrs, len(node) > 0 or node.text)
def getFirstChild(self, node):
assert not isinstance(node, tuple), _("Text nodes have no children")
assert len(node) or node.text, "Node has no children"
if node.text:
return (node, "text")
else:
return node[0]
def getNextSibling(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
# XXX: we cannot use a "bool(node) and node[0] or None" construct here
# because node[0] might evaluate to False if it has no child element
if len(node):
return node[0]
else:
return None
else: # tail
return node.getnext()
return (node, "tail") if node.tail else node.getnext()
def getParentNode(self, node):
if isinstance(node, tuple): # Text node
node, key = node
assert key in ("text", "tail"), _("Text nodes are text or tail, found %s") % key
if key == "text":
return node
# else: fallback to "normal" processing
return node.getparent()
|
bsd-3-clause
|
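A hypothetical usage sketch for the lxml tree walker above, not taken from the vendored package: it feeds an lxml ElementTree (the hasattr(tree, "getroot") branch of TreeWalker.__init__) and prints the token stream the walker emits. The import path matches this vendored snapshot; standalone html5lib exposes the same walker via html5lib.treewalkers.getTreeWalker('lxml').

```python
from lxml import etree
from pip._vendor.html5lib.treewalkers.lxmletree import TreeWalker

tree = etree.fromstring('<html><body><p class="x">hi</p></body></html>').getroottree()
for token in TreeWalker(tree):
    # Tokens are dicts such as {'type': 'StartTag', 'name': 'p', ...}
    # or {'type': 'Characters', 'data': 'hi'}.
    print(token['type'], token.get('name', token.get('data', '')))
```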
vitaly-krugl/nupic
|
tests/unit/nupic/algorithms/anomaly_likelihood_test.py
|
12
|
30911
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Unit tests for anomaly likelihood module."""
# disable pylint warning: "Access to a protected member xxxxx of a client class"
# pylint: disable=W0212
import copy
import datetime
import math
import numpy
import pickle
import unittest2 as unittest
import mock
from nupic.algorithms import anomaly_likelihood as an
from nupic.support.unittesthelpers.testcasebase import TestCaseBase
def _sampleDistribution(params, numSamples, verbosity=0):
"""
Given the parameters of a distribution, generate numSamples points from it.
This routine is mostly for testing.
:returns: A numpy array of samples.
"""
if params.has_key("name"):
if params["name"] == "normal":
samples = numpy.random.normal(loc=params["mean"],
scale=math.sqrt(params["variance"]),
size=numSamples)
elif params["name"] == "pareto":
samples = numpy.random.pareto(params["alpha"], size=numSamples)
elif params["name"] == "beta":
samples = numpy.random.beta(a=params["alpha"], b=params["beta"],
size=numSamples)
else:
raise ValueError("Undefined distribution: " + params["name"])
else:
raise ValueError("Bad distribution params: " + str(params))
if verbosity > 0:
print "\nSampling from distribution:", params
print "After estimation, mean=", numpy.mean(samples), \
"var=", numpy.var(samples), "stdev=", math.sqrt(numpy.var(samples))
return samples
def _generateSampleData(mean=0.2, variance=0.2, metricMean=0.2,
metricVariance=0.2):
"""
Generate 1440 samples of fake metrics data with a particular distribution
of anomaly scores and metric values. Here we generate values every minute.
"""
data = []
p = {"mean": mean,
"name": "normal",
"stdev": math.sqrt(variance),
"variance": variance}
samples = _sampleDistribution(p, 1440)
p = {"mean": metricMean,
"name": "normal",
"stdev": math.sqrt(metricVariance),
"variance": metricVariance}
metricValues = _sampleDistribution(p, 1440)
for hour in range(0, 24):
for minute in range(0, 60):
data.append(
[
datetime.datetime(2013, 2, 2, hour, minute, 0),
metricValues[hour * 60 + minute],
samples[hour * 60 + minute],
]
)
return data
class AnomalyLikelihoodClassTest(TestCaseBase):
"""Tests the high-level AnomalyLikelihood class"""
def testCalcSkipRecords(self):
# numIngested is less than both learningPeriod and windowSize
numSkip = an.AnomalyLikelihood._calcSkipRecords(
numIngested=5,
windowSize=10,
learningPeriod=10)
self.assertEqual(numSkip, 5)
# numIngested is equal to learningPeriod, but less than windowSize
numSkip = an.AnomalyLikelihood._calcSkipRecords(
numIngested=10,
windowSize=15,
learningPeriod=10)
self.assertEqual(numSkip, 10)
# edge case: learningPeriod is 0
numSkip = an.AnomalyLikelihood._calcSkipRecords(
numIngested=10,
windowSize=10,
learningPeriod=0)
self.assertEqual(numSkip, 0)
# boundary case: numIngested is equal to learningPeriod and windowSize
numSkip = an.AnomalyLikelihood._calcSkipRecords(
numIngested=10,
windowSize=10,
learningPeriod=10)
self.assertEqual(numSkip, 10)
# learning samples partially shifted out
numSkip = an.AnomalyLikelihood._calcSkipRecords(
numIngested=14,
windowSize=10,
learningPeriod=10)
self.assertEqual(numSkip, 6)
# learning samples fully shifted out
numSkip = an.AnomalyLikelihood._calcSkipRecords(
numIngested=20,
windowSize=10,
learningPeriod=10)
self.assertEqual(numSkip, 0)
# learning samples plus others shifted out
numSkip = an.AnomalyLikelihood._calcSkipRecords(
numIngested=25,
windowSize=10,
learningPeriod=10)
self.assertEqual(numSkip, 0)
def testHistoricWindowSize(self):
l = an.AnomalyLikelihood(claLearningPeriod=2,
estimationSamples=2,
historicWindowSize=3)
l.anomalyProbability(5, 0.1, timestamp=1) # burn in
self.assertEqual(len(l._historicalScores), 1)
l.anomalyProbability(5, 0.1, timestamp=2)
self.assertEqual(len(l._historicalScores), 2)
l.anomalyProbability(5, 0.1, timestamp=3)
self.assertEqual(len(l._historicalScores), 3)
l.anomalyProbability(5, 0.1, timestamp=4)
self.assertEqual(len(l._historicalScores), 3)
  def testWindowSizeImpactOnEstimateAnomalyLikelihoodsArgs(self):
    # Verify that AnomalyLikelihood's historicWindowSize plays nice with args
    # passed to estimateAnomalyLikelihoods
originalEstimateAnomalyLikelihoods = an.estimateAnomalyLikelihoods
estimationArgs = []
def estimateAnomalyLikelihoodsWrap(anomalyScores,
averagingWindow=10,
skipRecords=0,
verbosity=0):
estimationArgs.append((tuple(anomalyScores), skipRecords))
return originalEstimateAnomalyLikelihoods(anomalyScores,
averagingWindow=averagingWindow,
skipRecords=skipRecords,
verbosity=verbosity)
estimateAnomalyLikelihoodsPatch = mock.patch(
"nupic.algorithms.anomaly_likelihood.estimateAnomalyLikelihoods",
side_effect=estimateAnomalyLikelihoodsWrap, autospec=True)
with estimateAnomalyLikelihoodsPatch as estimateAnomalyLikelihoodsMock:
l = an.AnomalyLikelihood(claLearningPeriod=2,
estimationSamples=2,
historicWindowSize=3)
l.anomalyProbability(10, 0.1, timestamp=1)
self.assertEqual(estimateAnomalyLikelihoodsMock.call_count, 0)
l.anomalyProbability(20, 0.2, timestamp=2)
self.assertEqual(estimateAnomalyLikelihoodsMock.call_count, 0)
l.anomalyProbability(30, 0.3, timestamp=3)
self.assertEqual(estimateAnomalyLikelihoodsMock.call_count, 0)
l.anomalyProbability(40, 0.4, timestamp=4)
self.assertEqual(estimateAnomalyLikelihoodsMock.call_count, 0)
# Estimation should kick in after claLearningPeriod + estimationSamples
# samples have been ingested
l.anomalyProbability(50, 0.5, timestamp=5)
self.assertEqual(estimateAnomalyLikelihoodsMock.call_count, 1)
# NOTE: we cannot use mock's assert_called_with, because the sliding
# window container changes in-place after estimateAnomalyLikelihoods is
# called
scores, numSkip = estimationArgs.pop()
self.assertEqual(scores, ((2, 20, 0.2), (3, 30, 0.3), (4, 40, 0.4)))
self.assertEqual(numSkip, 1)
def testReestimationPeriodArg(self):
estimateAnomalyLikelihoodsWrap = mock.Mock(
wraps=an.estimateAnomalyLikelihoods,
autospec=True)
estimateAnomalyLikelihoodsPatch = mock.patch(
"nupic.algorithms.anomaly_likelihood.estimateAnomalyLikelihoods",
side_effect=estimateAnomalyLikelihoodsWrap, autospec=True)
with estimateAnomalyLikelihoodsPatch:
l = an.AnomalyLikelihood(claLearningPeriod=2,
estimationSamples=2,
historicWindowSize=3,
reestimationPeriod=2)
# burn-in
l.anomalyProbability(10, 0.1, timestamp=1)
l.anomalyProbability(10, 0.1, timestamp=2)
l.anomalyProbability(10, 0.1, timestamp=3)
l.anomalyProbability(10, 0.1, timestamp=4)
self.assertEqual(estimateAnomalyLikelihoodsWrap.call_count, 0)
l.anomalyProbability(10, 0.1, timestamp=5)
self.assertEqual(estimateAnomalyLikelihoodsWrap.call_count, 1)
l.anomalyProbability(10, 0.1, timestamp=6)
self.assertEqual(estimateAnomalyLikelihoodsWrap.call_count, 1)
l.anomalyProbability(10, 0.1, timestamp=7)
self.assertEqual(estimateAnomalyLikelihoodsWrap.call_count, 2)
l.anomalyProbability(10, 0.1, timestamp=8)
self.assertEqual(estimateAnomalyLikelihoodsWrap.call_count, 2)
def testAnomalyProbabilityResultsDuringProbationaryPeriod(self):
originalUpdateAnomalyLikelihoods = an.updateAnomalyLikelihoods
def updateAnomalyLikelihoodsWrap(anomalyScores, params, verbosity=0):
likelihoods, avgRecordList, params = originalUpdateAnomalyLikelihoods(
anomalyScores=anomalyScores,
params=params,
verbosity=verbosity)
self.assertEqual(len(likelihoods), 1)
return [0.1], avgRecordList, params
updateAnomalyLikelihoodsPatch = mock.patch(
"nupic.algorithms.anomaly_likelihood.updateAnomalyLikelihoods",
side_effect=updateAnomalyLikelihoodsWrap, autospec=True)
with updateAnomalyLikelihoodsPatch:
l = an.AnomalyLikelihood(claLearningPeriod=2,
estimationSamples=2,
historicWindowSize=3)
# 0.5 result is expected during burn-in
self.assertEqual(l.anomalyProbability(10, 0.1, timestamp=1), 0.5)
self.assertEqual(l.anomalyProbability(10, 0.1, timestamp=2), 0.5)
self.assertEqual(l.anomalyProbability(10, 0.1, timestamp=3), 0.5)
self.assertEqual(l.anomalyProbability(10, 0.1, timestamp=4), 0.5)
self.assertEqual(l.anomalyProbability(10, 0.1, timestamp=5), 0.9)
self.assertEqual(l.anomalyProbability(10, 0.1, timestamp=6), 0.9)
def testEquals(self):
l = an.AnomalyLikelihood(claLearningPeriod=2, estimationSamples=2)
l2 = an.AnomalyLikelihood(claLearningPeriod=2, estimationSamples=2)
self.assertEqual(l, l2)
# Use 5 iterations to force the distribution to be created (4 probationary
# samples + 1)
l2.anomalyProbability(5, 0.1, timestamp=1) # burn in
l2.anomalyProbability(5, 0.1, timestamp=2)
l2.anomalyProbability(5, 0.1, timestamp=3)
l2.anomalyProbability(5, 0.1, timestamp=4)
self.assertIsNone(l2._distribution)
l2.anomalyProbability(1, 0.3, timestamp=5)
self.assertIsNotNone(l2._distribution)
self.assertNotEqual(l, l2)
l.anomalyProbability(5, 0.1, timestamp=1) # burn in
l.anomalyProbability(5, 0.1, timestamp=2)
l.anomalyProbability(5, 0.1, timestamp=3)
l.anomalyProbability(5, 0.1, timestamp=4)
self.assertIsNone(l._distribution)
l.anomalyProbability(1, 0.3, timestamp=5)
self.assertIsNotNone(l._distribution)
self.assertEqual(l, l2, "equal? \n%s\n vs. \n%s" % (l, l2))
def testSerialization(self):
"""serialization using pickle"""
l = an.AnomalyLikelihood(claLearningPeriod=2, estimationSamples=2)
l.anomalyProbability("hi", 0.1, timestamp=1) # burn in
l.anomalyProbability("hi", 0.1, timestamp=2)
l.anomalyProbability("hello", 0.3, timestamp=3)
stored = pickle.dumps(l)
restored = pickle.loads(stored)
self.assertEqual(l, restored)
class AnomalyLikelihoodAlgorithmTest(TestCaseBase):
"""Tests the low-level algorithm functions"""
def assertWithinEpsilon(self, a, b, epsilon=0.005):
self.assertLessEqual(abs(a - b), epsilon,
"Values %g and %g are not within %g" % (a, b, epsilon))
def testNormalProbability(self):
"""
Test that the tailProbability function returns correct normal values
"""
# Test a standard normal distribution
# Values taken from http://en.wikipedia.org/wiki/Standard_normal_table
p = {"name": "normal", "mean": 0.0, "variance": 1.0, "stdev": 1.0}
self.assertWithinEpsilon(an.tailProbability(0.0, p), 0.5)
self.assertWithinEpsilon(an.tailProbability(0.3, p), 0.3820885780)
self.assertWithinEpsilon(an.tailProbability(1.0, p), 0.1587)
self.assertWithinEpsilon(an.tailProbability(1.0, p),
an.tailProbability(-1.0, p))
self.assertWithinEpsilon(an.tailProbability(-0.3, p),
an.tailProbability(0.3, p))
# Non standard normal distribution
p = {"name": "normal", "mean": 1.0, "variance": 4.0, "stdev": 2.0}
self.assertWithinEpsilon(an.tailProbability(1.0, p), 0.5)
self.assertWithinEpsilon(an.tailProbability(2.0, p), 0.3085)
self.assertWithinEpsilon(an.tailProbability(3.0, p), 0.1587)
self.assertWithinEpsilon(an.tailProbability(3.0, p),
an.tailProbability(-1.0, p))
self.assertWithinEpsilon(an.tailProbability(0.0, p),
an.tailProbability(2.0, p))
# Non standard normal distribution
p = {"name": "normal", "mean": -2.0, "variance": 0.5,
"stdev": math.sqrt(0.5)}
self.assertWithinEpsilon(an.tailProbability(-2.0, p), 0.5)
self.assertWithinEpsilon(an.tailProbability(-1.5, p), 0.241963652)
self.assertWithinEpsilon(an.tailProbability(-2.5, p),
an.tailProbability(-1.5, p))
def testEstimateNormal(self):
"""
This passes in a known set of data and ensures the estimateNormal
function returns the expected results.
"""
# 100 samples drawn from mean=0.4, stdev = 0.5
samples = numpy.array(
[0.32259025, -0.44936321, -0.15784842, 0.72142628, 0.8794327,
0.06323451, -0.15336159, -0.02261703, 0.04806841, 0.47219226,
0.31102718, 0.57608799, 0.13621071, 0.92446815, 0.1870912,
0.46366935, -0.11359237, 0.66582357, 1.20613048, -0.17735134,
0.20709358, 0.74508479, 0.12450686, -0.15468728, 0.3982757,
0.87924349, 0.86104855, 0.23688469, -0.26018254, 0.10909429,
0.65627481, 0.39238532, 0.77150761, 0.47040352, 0.9676175,
0.42148897, 0.0967786, -0.0087355, 0.84427985, 1.46526018,
1.19214798, 0.16034816, 0.81105554, 0.39150407, 0.93609919,
0.13992161, 0.6494196, 0.83666217, 0.37845278, 0.0368279,
-0.10201944, 0.41144746, 0.28341277, 0.36759426, 0.90439446,
0.05669459, -0.11220214, 0.34616676, 0.49898439, -0.23846184,
1.06400524, 0.72202135, -0.2169164, 1.136582, -0.69576865,
0.48603271, 0.72781008, -0.04749299, 0.15469311, 0.52942518,
0.24816816, 0.3483905, 0.7284215, 0.93774676, 0.07286373,
1.6831539, 0.3851082, 0.0637406, -0.92332861, -0.02066161,
0.93709862, 0.82114131, 0.98631562, 0.05601529, 0.72214694,
0.09667526, 0.3857222, 0.50313998, 0.40775344, -0.69624046,
-0.4448494, 0.99403206, 0.51639049, 0.13951548, 0.23458214,
1.00712699, 0.40939048, -0.06436434, -0.02753677, -0.23017904])
params = an.estimateNormal(samples)
self.assertWithinEpsilon(params["mean"], 0.3721)
self.assertWithinEpsilon(params["variance"], 0.22294)
self.assertWithinEpsilon(params["stdev"], 0.47216)
self.assertEqual(params["name"], "normal")
def testSampleDistribution(self):
"""
Test that sampleDistribution from a generated distribution returns roughly
the same parameters.
"""
    # 1000 samples drawn from mean=0.5, variance=0.1
p = {"mean": 0.5,
"name": "normal",
"stdev": math.sqrt(0.1),
"variance": 0.1}
samples = _sampleDistribution(p, 1000)
# Ensure estimate is reasonable
np = an.estimateNormal(samples)
self.assertWithinEpsilon(p["mean"], np["mean"], 0.1)
self.assertWithinEpsilon(p["variance"], np["variance"], 0.1)
self.assertWithinEpsilon(p["stdev"], np["stdev"], 0.1)
    self.assertEqual(np["name"], "normal")
def testEstimateAnomalyLikelihoods(self):
"""
This calls estimateAnomalyLikelihoods to estimate the distribution on fake
data and validates the results
"""
# Generate an estimate using fake distribution of anomaly scores.
data1 = _generateSampleData(mean=0.2)
likelihoods, avgRecordList, estimatorParams = (
an.estimateAnomalyLikelihoods(data1[0:1000])
)
self.assertEqual(len(likelihoods), 1000)
self.assertEqual(len(avgRecordList), 1000)
self.assertTrue(an.isValidEstimatorParams(estimatorParams))
# Check that the sum is correct
avgParams = estimatorParams["movingAverage"]
total = 0
for v in avgRecordList:
total = total + v[2]
    self.assertEqual(avgParams["total"], total)
# Check that the estimated mean is correct
dParams = estimatorParams["distribution"]
self.assertWithinEpsilon(dParams["mean"],
total / float(len(avgRecordList)))
# Number of points with lower than 2% probability should be pretty low
# but not zero. Can't use exact 2% here due to random variations
self.assertLessEqual(numpy.sum(likelihoods < 0.02), 50)
self.assertGreaterEqual(numpy.sum(likelihoods < 0.02), 1)
def testEstimateAnomalyLikelihoodsCategoryValues(self):
start = datetime.datetime(2017, 1, 1, 0, 0, 0)
delta = datetime.timedelta(minutes=5)
dts = [start + (i * delta) for i in xrange(10)]
values = ["a", "b", "c", "d", "e"] * 2
rawScores = [0.1 * i for i in xrange(10)]
data = zip(dts, values, rawScores)
likelihoods, avgRecordList, estimatorParams = (
an.estimateAnomalyLikelihoods(data)
)
self.assertEqual(len(likelihoods), 10)
self.assertEqual(len(avgRecordList), 10)
self.assertTrue(an.isValidEstimatorParams(estimatorParams))
def testEstimateAnomalyLikelihoodsMalformedRecords(self):
"""
This calls estimateAnomalyLikelihoods with malformed records, which should
be quietly skipped.
"""
# Generate a fake distribution of anomaly scores, and add malformed records
data1 = _generateSampleData(mean=0.2)
data1 = data1[0:1000] + [(2, 2)] + [(2, 2, 2, 2)] + [()] + [(2)]
likelihoods, avgRecordList, estimatorParams = (
an.estimateAnomalyLikelihoods(data1[0:1004])
)
self.assertEqual(len(likelihoods), 1000)
self.assertEqual(len(avgRecordList), 1000)
self.assertTrue(an.isValidEstimatorParams(estimatorParams))
# Check that the sum is correct
avgParams = estimatorParams["movingAverage"]
total = 0
for v in avgRecordList:
total = total + v[2]
    self.assertEqual(avgParams["total"], total)
# Check that the estimated mean is correct
dParams = estimatorParams["distribution"]
self.assertWithinEpsilon(dParams["mean"],
total / float(len(avgRecordList)))
def testSkipRecords(self):
"""
This calls estimateAnomalyLikelihoods with various values of skipRecords
"""
# Check happy path
data1 = _generateSampleData(mean=0.1)[0:200]
data1 = data1 + (_generateSampleData(mean=0.9)[0:200])
likelihoods, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1, skipRecords=200)
)
    # Check results are correct, i.e. we are actually skipping the first 200
dParams = estimatorParams["distribution"]
self.assertWithinEpsilon(dParams["mean"], 0.9, epsilon=0.1)
# Check case where skipRecords > num records
# In this case a null distribution should be returned which makes all
# the likelihoods reasonably high
likelihoods, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1, skipRecords=500)
)
self.assertEqual(len(likelihoods), len(data1))
self.assertTrue(likelihoods.sum() >= 0.3 * len(likelihoods))
# Check the case where skipRecords == num records
likelihoods, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1, skipRecords=len(data1))
)
self.assertEqual(len(likelihoods), len(data1))
self.assertTrue(likelihoods.sum() >= 0.3 * len(likelihoods))
def testUpdateAnomalyLikelihoods(self):
"""
    A slightly more complex test. This calls estimateAnomalyLikelihoods
to estimate the distribution on fake data, followed by several calls
to updateAnomalyLikelihoods.
"""
#------------------------------------------
# Step 1. Generate an initial estimate using fake distribution of anomaly
# scores.
data1 = _generateSampleData(mean=0.2)[0:1000]
_, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1, averagingWindow=5)
)
#------------------------------------------
# Step 2. Generate some new data with a higher average anomaly
    # score. Use the estimator from step 1 to compute likelihoods. Now we
# should see a lot more anomalies.
data2 = _generateSampleData(mean=0.6)[0:300]
likelihoods2, avgRecordList2, estimatorParams2 = (
an.updateAnomalyLikelihoods(data2, estimatorParams)
)
self.assertEqual(len(likelihoods2), len(data2))
self.assertEqual(len(avgRecordList2), len(data2))
self.assertTrue(an.isValidEstimatorParams(estimatorParams))
# The new running total should be different
self.assertNotEqual(estimatorParams2["movingAverage"]["total"],
estimatorParams["movingAverage"]["total"])
# We should have many more samples where likelihood is < 0.01, but not all
self.assertGreaterEqual(numpy.sum(likelihoods2 < 0.01), 25)
self.assertLessEqual(numpy.sum(likelihoods2 < 0.01), 250)
#------------------------------------------
# Step 3. Generate some new data with the expected average anomaly score. We
# should see fewer anomalies than in Step 2.
data3 = _generateSampleData(mean=0.2)[0:1000]
likelihoods3, avgRecordList3, estimatorParams3 = (
an.updateAnomalyLikelihoods(data3, estimatorParams2)
)
self.assertEqual(len(likelihoods3), len(data3))
self.assertEqual(len(avgRecordList3), len(data3))
self.assertTrue(an.isValidEstimatorParams(estimatorParams3))
# The new running total should be different
self.assertNotEqual(estimatorParams3["movingAverage"]["total"],
estimatorParams["movingAverage"]["total"])
self.assertNotEqual(estimatorParams3["movingAverage"]["total"],
estimatorParams2["movingAverage"]["total"])
    # We should have a small number of samples where likelihood is < 0.01, but
    # at least one
self.assertGreaterEqual(numpy.sum(likelihoods3 < 0.01), 1)
self.assertLessEqual(numpy.sum(likelihoods3 < 0.01), 100)
#------------------------------------------
# Step 4. Validate that sending data incrementally is the same as sending
# in one batch
allData = data1
allData.extend(data2)
allData.extend(data3)
# Compute moving average of all the data and check it's the same
_, historicalValuesAll, totalAll = (
an._anomalyScoreMovingAverage(allData, windowSize=5)
)
self.assertEqual(sum(historicalValuesAll),
sum(estimatorParams3["movingAverage"]["historicalValues"]))
self.assertEqual(totalAll,
estimatorParams3["movingAverage"]["total"])
def testFlatAnomalyScores(self):
"""
This calls estimateAnomalyLikelihoods with flat distributions and
ensures things don't crash.
"""
# Generate an estimate using fake distribution of anomaly scores.
data1 = _generateSampleData(mean=42.0, variance=1e-10)
likelihoods, avgRecordList, estimatorParams = (
an.estimateAnomalyLikelihoods(data1[0:1000])
)
self.assertEqual(len(likelihoods), 1000)
self.assertEqual(len(avgRecordList), 1000)
self.assertTrue(an.isValidEstimatorParams(estimatorParams))
## Check that the estimated mean is correct
dParams = estimatorParams["distribution"]
self.assertWithinEpsilon(dParams["mean"], data1[0][2])
# If you deviate from the mean, you should get probability 0
# Test this by sending in just slightly different values.
data2 = _generateSampleData(mean=42.5, variance=1e-10)
likelihoods2, _, _ = (
an.updateAnomalyLikelihoods(data2[0:10], estimatorParams)
)
# The likelihoods should go to zero very quickly
self.assertLessEqual(likelihoods2.sum(), 0.01)
# Test edge case where anomaly scores are very close to 0
    # In this case we don't let the likelihood get too low. An average
    # anomaly score of 0.1 should be essentially zero, but an average
    # of 0.05 should be higher
data3 = _generateSampleData(mean=0.01, variance=1e-6)
_, _, estimatorParams3 = (
an.estimateAnomalyLikelihoods(data3[0:1000])
)
data4 = _generateSampleData(mean=0.1, variance=1e-6)
likelihoods4, _, estimatorParams4 = (
an.updateAnomalyLikelihoods(data4[0:20], estimatorParams3)
)
# Average of 0.1 should go to zero
self.assertLessEqual(likelihoods4[10:].mean(), 0.002)
data5 = _generateSampleData(mean=0.05, variance=1e-6)
likelihoods5, _, _ = (
an.updateAnomalyLikelihoods(data5[0:20], estimatorParams4)
)
# The likelihoods should be low but not near zero
self.assertLessEqual(likelihoods5[10:].mean(), 0.28)
self.assertGreater(likelihoods5[10:].mean(), 0.015)
def testFlatMetricScores(self):
"""
This calls estimateAnomalyLikelihoods with flat metric values. In this case
we should use the null distribution, which gets reasonably high likelihood
for everything.
"""
# Generate samples with very flat metric values
data1 = _generateSampleData(
metricMean=42.0, metricVariance=1e-10)[0:1000]
likelihoods, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1)
)
# Check that we do indeed get reasonable likelihood values
self.assertEqual(len(likelihoods), len(data1))
self.assertTrue(likelihoods.sum() >= 0.4 * len(likelihoods))
# Check that we do indeed get null distribution
self.assertDictEqual(estimatorParams["distribution"], an.nullDistribution())
def testVeryFewScores(self):
"""
This calls estimateAnomalyLikelihoods and updateAnomalyLikelihoods
with one or no scores.
"""
# Generate an estimate using two data points
data1 = _generateSampleData(mean=42.0, variance=1e-10)
_, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1[0:2])
)
self.assertTrue(an.isValidEstimatorParams(estimatorParams))
# Check that the estimated mean is that value
dParams = estimatorParams["distribution"]
self.assertWithinEpsilon(dParams["mean"], data1[0][2])
# Can't generate an estimate using no data points
data1 = numpy.zeros(0)
with self.assertRaises(ValueError):
an.estimateAnomalyLikelihoods(data1)
# Can't update with no scores
with self.assertRaises(ValueError):
an.updateAnomalyLikelihoods(data1, estimatorParams)
def testBadParams(self):
"""
Calls updateAnomalyLikelihoods with bad params.
"""
# Generate an estimate using one data point
data1 = _generateSampleData(mean=42.0, variance=1e-10)
_, _, estimatorParams = (
an.estimateAnomalyLikelihoods(data1[0:1])
)
self.assertTrue(an.isValidEstimatorParams(estimatorParams))
# Can't pass in a bad params structure
with self.assertRaises(ValueError):
an.updateAnomalyLikelihoods(data1, {"haha": "heehee"})
# Can't pass in something not a dict
with self.assertRaises(ValueError):
an.updateAnomalyLikelihoods(data1, 42.0)
  def testFilterLikelihoodsInputType(self):
"""
Calls _filterLikelihoods with both input types -- numpy array of floats and
list of floats.
"""
l =[0.0, 0.0, 0.3, 0.3, 0.5]
l2 = an._filterLikelihoods(l)
n = numpy.array(l)
n2 = an._filterLikelihoods(n)
filtered = [0.0, 0.001, 0.3, 0.3, 0.5]
for i in range(len(l)):
self.assertAlmostEqual(
l2[i], filtered[i],
msg="Input of type list returns incorrect result")
for i in range(len(n)):
self.assertAlmostEqual(
n2[i], filtered[i],
msg="Input of type numpy array returns incorrect result")
def testFilterLikelihoods(self):
"""
Tests _filterLikelihoods function for several cases:
i. Likelihood goes straight to redzone, skipping over yellowzone, repeats
ii. Case (i) with different values, and numpy array instead of float list
iii. A scenario where changing the redzone from four to five 9s should
filter differently
"""
redThreshold = 0.9999
yellowThreshold = 0.999
# Case (i): values at indices 1 and 7 should be filtered to yellowzone
l = [1.0, 1.0, 0.9, 0.8, 0.5, 0.4, 1.0, 1.0, 0.6, 0.0]
l = [1 - x for x in l]
l2 = copy.copy(l)
l2[1] = 1 - yellowThreshold
l2[7] = 1 - yellowThreshold
l3 = an._filterLikelihoods(l, redThreshold=redThreshold)
for i in range(len(l2)):
self.assertAlmostEqual(l2[i], l3[i], msg="Failure in case (i)")
# Case (ii): values at indices 1-10 should be filtered to yellowzone
l = numpy.array([0.999978229, 0.999978229, 0.999999897, 1, 1, 1, 1,
0.999999994, 0.999999966, 0.999999966, 0.999994331,
0.999516576, 0.99744487])
l = 1.0 - l
l2 = copy.copy(l)
l2[1:11] = 1 - yellowThreshold
l3 = an._filterLikelihoods(l, redThreshold=redThreshold)
for i in range(len(l2)):
self.assertAlmostEqual(l2[i], l3[i], msg="Failure in case (ii)")
# Case (iii): redThreshold difference should be at index 2
l = numpy.array([0.999968329, 0.999999897, 1, 1, 1,
1, 0.999999994, 0.999999966, 0.999999966,
0.999994331, 0.999516576, 0.99744487])
l = 1.0 - l
l2a = copy.copy(l)
l2b = copy.copy(l)
l2a[1:10] = 1 - yellowThreshold
l2b[2:10] = 1 - yellowThreshold
l3a = an._filterLikelihoods(l, redThreshold=redThreshold)
l3b = an._filterLikelihoods(l, redThreshold=0.99999)
for i in range(len(l2a)):
self.assertAlmostEqual(l2a[i], l3a[i],
msg="Failure in case (iii), list a")
for i in range(len(l2b)):
self.assertAlmostEqual(l2b[i], l3b[i],
msg="Failure in case (iii), list b")
self.assertFalse(numpy.array_equal(l3a, l3b),
msg="Failure in case (iii), list 3")
if __name__ == "__main__":
unittest.main()
|
agpl-3.0
|
oisinmulvihill/stats-client
|
stats_client/client/detail.py
|
1
|
1948
|
# -*- coding: utf-8 -*-
"""
Tools to glean more useful information out of a user agent string (browser,
mobile, etc).
Oisin Mulvihill
2016-04-22
"""
#import pycountry
from user_agents import parse
def ua_string_dict(ua_string):
"""Recover useful fields from the user agent string.
This uses https://pypi.python.org/pypi/user-agents to achieve this.
"""
user_agent = parse(ua_string)
ua = dict(
is_mobile=user_agent.is_mobile,
is_tablet=user_agent.is_tablet,
is_touch_capable=user_agent.is_touch_capable,
is_pc=user_agent.is_pc,
is_bot=user_agent.is_bot,
browser_family=user_agent.browser.family,
browser_version=user_agent.browser.version_string,
os_family=user_agent.os.family,
os_version=user_agent.os.version_string,
device_family=user_agent.device.family,
)
return ua
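# Hedged usage sketch (added for illustration, not part of the original
# module): the UA string below is an invented example value, used only to
# show the kind of fields ua_string_dict() returns.
def _example_ua_string_dict():
    example_ua = (
        "Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) "
        "AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143"
    )
    details = ua_string_dict(example_ua)
    # Expect keys such as is_mobile, browser_family, os_family, device_family.
    return details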
# fields to extract from the request if possible:
AGENT_FIELDS = [
'HTTP_USER_AGENT',
'REQUEST_METHOD',
'PATH_INFO',
'HTTP_X_REAL_IP',
'HTTP_ACCEPT_LANGUAGE',
'REMOTE_ADDR',
'HTTP_X_FORWARDED_FOR',
]
def agent(request):
    """Return a dict of details about the remote UA taken from the request.
    The request object will have a META attribute (Django) or an environ
    attribute (Pyramid).
    :returns: a dict.
    The returned dict will have lower-cased versions of the fields found in
    the AGENT_FIELDS list.
    If "http_user_agent" is present, ua_string_dict() can be called to
    parse it, placing the results in a field called "ua".
"""
agent_dict = {}
if hasattr(request, 'META'):
data_from = request.META
else:
data_from = request.environ
for field in AGENT_FIELDS:
agent_dict[field.lower()] = data_from.get(field, '')
# if "http_user_agent" in agent_dict:
# agent_dict['ua'] = ua_string_dict(
# agent_dict["http_user_agent"]
# )
return agent_dict
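# Hedged usage sketch (added for illustration, not part of the original
# module): a minimal stand-in request exposing a WSGI-style environ shows the
# lower-cased keys agent() returns; the header values below are invented.
def _example_agent():
    class _FakeRequest(object):
        environ = {
            'HTTP_USER_AGENT': 'ExampleBrowser/1.0',
            'REQUEST_METHOD': 'GET',
            'PATH_INFO': '/status',
            'REMOTE_ADDR': '127.0.0.1',
        }
    # Fields from AGENT_FIELDS that are missing come back as empty strings.
    return agent(_FakeRequest())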
|
mit
|
defzzd/UserDataBase-Heroku
|
venv/Lib/site-packages/pip/_vendor/requests/packages/urllib3/util/ssl_.py
|
305
|
4235
|
from binascii import hexlify, unhexlify
from hashlib import md5, sha1
from ..exceptions import SSLError
try: # Test for SSL features
SSLContext = None
HAS_SNI = False
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import SSLContext # Modern SSL?
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
# Maps the length of a digest to a possible hash function producing
# this digest.
hashfunc_map = {
16: md5,
20: sha1
}
fingerprint = fingerprint.replace(':', '').lower()
digest_length, rest = divmod(len(fingerprint), 2)
if rest or digest_length not in hashfunc_map:
raise SSLError('Fingerprint is of invalid length.')
    # We need encode() here for py32; works on py2 and py33.
fingerprint_bytes = unhexlify(fingerprint.encode())
hashfunc = hashfunc_map[digest_length]
cert_digest = hashfunc(cert).digest()
if not cert_digest == fingerprint_bytes:
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(hexlify(fingerprint_bytes),
hexlify(cert_digest)))
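# Hedged usage sketch (added for illustration, not part of the original
# module): the certificate bytes would normally come from
# sock.getpeercert(binary_form=True); a stand-in byte string is used here so
# the expected fingerprint matches by construction.
def _example_assert_fingerprint():
    fake_cert_der = b'not-a-real-certificate'
    expected = hexlify(sha1(fake_cert_der).digest()).decode('ascii')
    assert_fingerprint(fake_cert_der, expected)  # passes; raises SSLError on mismatch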
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
    Like resolve_cert_reqs(), but for the SSL protocol version.
    Defaults to :data:`ssl.PROTOCOL_SSLv23`.
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
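# Hedged usage sketch (added for illustration, not part of the original
# module): both resolvers accept None, a short name, the full constant name or
# the numeric constant itself. Whether PROTOCOL_TLSv1 exists depends on the
# local ssl build.
def _example_resolvers():
    assert resolve_cert_reqs(None) == CERT_NONE
    assert resolve_cert_reqs('REQUIRED') == ssl.CERT_REQUIRED
    assert resolve_cert_reqs('CERT_REQUIRED') == ssl.CERT_REQUIRED
    assert resolve_ssl_version(None) == PROTOCOL_SSLv23
    assert resolve_ssl_version('TLSv1') == ssl.PROTOCOL_TLSv1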
if SSLContext is not None: # Python 3.2+
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
"""
All arguments except `server_hostname` have the same meaning as for
:func:`ssl.wrap_socket`
:param server_hostname:
Hostname of the expected certificate
"""
context = SSLContext(ssl_version)
context.verify_mode = cert_reqs
        # Disable TLS compression to mitigate CRIME attack (issue #309)
OP_NO_COMPRESSION = 0x20000
context.options |= OP_NO_COMPRESSION
if ca_certs:
try:
context.load_verify_locations(ca_certs)
# Py32 raises IOError
# Py33 raises FileNotFoundError
except Exception as e: # Reraise as SSLError
raise SSLError(e)
if certfile:
# FIXME: This block needs a test.
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
return context.wrap_socket(sock)
else: # Python 3.1 and earlier
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
ca_certs=ca_certs, cert_reqs=cert_reqs,
ssl_version=ssl_version)
|
mit
|
JelteF/bottor
|
tracker/app/api/matrix.py
|
1
|
1416
|
"""matrix.py - Controller for Matrix."""
from flask import Blueprint, jsonify, request
from app.controllers import MatrixController
from app.utils import serialize_sqla
from app.views import login
matrix_api = Blueprint('matrix_api', __name__, url_prefix='/api/matrix')
@matrix_api.route('', methods=['POST'])
@login.login_redirect
def create():
""" Create new matrix """
matrix_dict = request.json
matrix = MatrixController.createFromFile(matrix_dict['filename'])
return jsonify(id=matrix.id)
@matrix_api.route('/<int:matrix_id>', methods=['DELETE'])
@login.login_redirect
def delete(matrix_id):
""" Delete matrix """
matrix = MatrixController.get(matrix_id)
if not matrix:
return jsonify(error='Matrix not found'), 500
MatrixController.delete(matrix)
return jsonify()
@matrix_api.route('/<int:matrix_id>', methods=['GET'])
@login.login_redirect
def get(matrix_id):
""" Get matrix """
matrix = MatrixController.get(matrix_id)
if not matrix:
return jsonify(error='Matrix not found'), 500
return jsonify(matrix=serialize_sqla(matrix))
@matrix_api.route('/all', methods=['GET'])
@login.login_redirect
def get_all():
""" Get all data matrices """
matrices = MatrixController.get_all_data()
if not matrices:
return jsonify(error='No matrices were found'), 500
return jsonify(matrices=serialize_sqla(matrices))
|
mit
|
kamenim/samba-old
|
script/generate_param.py
|
13
|
10980
|
# Unix SMB/CIFS implementation.
# Copyright (C) 2014 Catalyst.Net Ltd
#
# Auto generate param_functions.c
#
# ** NOTE! The following LGPL license applies to the ldb
# ** library. This does NOT imply that all of Samba is released
# ** under the LGPL
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
#
import errno
import os
import re
import subprocess
import xml.etree.ElementTree as ET
import sys
import optparse
# parse command line arguments
parser = optparse.OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="input file", metavar="FILE")
parser.add_option("-o", "--output", dest="output",
help='output file', metavar="FILE")
parser.add_option("--mode", type="choice", metavar="<FUNCTIONS|S3PROTO|LIBPROTO|PARAMDEFS>",
choices=["FUNCTIONS", "S3PROTO", "LIBPROTO", "PARAMDEFS"], default="FUNCTIONS")
parser.add_option("--scope", metavar="<GLOBAL|LOCAL>",
choices = ["GLOBAL", "LOCAL"], default="GLOBAL")
(options, args) = parser.parse_args()
if options.filename is None:
parser.error("No input file specified")
if options.output is None:
parser.error("No output file specified")
def iterate_all(path):
"""Iterate and yield all the parameters.
:param path: path to parameters xml file
"""
try:
p = open(path, 'r')
    except IOError, e:
        raise Exception("Error opening parameters file %s: %s" % (path, e))
out = p.read()
# parse the parameters xml file
root = ET.fromstring(out)
for parameter in root:
name = parameter.attrib.get("name")
param_type = parameter.attrib.get("type")
context = parameter.attrib.get("context")
func = parameter.attrib.get("function")
synonym = parameter.attrib.get("synonym")
removed = parameter.attrib.get("removed")
generated = parameter.attrib.get("generated_function")
if synonym == "1" or removed == "1" or generated == "0":
continue
constant = parameter.attrib.get("constant")
parm = parameter.attrib.get("parm")
if name is None or param_type is None or context is None:
raise Exception("Error parsing parameter: " + name)
if func is None:
func = name.replace(" ", "_").lower()
yield {'name': name,
'type': param_type,
'context': context,
'function': func,
'constant': (constant == '1'),
'parm': (parm == '1')}
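# Hedged sketch (added for illustration, not part of the original script): a
# tiny in-memory document with the attribute layout iterate_all() expects; the
# parameter below is invented and the XML is a simplified, namespace-free
# stand-in for the real parameters file.
def _example_parameter_xml():
    example_xml = (
        '<parameters>'
        '<parameter name="example setting" type="boolean" context="G"/>'
        '</parameters>'
    )
    root = ET.fromstring(example_xml)
    # With no "function" attribute, iterate_all() derives the function name
    # from the parameter name: "example setting" -> "example_setting".
    return [child.attrib.get("name") for child in root]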
# map doc attributes to a section of the generated function
context_dict = {"G": "_GLOBAL", "S": "_LOCAL"}
param_type_dict = {"boolean": "_BOOL", "list": "_LIST", "string": "_STRING",
"integer": "_INTEGER", "enum": "_INTEGER", "char" : "_CHAR",
"boolean-auto": "_INTEGER"}
def generate_functions(path_in, path_out):
f = open(path_out, 'w')
try:
f.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n')
for parameter in iterate_all(options.filename):
            # filter out parametric options
if ':' in parameter['name']:
continue
output_string = "FN"
temp = context_dict.get(parameter['context'])
if temp is None:
raise Exception(parameter['name'] + " has an invalid context " + parameter['context'])
output_string += temp
if parameter['constant']:
output_string += "_CONST"
if parameter['parm']:
output_string += "_PARM"
temp = param_type_dict.get(parameter['type'])
if temp is None:
raise Exception(parameter['name'] + " has an invalid param type " + parameter['type'])
output_string += temp
f.write(output_string + "(" + parameter['function'] +", " + parameter['function'] + ')\n')
finally:
f.close()
mapping = {'boolean': 'bool ', 'string': 'char *', 'integer': 'int ', 'char': 'char ',
'list': 'const char **', 'enum': 'int ', 'boolean-auto': 'int '}
def make_s3_param_proto(path_in, path_out):
file_out = open(path_out, 'w')
try:
file_out.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n')
header = get_header(path_out)
file_out.write("#ifndef %s\n" % header)
file_out.write("#define %s\n\n" % header)
for parameter in iterate_all(path_in):
            # filter out parametric options
if ':' in parameter['name']:
continue
output_string = ""
if parameter['constant']:
output_string += 'const '
param_type = mapping.get(parameter['type'])
if param_type is None:
                raise Exception(parameter['name'] + " has an invalid param type " + parameter['type'])
output_string += param_type
output_string += "lp_%s" % parameter['function']
param = None
if parameter['parm']:
param = "const struct share_params *p"
else:
param = "int"
if parameter['type'] == 'string' and not parameter['constant']:
if parameter['context'] == 'G':
output_string += '(TALLOC_CTX *ctx);\n'
elif parameter['context'] == 'S':
output_string += '(TALLOC_CTX *ctx, %s);\n' % param
else:
                    raise Exception(parameter['name'] + " has an invalid context " + parameter['context'])
else:
if parameter['context'] == 'G':
output_string += '(void);\n'
elif parameter['context'] == 'S':
output_string += '(%s);\n' % param
else:
                    raise Exception(parameter['name'] + " has an invalid context " + parameter['context'])
file_out.write(output_string)
file_out.write("\n#endif /* %s */\n\n" % header)
finally:
file_out.close()
def make_lib_proto(path_in, path_out):
file_out = open(path_out, 'w')
try:
file_out.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n')
for parameter in iterate_all(path_in):
            # filter out parametric options
if ':' in parameter['name']:
continue
output_string = ""
if parameter['constant']:
output_string += 'const '
param_type = mapping.get(parameter['type'])
if param_type is None:
                raise Exception(parameter['name'] + " has an invalid param type " + parameter['type'])
output_string += param_type
output_string += "lpcfg_%s" % parameter['function']
if parameter['type'] == 'string' and not parameter['constant']:
if parameter['context'] == 'G':
output_string += '(struct loadparm_context *, TALLOC_CTX *ctx);\n'
elif parameter['context'] == 'S':
output_string += '(struct loadparm_service *, struct loadparm_service *, TALLOC_CTX *ctx);\n'
else:
                    raise Exception(parameter['name'] + " has an invalid context " + parameter['context'])
else:
if parameter['context'] == 'G':
output_string += '(struct loadparm_context *);\n'
elif parameter['context'] == 'S':
output_string += '(struct loadparm_service *, struct loadparm_service *);\n'
else:
                    raise Exception(parameter['name'] + " has an invalid context " + parameter['context'])
file_out.write(output_string)
finally:
file_out.close()
def get_header(path):
header = os.path.basename(path).upper()
header = header.replace(".", "_").replace("\\", "_").replace("-", "_")
return "__%s__" % header
def make_param_defs(path_in, path_out, scope):
file_out = open(path_out, 'w')
try:
file_out.write('/* This file was automatically generated by generate_param.py. DO NOT EDIT */\n\n')
header = get_header(path_out)
file_out.write("#ifndef %s\n" % header)
file_out.write("#define %s\n\n" % header)
if scope == "GLOBAL":
file_out.write("/**\n")
            file_out.write(" * This structure describes global (i.e., server-wide) parameters.\n")
file_out.write(" */\n")
file_out.write("struct loadparm_global \n")
file_out.write("{\n")
file_out.write("\tTALLOC_CTX *ctx; /* Context for talloced members */\n")
file_out.write("\tchar * dnsdomain;\n")
elif scope == "LOCAL":
file_out.write("/**\n")
file_out.write(" * This structure describes a single service.\n")
file_out.write(" */\n")
file_out.write("struct loadparm_service \n")
file_out.write("{\n")
file_out.write("\tbool autoloaded;\n")
for parameter in iterate_all(path_in):
            # filter out parametric options
if ':' in parameter['name']:
continue
if (scope == "GLOBAL" and parameter['context'] != "G" or
scope == "LOCAL" and parameter['context'] != "S"):
continue
output_string = "\t"
param_type = mapping.get(parameter['type'])
if param_type is None:
                raise Exception(parameter['name'] + " has an invalid param type " + parameter['type'])
output_string += param_type
output_string += " %s;\n" % parameter['function']
file_out.write(output_string)
file_out.write("LOADPARM_EXTRA_%sS\n" % scope)
file_out.write("};\n")
file_out.write("\n#endif /* %s */\n\n" % header)
finally:
file_out.close()
if options.mode == 'FUNCTIONS':
generate_functions(options.filename, options.output)
elif options.mode == 'S3PROTO':
make_s3_param_proto(options.filename, options.output)
elif options.mode == 'LIBPROTO':
make_lib_proto(options.filename, options.output)
elif options.mode == 'PARAMDEFS':
make_param_defs(options.filename, options.output, options.scope)
|
gpl-3.0
|
edmorley/django
|
tests/gis_tests/test_gis_tests_utils.py
|
67
|
1596
|
from django.db import connection, models
from django.db.models.expressions import Func
from django.test import SimpleTestCase
from .utils import FuncTestMixin
def test_mutation(raises=True):
def wrapper(mutation_func):
def test(test_case_instance, *args, **kwargs):
class TestFunc(Func):
output_field = models.IntegerField()
def __init__(self):
self.attribute = 'initial'
super().__init__('initial', ['initial'])
def as_sql(self, *args, **kwargs):
mutation_func(self)
return '', ()
if raises:
msg = 'TestFunc Func was mutated during compilation.'
with test_case_instance.assertRaisesMessage(AssertionError, msg):
getattr(TestFunc(), 'as_' + connection.vendor)(None, None)
else:
getattr(TestFunc(), 'as_' + connection.vendor)(None, None)
return test
return wrapper
class FuncTestMixinTests(FuncTestMixin, SimpleTestCase):
@test_mutation()
def test_mutated_attribute(func):
func.attribute = 'mutated'
@test_mutation()
def test_mutated_expressions(func):
func.source_expressions.clear()
@test_mutation()
def test_mutated_expression(func):
func.source_expressions[0].name = 'mutated'
@test_mutation()
def test_mutated_expression_deep(func):
func.source_expressions[1].value[0] = 'mutated'
@test_mutation(raises=False)
def test_not_mutated(func):
pass
|
bsd-3-clause
|
pf4d/dolfin-adjoint
|
dolfin_adjoint/optimization/optimization.py
|
1
|
11701
|
import numpy as np
import dolfin
from ..reduced_functional_numpy import ReducedFunctionalNumPy, get_global
from ..reduced_functional import ReducedFunctional
from ..utils import gather
from ..misc import rank
def serialise_bounds(rf_np, bounds):
''' Converts bounds to an array of (min, max) tuples and serialises it in a parallel environment. '''
if len(np.array(bounds).shape) == 1:
bounds = np.array([[b] for b in bounds])
if len(bounds) != 2:
raise ValueError, "The 'bounds' parameter must be of the form [lower_bound, upper_bound] for one parameter or [ [lower_bound1, lower_bound2, ...], [upper_bound1, upper_bound2, ...] ] for multiple parameters."
bounds_arr = [[], []]
for i in range(2):
for j in range(len(bounds[i])):
bound = bounds[i][j]
if type(bound) in [int, float, np.int32, np.int64, np.float32, np.float64]:
bound_len = len(get_global(rf_np.controls[j].data()))
const_bound = bound*np.ones(bound_len)
bounds_arr[i] += const_bound.tolist()
else:
bounds_arr[i] += rf_np.obj_to_array(bound).tolist()
# Transpose and return the array to get the form [ [lower_bound1, upper_bound1], [lower_bound2, upper_bound2], ... ]
return np.array(bounds_arr).T
def minimize_scipy_generic(rf_np, method, bounds = None, **kwargs):
''' Interface to the generic minimize method in scipy '''
try:
from scipy.optimize import minimize as scipy_minimize
except ImportError:
        print "**************** Unsupported scipy version warning *****************"
print "You have an unusable installation of scipy. This version is not supported by dolfin-adjoint."
try:
import scipy
print "Version: %s\tFile: %s" % (scipy.__version__, scipy.__file__)
except:
pass
raise
if method in ["Newton-CG"]:
forget = None
else:
forget = False
project = kwargs.pop("project", False)
m = [p.data() for p in rf_np.controls]
m_global = rf_np.obj_to_array(m)
J = rf_np.__call__
dJ = lambda m: rf_np.derivative(m, taylor_test=dolfin.parameters["optimization"]["test_gradient"],
seed=dolfin.parameters["optimization"]["test_gradient_seed"],
forget=forget,
project=project)
H = rf_np.hessian
if not "options" in kwargs:
kwargs["options"] = {}
if rank() != 0:
# Shut up all processors except the first one.
kwargs["options"]["disp"] = False
else:
# Print out progress information by default
if not "disp" in kwargs["options"]:
kwargs["options"]["disp"] = True
        # Make the default SLSQP options more verbose
if method == "SLSQP" and "iprint" not in kwargs["options"]:
kwargs["options"]["iprint"] = 2
# For gradient-based methods add the derivative function to the argument list
if method not in ["COBYLA", "Nelder-Mead", "Anneal", "Powell"]:
kwargs["jac"] = dJ
# For Hessian-based methods add the Hessian action function to the argument list
if method in ["Newton-CG"]:
kwargs["hessp"] = H
if "constraints" in kwargs:
from constraints import canonicalise, InequalityConstraint, EqualityConstraint
constraints = canonicalise(kwargs["constraints"])
scipy_c = []
for c in constraints:
if isinstance(c, InequalityConstraint):
typestr = "ineq"
elif isinstance(c, EqualityConstraint):
typestr = "eq"
else:
raise Exception, "Unknown constraint class"
def jac(x):
out = c.jacobian(x)
return [gather(y) for y in out]
scipy_c.append(dict(type=typestr, fun=c.function, jac=jac))
kwargs["constraints"] = scipy_c
if method=="basinhopping":
try:
from scipy.optimize import basinhopping
except ImportError:
print "**************** Outdated scipy version warning *****************"
print "The basin hopping optimisation algorithm requires scipy >= 0.12."
raise ImportError
del kwargs["options"]
del kwargs["jac"]
kwargs["minimizer_kwargs"]["jac"]=dJ
if "bounds" in kwargs["minimizer_kwargs"]:
kwargs["minimizer_kwargs"]["bounds"] = \
serialise_bounds(rf_np, kwargs["minimizer_kwargs"]["bounds"])
res = basinhopping(J, m_global, **kwargs)
elif bounds != None:
bounds = serialise_bounds(rf_np, bounds)
res = scipy_minimize(J, m_global, method=method, bounds=bounds, **kwargs)
else:
res = scipy_minimize(J, m_global, method=method, **kwargs)
rf_np.set_controls(np.array(res["x"]))
m = [p.data() for p in rf_np.controls]
return m,res
def minimize_custom(rf_np, bounds=None, **kwargs):
''' Interface to the user-provided minimisation method '''
try:
algo = kwargs["algorithm"]
del kwargs["algorithm"]
except KeyError:
raise KeyError, 'When using a "Custom" optimisation method, you must pass the optimisation function as the "algorithm" parameter. Make sure that this function accepts the same arguments as scipy.optimize.minimize.'
m = [p.data() for p in rf_np.controls]
m_global = rf_np.obj_to_array(m)
J = rf_np.__call__
dJ = lambda m: rf_np.derivative(m, taylor_test=dolfin.parameters["optimization"]["test_gradient"],
seed=dolfin.parameters["optimization"]["test_gradient_seed"],
forget=None)
H = rf_np.hessian
if bounds != None:
bounds = serialise_bounds(rf_np, bounds)
res = algo(J, m_global, dJ, H, bounds, **kwargs)
try:
rf_np.set_controls(np.array(res))
except Exception as e:
        raise Exception("Failed to update the optimised control values. Are you sure your custom optimisation algorithm returns an array containing the optimised values? Original error: %s" % e)
m = [p.data() for p in rf_np.controls]
return m
optimization_algorithms_dict = {'L-BFGS-B': ('The L-BFGS-B implementation in scipy.', minimize_scipy_generic),
'SLSQP': ('The SLSQP implementation in scipy.', minimize_scipy_generic),
'TNC': ('The truncated Newton algorithm implemented in scipy.', minimize_scipy_generic),
'CG': ('The nonlinear conjugate gradient algorithm implemented in scipy.', minimize_scipy_generic),
'BFGS': ('The BFGS implementation in scipy.', minimize_scipy_generic),
'Nelder-Mead': ('Gradient-free Simplex algorithm.', minimize_scipy_generic),
                                'Powell': ("Gradient-free Powell's method", minimize_scipy_generic),
'Newton-CG': ('Newton-CG method', minimize_scipy_generic),
'Anneal': ('Gradient-free simulated annealing', minimize_scipy_generic),
'basinhopping': ('Global basin hopping method', minimize_scipy_generic),
                                'COBYLA': ('Gradient-free constrained optimization by linear approximation method', minimize_scipy_generic),
'Custom': ('User-provided optimization algorithm', minimize_custom)
}
def print_optimization_methods():
''' Prints the available optimization methods '''
print 'Available optimization methods:'
for function_name, (description, func) in optimization_algorithms_dict.iteritems():
print function_name, ': ', description
def minimize(rf, method='L-BFGS-B', scale=1.0, **kwargs):
''' Solves the minimisation problem with PDE constraint:
min_m func(u, m)
s.t.
e(u, m) = 0
lb <= m <= ub
g(m) <= u
        where m is the control variable, u is the solution of the PDE system e(u, m) = 0, func is the functional of interest, and lb, ub and g(m) constrain the control variables.
The optimization problem is solved using a gradient based optimization algorithm and the functional gradients are computed by solving the associated adjoint system.
The function arguments are as follows:
* 'rf' must be a ReducedFunctional object.
* 'method' specifies the optimization method to be used to solve the problem. The available methods can be listed with the print_optimization_methods function.
* 'scale' is a factor to scale to problem (default: 1.0).
        * 'bounds' is an optional keyword parameter to support control constraints: bounds = (lb, ub). lb and ub must be of the same type as the parameters m.
        Additional arguments specific to the optimization algorithm can be added to the minimize function (e.g. iprint = 2). These arguments will be passed to the underlying optimization algorithm. For detailed information about which arguments are supported for each optimization algorithm, please refer to the documentation of that algorithm.
'''
rf.scale = scale
if isinstance(rf, ReducedFunctionalNumPy):
rf_np = rf
elif isinstance(rf, ReducedFunctional):
rf_np = ReducedFunctionalNumPy(rf)
else:
        # Assume the user knows what they are doing - they might, for example,
        # have written their own reduced functional class.
rf_np = rf
try:
algorithm = optimization_algorithms_dict[method][1]
except KeyError:
raise KeyError, 'Unknown optimization method ' + method + '. Use print_optimization_methods() to get a list of the available methods.'
if algorithm == minimize_scipy_generic:
        # For scipy's generic interface we need to pass the optimisation method as a parameter.
kwargs["method"] = method
opt,res = algorithm(rf_np, **kwargs)
if len(opt) == 1:
return opt[0], res
else:
return opt
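# Hedged usage sketch (added for illustration, not part of the original
# module): `rf` must be a ReducedFunctional built elsewhere with dolfin-adjoint;
# the bounds and the "maxiter"/"disp" options are illustrative values that this
# wrapper forwards to scipy.optimize.minimize.
def _example_minimize_call(rf, lower_bound, upper_bound):
    return minimize(rf, method='L-BFGS-B',
                    bounds=(lower_bound, upper_bound),
                    options={'maxiter': 50, 'disp': True})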
def maximize(rf, method='L-BFGS-B', scale=1.0, **kwargs):
''' Solves the maximisation problem with PDE constraint:
max_m func(u, m)
s.t.
e(u, m) = 0
lb <= m <= ub
g(m) <= u
        where m is the control variable, u is the solution of the PDE system e(u, m) = 0, func is the functional of interest, and lb, ub and g(m) constrain the control variables.
The optimization problem is solved using a gradient based optimization algorithm and the functional gradients are computed by solving the associated adjoint system.
The function arguments are as follows:
* 'rf' must be a ReducedFunctional object.
* 'method' specifies the optimization method to be used to solve the problem. The available methods can be listed with the print_optimization_methods function.
* 'scale' is a factor to scale to problem (default: 1.0).
        * 'bounds' is an optional keyword parameter to support control constraints: bounds = (lb, ub). lb and ub must be of the same type as the parameters m.
        Additional arguments specific to the optimization method can be added to the maximize function (e.g. iprint = 2). These arguments will be passed to the underlying optimization method. For detailed information about which arguments are supported for each optimization method, please refer to the documentation of that method.
'''
return minimize(rf, method, scale=-scale, **kwargs)
minimise = minimize
maximise = maximize
|
lgpl-3.0
|
andr-pash/timestamp-fcc
|
node_modules/nodemon/travis_after_all.py
|
76
|
4308
|
import os
import sys
import json
import time
import logging
# reduce() is a builtin on Python 2 but must be imported from functools on Python 3.
from functools import reduce
try:
import urllib.request as urllib2
except ImportError:
import urllib2
log = logging.getLogger("travis.leader")
log.addHandler(logging.StreamHandler())
log.setLevel(logging.INFO)
TRAVIS_JOB_NUMBER = 'TRAVIS_JOB_NUMBER'
TRAVIS_BUILD_ID = 'TRAVIS_BUILD_ID'
POLLING_INTERVAL = 'LEADER_POLLING_INTERVAL'
GITHUB_TOKEN = 'GITHUB_TOKEN'
# Travis API entry point, there are at least https://api.travis-ci.com and https://api.travis-ci.org
travis_entry = sys.argv[1] if len(sys.argv) > 1 else 'https://api.travis-ci.org'
build_id = os.getenv(TRAVIS_BUILD_ID)
polling_interval = int(os.getenv(POLLING_INTERVAL, '5'))
gh_token = os.getenv(GITHUB_TOKEN)
# assume, first job is the leader
is_leader = lambda job_number: job_number.endswith('.1')
job_number = os.getenv(TRAVIS_JOB_NUMBER)
if not job_number:
# seems even for builds with only one job, this won't get here
    log.fatal("Leader election should not be used for builds without a build matrix")
exit(1)
elif is_leader(job_number):
log.info("This is a leader")
else:
# since python is subprocess, env variables are exported back via file
with open(".to_export_back", "w") as export_var:
export_var.write("BUILD_MINION=YES")
log.info("This is a minion")
exit(0)
class MatrixElement(object):
def __init__(self, json_raw):
self.is_finished = json_raw['finished_at'] is not None
self.is_succeeded = json_raw['result'] == 0
self.number = json_raw['number']
self.is_leader = is_leader(self.number)
def matrix_snapshot(travis_token):
"""
:return: Matrix List
"""
headers = {'content-type': 'application/json', 'Authorization': 'token {}'.format(travis_token)}
req = urllib2.Request("{0}/builds/{1}".format(travis_entry, build_id), headers=headers)
response = urllib2.urlopen(req).read()
raw_json = json.loads(response)
matrix_without_leader = [MatrixElement(job) for job in raw_json["matrix"] if not is_leader(job['number'])]
return matrix_without_leader
def wait_others_to_finish(travis_token):
def others_finished():
"""
        Checks whether all the other jobs have finished.
        The leader itself is excluded, since it is still running.
:return: tuple(True or False, List of not finished jobs)
"""
snapshot = matrix_snapshot(travis_token)
finished = [job.is_finished for job in snapshot if not job.is_leader]
return reduce(lambda a, b: a and b, finished), [job.number for job in snapshot if
not job.is_leader and not job.is_finished]
while True:
finished, waiting_list = others_finished()
if finished:
break
        log.info("Leader waits for minions {0}...".format(waiting_list))  # log periodically so the build does not hit a "silence timeout"
time.sleep(polling_interval)
def get_token():
assert gh_token, 'GITHUB_TOKEN is not set'
data = {"github_token": gh_token}
headers = {'content-type': 'application/json'}
req = urllib2.Request("{0}/auth/github".format(travis_entry), json.dumps(data), headers)
response = urllib2.urlopen(req).read()
travis_token = json.loads(response).get('access_token')
return travis_token
try:
token = get_token()
wait_others_to_finish(token)
final_snapshot = matrix_snapshot(token)
log.info("Final Results: {0}".format([(e.number, e.is_succeeded) for e in final_snapshot]))
BUILD_AGGREGATE_STATUS = 'BUILD_AGGREGATE_STATUS'
others_snapshot = [el for el in final_snapshot if not el.is_leader]
if reduce(lambda a, b: a and b, [e.is_succeeded for e in others_snapshot]):
os.environ[BUILD_AGGREGATE_STATUS] = "others_succeeded"
elif reduce(lambda a, b: a and b, [not e.is_succeeded for e in others_snapshot]):
log.error("Others Failed")
os.environ[BUILD_AGGREGATE_STATUS] = "others_failed"
else:
log.warn("Others Unknown")
os.environ[BUILD_AGGREGATE_STATUS] = "unknown"
# since python is subprocess, env variables are exported back via file
with open(".to_export_back", "w") as export_var:
export_var.write("BUILD_LEADER=YES {0}={1}".format(BUILD_AGGREGATE_STATUS, os.environ[BUILD_AGGREGATE_STATUS]))
except Exception as e:
log.fatal(e)
|
mit
|
franek/weboob
|
modules/redmine/browser.py
|
2
|
7988
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from urlparse import urlsplit
import urllib
import lxml.html
from weboob.tools.browser import BaseBrowser, BrowserIncorrectPassword
from .pages.index import LoginPage, IndexPage, MyPage, ProjectsPage
from .pages.wiki import WikiPage, WikiEditPage
from .pages.issues import IssuesPage, IssuePage, NewIssuePage, IssueLogTimePage, \
IssueTimeEntriesPage
__all__ = ['RedmineBrowser']
# Browser
class RedmineBrowser(BaseBrowser):
ENCODING = 'utf-8'
PAGES = {
'https?://[^/]+/': IndexPage,
'https?://[^/]+/login': LoginPage,
# compatibility with redmine 0.9
'https?://[^/]+/login\?back_url.*': MyPage,
'https?://[^/]+/my/page': MyPage,
'https?://[^/]+/projects': ProjectsPage,
'https?://[^/]+/projects/([\w-]+)/wiki/([^\/]+)/edit(?:\?version=\d+)?': WikiEditPage,
'https?://[^/]+/projects/[\w-]+/wiki/[^\/]*': WikiPage,
'https?://[^/]+/projects/[\w-]+/issues/new': NewIssuePage,
'https?://[^/]+/projects/[\w-]+/issues': IssuesPage,
'https?://[^/]+/issues(|/?\?.*)': IssuesPage,
'https?://[^/]+/issues/(\d+)': IssuePage,
'https?://[^/]+/issues/(\d+)/time_entries/new': IssueLogTimePage,
'https?://[^/]+/projects/[\w-]+/time_entries': IssueTimeEntriesPage,
}
def __init__(self, url, *args, **kwargs):
self._userid = 0
v = urlsplit(url)
self.PROTOCOL = v.scheme
self.DOMAIN = v.netloc
self.BASEPATH = v.path
if self.BASEPATH.endswith('/'):
self.BASEPATH = self.BASEPATH[:-1]
BaseBrowser.__init__(self, *args, **kwargs)
self.projects = {}
def is_logged(self):
return self.is_on_page(LoginPage) or self.page and len(self.page.document.getroot().cssselect('a.my-account')) == 1
def login(self):
assert isinstance(self.username, basestring)
assert isinstance(self.password, basestring)
if not self.is_on_page(LoginPage):
self.location('%s/login' % self.BASEPATH, no_login=True)
self.page.login(self.username, self.password)
if self.is_on_page(LoginPage):
raise BrowserIncorrectPassword()
divs = self.page.document.getroot().cssselect('div#loggedas')
if len(divs) > 0:
parts = divs[0].find('a').attrib['href'].split('/')
self._userid = int(parts[2])
def get_userid(self):
return self._userid
def get_wiki_source(self, project, page, version=None):
url = '%s/projects/%s/wiki/%s/edit' % (self.BASEPATH, project, urllib.quote(page.encode('utf-8')))
if version:
url += '?version=%s' % version
self.location(url)
return self.page.get_source()
def set_wiki_source(self, project, page, data, message):
self.location('%s/projects/%s/wiki/%s/edit' % (self.BASEPATH, project, urllib.quote(page.encode('utf-8'))))
self.page.set_source(data, message)
def get_wiki_preview(self, project, page, data):
if (not self.is_on_page(WikiEditPage) or self.page.groups[0] != project
or self.page.groups[1] != page):
self.location('%s/projects/%s/wiki/%s/edit' % (self.BASEPATH,
project, urllib.quote(page.encode('utf-8'))))
url = '%s/projects/%s/wiki/%s/preview' % (self.BASEPATH, project, urllib.quote(page.encode('utf-8')))
params = {}
params['content[text]'] = data.encode('utf-8')
params['authenticity_token'] = "%s" % self.page.get_authenticity_token()
preview_html = lxml.html.fragment_fromstring(self.readurl(url,
urllib.urlencode(params)),
create_parent='div')
preview_html.find("fieldset").drop_tag()
preview_html.find("legend").drop_tree()
return lxml.html.tostring(preview_html)
def query_issues(self, project_name, **kwargs):
self.location('/projects/%s/issues' % project_name)
token = self.page.get_authenticity_token()
data = (('project_id', project_name),
('query[column_names][]', 'tracker'),
('authenticity_token', token),
('query[column_names][]', 'status'),
('query[column_names][]', 'priority'),
('query[column_names][]', 'subject'),
('query[column_names][]', 'assigned_to'),
('query[column_names][]', 'updated_on'),
('query[column_names][]', 'category'),
('query[column_names][]', 'fixed_version'),
('query[column_names][]', 'done_ratio'),
('query[column_names][]', 'author'),
('query[column_names][]', 'start_date'),
('query[column_names][]', 'due_date'),
('query[column_names][]', 'estimated_hours'),
('query[column_names][]', 'created_on'),
)
for key, value in kwargs.iteritems():
if value:
data += (('values[%s][]' % key, value),)
data += (('fields[]', key),)
data += (('operators[%s]' % key, '~'),)
self.location('/issues?set_filter=1&per_page=100', urllib.urlencode(data))
assert self.is_on_page(IssuesPage)
return {'project': self.page.get_project(project_name),
'iter': self.page.iter_issues(),
}
def get_issue(self, id):
self.location('/issues/%s' % id)
assert self.is_on_page(IssuePage)
return self.page.get_params()
def logtime_issue(self, id, hours, message):
self.location('/issues/%s/time_entries/new' % id)
assert self.is_on_page(IssueLogTimePage)
self.page.logtime(hours.seconds/3600, message)
def comment_issue(self, id, message):
self.location('/issues/%s' % id)
assert self.is_on_page(IssuePage)
self.page.fill_form(note=message)
def create_issue(self, project, **kwargs):
self.location('/projects/%s/issues/new' % project)
assert self.is_on_page(NewIssuePage)
self.page.fill_form(**kwargs)
assert self.is_on_page(IssuePage)
return int(self.page.groups[0])
def edit_issue(self, id, **kwargs):
self.location('/issues/%s' % id)
assert self.is_on_page(IssuePage)
self.page.fill_form(**kwargs)
assert self.is_on_page(IssuePage)
return int(self.page.groups[0])
def remove_issue(self, id):
self.location('/issues/%s' % id)
assert self.is_on_page(IssuePage)
token = self.page.get_authenticity_token()
data = (('authenticity_token', token),)
self.openurl('/issues/%s/destroy' % id, urllib.urlencode(data))
def iter_projects(self):
self.location('/projects')
return self.page.iter_projects()
|
agpl-3.0
|
danieljjh/oppia
|
core/controllers/feedback_test.py
|
30
|
13790
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the feedback controllers."""
__author__ = 'Sean Lip'
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.tests import test_utils
import feconf
EXPECTED_THREAD_KEYS = [
'status', 'original_author_username', 'state_name', 'summary',
'thread_id', 'subject', 'last_updated']
EXPECTED_MESSAGE_KEYS = [
'author_username', 'created_on', 'exploration_id', 'message_id',
'text', 'updated_status', 'updated_subject']
class FeedbackThreadPermissionsTests(test_utils.GenericTestBase):
EXP_ID = '0'
def setUp(self):
super(FeedbackThreadPermissionsTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
# Load exploration 0.
exp_services.delete_demo(self.EXP_ID)
exp_services.load_demo(self.EXP_ID)
# Get the CSRF token and create a single thread with a single message.
# The corresponding user has already registered as an editor, and has a
# username.
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/create/%s' % self.EXP_ID)
self.csrf_token = self.get_csrf_token_from_response(response)
self.post_json('%s/%s' % (
feconf.FEEDBACK_THREADLIST_URL_PREFIX, self.EXP_ID),
{
'state_name': self._get_unicode_test_string('statename'),
'subject': self._get_unicode_test_string('subject'),
'text': self._get_unicode_test_string('text'),
}, self.csrf_token)
self.logout()
def test_invalid_exploration_ids_return_empty_threadlist(self):
response_dict = self.get_json(
'%s/bad_exp_id' % feconf.FEEDBACK_THREADLIST_URL_PREFIX)
self.assertEqual(response_dict['threads'], [])
def test_invalid_thread_ids_return_empty_message_list(self):
response_dict = self.get_json(
'%s/%s/bad_thread_id' % (
feconf.FEEDBACK_THREAD_URL_PREFIX, self.EXP_ID))
self.assertEqual(response_dict['messages'], [])
def test_non_logged_in_users_can_view_threads_and_messages(self):
# Non-logged-in users can see the thread list.
response_dict = self.get_json(
'%s/%s' % (feconf.FEEDBACK_THREADLIST_URL_PREFIX, self.EXP_ID))
self.assertEqual(len(response_dict['threads']), 1)
self.assertDictContainsSubset({
'status': 'open',
'state_name': self._get_unicode_test_string('statename'),
}, response_dict['threads'][0])
# Non-logged-in users can see individual messages.
first_thread_id = response_dict['threads'][0]['thread_id']
thread_url = '%s/%s/%s' % (
feconf.FEEDBACK_THREAD_URL_PREFIX, self.EXP_ID, first_thread_id)
response_dict = self.get_json(thread_url)
self.assertEqual(len(response_dict['messages']), 1)
self.assertDictContainsSubset({
'updated_status': 'open',
'updated_subject': self._get_unicode_test_string('subject'),
'text': self._get_unicode_test_string('text'),
}, response_dict['messages'][0])
def test_non_logged_in_users_cannot_create_threads_and_messages(self):
self.post_json('%s/%s' % (
feconf.FEEDBACK_THREADLIST_URL_PREFIX, self.EXP_ID),
{
'state_name': 'Welcome!',
'subject': self.UNICODE_TEST_STRING,
'text': self.UNICODE_TEST_STRING,
}, self.csrf_token, expect_errors=True, expected_status_int=401)
thread_url = '%s/%s/%s' % (
feconf.FEEDBACK_THREAD_URL_PREFIX, self.EXP_ID, 'dummy_thread_id')
self.post_json(thread_url, {
'exploration_id': '0',
'text': self.UNICODE_TEST_STRING,
}, self.csrf_token, expect_errors=True, expected_status_int=401)
class FeedbackThreadIntegrationTests(test_utils.GenericTestBase):
EXP_ID = '0'
def setUp(self):
super(FeedbackThreadIntegrationTests, self).setUp()
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.EDITOR_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)
# Load exploration 0.
exp_services.delete_demo(self.EXP_ID)
exp_services.load_demo(self.EXP_ID)
def test_create_thread(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/create/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
'%s/%s' % (feconf.FEEDBACK_THREADLIST_URL_PREFIX, self.EXP_ID), {
'state_name': None,
'subject': u'New Thread ¡unicode!',
'text': u'Thread Text ¡unicode!',
}, csrf_token)
self.logout()
response_dict = self.get_json(
'%s/%s' % (feconf.FEEDBACK_THREADLIST_URL_PREFIX, self.EXP_ID))
threadlist = response_dict['threads']
self.assertEqual(len(threadlist), 1)
self.assertEqual(
set(threadlist[0].keys()), set(EXPECTED_THREAD_KEYS))
self.assertDictContainsSubset({
'status': 'open',
'original_author_username': self.EDITOR_USERNAME,
'state_name': None,
'subject': u'New Thread ¡unicode!',
}, threadlist[0])
thread_url = '%s/%s/%s' % (
feconf.FEEDBACK_THREAD_URL_PREFIX,
self.EXP_ID,
threadlist[0]['thread_id'])
response_dict = self.get_json(thread_url)
self.assertEqual(len(response_dict['messages']), 1)
self.assertDictContainsSubset({
'updated_status': 'open',
'updated_subject': u'New Thread ¡unicode!',
'text': u'Thread Text ¡unicode!',
}, response_dict['messages'][0])
def test_missing_thread_subject_raises_400_error(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/create/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
'%s/%s' % (feconf.FEEDBACK_THREADLIST_URL_PREFIX, self.EXP_ID), {
'state_name': None,
'text': u'Thread Text ¡unicode!',
}, csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(
response_dict['error'], 'A thread subject must be specified.')
self.logout()
def test_missing_thread_text_raises_400_error(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/create/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
response_dict = self.post_json(
'%s/%s' % (feconf.FEEDBACK_THREADLIST_URL_PREFIX, self.EXP_ID),
{
'state_name': None,
'subject': u'New Thread ¡unicode!',
}, csrf_token, expect_errors=True, expected_status_int=400)
self.assertEqual(
response_dict['error'],
'Text for the first message in the thread must be specified.')
self.logout()
def test_post_message_to_existing_thread(self):
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/create/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
# First, create a thread.
self.post_json(
'%s/%s' % (feconf.FEEDBACK_THREADLIST_URL_PREFIX, self.EXP_ID), {
'state_name': None,
'subject': u'New Thread ¡unicode!',
'text': u'Message 0 ¡unicode!',
}, csrf_token)
# Then, get the thread id.
response_dict = self.get_json(
'%s/%s' % (feconf.FEEDBACK_THREADLIST_URL_PREFIX, self.EXP_ID))
threadlist = response_dict['threads']
self.assertEqual(len(threadlist), 1)
thread_id = threadlist[0]['thread_id']
# Then, create a new message in that thread.
thread_url = '%s/%s/%s' % (
feconf.FEEDBACK_THREAD_URL_PREFIX, self.EXP_ID, thread_id)
self.post_json(thread_url, {
'updated_status': None,
'updated_subject': None,
'text': 'Message 1'
}, csrf_token)
# The resulting thread should contain two messages.
response_dict = self.get_json(thread_url)
self.assertEqual(len(response_dict['messages']), 2)
self.assertEqual(
set(response_dict['messages'][0].keys()),
set(EXPECTED_MESSAGE_KEYS))
self.assertDictContainsSubset({
'author_username': self.EDITOR_USERNAME,
'exploration_id': self.EXP_ID,
'message_id': 0,
'updated_status': 'open',
'updated_subject': u'New Thread ¡unicode!',
'text': u'Message 0 ¡unicode!',
}, response_dict['messages'][0])
self.assertDictContainsSubset({
'author_username': self.EDITOR_USERNAME,
'exploration_id': self.EXP_ID,
'message_id': 1,
'updated_status': None,
'updated_subject': None,
'text': u'Message 1',
}, response_dict['messages'][1])
self.logout()
def test_no_username_shown_for_logged_out_learners(self):
NEW_EXP_ID = 'new_eid'
exploration = exp_domain.Exploration.create_default_exploration(
NEW_EXP_ID, 'A title', 'A category')
exp_services.save_new_exploration(self.EDITOR_ID, exploration)
rights_manager.publish_exploration(self.EDITOR_ID, NEW_EXP_ID)
response = self.testapp.get('/create/%s' % NEW_EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
'/explorehandler/give_feedback/%s' % NEW_EXP_ID,
{
'state_name': None,
'subject': 'Test thread',
'feedback': 'Test thread text',
'include_author': False,
}, csrf_token)
response_dict = self.get_json(
'%s/%s' % (feconf.FEEDBACK_THREADLIST_URL_PREFIX, NEW_EXP_ID))
threadlist = response_dict['threads']
self.assertIsNone(threadlist[0]['original_author_username'])
response_dict = self.get_json('%s/%s/%s' % (
feconf.FEEDBACK_THREAD_URL_PREFIX, NEW_EXP_ID,
threadlist[0]['thread_id']))
self.assertIsNone(response_dict['messages'][0]['author_username'])
def test_message_id_assignment_for_multiple_posts_to_same_thread(self):
# Create a thread for others to post to.
self.login(self.EDITOR_EMAIL)
response = self.testapp.get('/create/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(
'%s/%s' % (feconf.FEEDBACK_THREADLIST_URL_PREFIX, self.EXP_ID), {
'state_name': None,
'subject': u'New Thread ¡unicode!',
'text': 'Message 0',
}, csrf_token)
self.logout()
# Get the thread id.
response_dict = self.get_json(
'%s/%s' % (feconf.FEEDBACK_THREADLIST_URL_PREFIX, self.EXP_ID))
thread_id = response_dict['threads'][0]['thread_id']
thread_url = '%s/%s/%s' % (
feconf.FEEDBACK_THREAD_URL_PREFIX, self.EXP_ID, thread_id)
def _get_username(n):
return 'editor%s' % n
def _get_email(n):
return '%[email protected]' % n
# Generate 10 users.
NUM_USERS = 10
for num in range(NUM_USERS):
username = _get_username(num)
email = _get_email(num)
self.signup(email, username)
# Each of these users posts a new message to the same thread.
for num in range(NUM_USERS):
self.login(_get_email(num))
response = self.testapp.get('/create/%s' % self.EXP_ID)
csrf_token = self.get_csrf_token_from_response(response)
self.post_json(thread_url, {
'text': 'New Message %s' % num
}, csrf_token)
self.logout()
# Get the message list.
response_dict = self.get_json(thread_url)
self.assertEqual(len(response_dict['messages']), NUM_USERS + 1)
# The resulting message list is not sorted. It needs to be sorted
# by message id.
response_dict['messages'] = sorted(
response_dict['messages'], key=lambda x: x['message_id'])
self.assertEqual(
response_dict['messages'][0]['author_username'],
self.EDITOR_USERNAME)
self.assertEqual(response_dict['messages'][0]['message_id'], 0)
self.assertEqual(response_dict['messages'][0]['text'], 'Message 0')
for num in range(NUM_USERS):
self.assertEqual(
response_dict['messages'][num + 1]['author_username'],
_get_username(num))
self.assertEqual(
response_dict['messages'][num + 1]['message_id'], num + 1)
self.assertEqual(
response_dict['messages'][num + 1]['text'],
'New Message %s' % num)
|
apache-2.0
|
yetanotherindie/jMonkey-Engine
|
sdk/jme3-blender/src/com/jme3/gde/blender/scripts/import_fbx.py
|
13
|
2424
|
# This script invokes blender to import and save external model formats as
# .blend files to be processed further.
#
# Example usage for this importer:
# blender --background --factory-startup --python $HOME/import_3ds.py -- \
# --i="/tmp/hello.3ds" \
# --o="/tmp/hello.blend" \
#
# See blender --help for details.
import bpy
# Imports a file using importer
def import_file(file_path):
# Import the model
bpy.ops.import_scene.fbx(filepath = file_path)
# Clear existing objects.
def clear_scene():
scene = bpy.context.scene
scene.camera = None
for obj in scene.objects:
scene.objects.unlink(obj)
# Save current scene as .blend file
def save_file(save_path):
    # Check that the output path is writable before attempting to save
    ok = False
    try:
        f = open(save_path, 'w')
        f.close()
        ok = True
    except:
        print("Cannot save to path %r" % save_path)
        import traceback
        traceback.print_exc()
# Save .blend file
if ok:
bpy.ops.wm.save_as_mainfile(filepath=save_path)
def main():
import sys # to get command line args
import argparse # to parse options for us and print a nice help message
# get the args passed to blender after "--", all of which are ignored by
# blender so scripts may receive their own arguments
argv = sys.argv
if "--" not in argv:
argv = [] # as if no args are passed
else:
argv = argv[argv.index("--") + 1:] # get all args after "--"
# When --help or no args are given, print this help
    usage_text = (
        "Run blender in background mode with this script:"
        "  blender --background --factory-startup --python " + __file__ + " -- [options]"
    )
parser = argparse.ArgumentParser(description=usage_text)
# Possible types are: string, int, long, choice, float and complex.
parser.add_argument("-i", "--input", dest="file_path", metavar='FILE',
help="Import the specified file")
parser.add_argument("-o", "--output", dest="save_path", metavar='FILE',
help="Save the generated file to the specified path")
    args = parser.parse_args(argv)  # the parsed input/output paths are used below
if not argv:
parser.print_help()
return
# Run the conversion
clear_scene()
import_file(args.file_path)
save_file(args.save_path)
print("batch job finished, exiting")
if __name__ == "__main__":
main()
|
bsd-3-clause
|
MathieuDuponchelle/gobject-introspection
|
giscanner/gdumpparser.py
|
4
|
21125
|
# -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2008 Johan Dahlin
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
import os
import sys
import tempfile
import shutil
import subprocess
from xml.etree.cElementTree import parse
from . import ast
from . import message
from . import utils
from .transformer import TransformerException
from .utils import to_underscores
# GParamFlags
G_PARAM_READABLE = 1 << 0
G_PARAM_WRITABLE = 1 << 1
G_PARAM_CONSTRUCT = 1 << 2
G_PARAM_CONSTRUCT_ONLY = 1 << 3
G_PARAM_LAX_VALIDATION = 1 << 4
G_PARAM_STATIC_NAME = 1 << 5
G_PARAM_STATIC_NICK = 1 << 6
G_PARAM_STATIC_BLURB = 1 << 7
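# Note (added for clarity): these constants mirror the corresponding GLib
# GParamFlags bit values; only the readable/writable/construct(-only) bits
# are consulted below in _introspect_properties().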
class IntrospectionBinary(object):
def __init__(self, args, tmpdir=None):
self.args = args
if tmpdir is None:
self.tmpdir = tempfile.mkdtemp('', 'tmp-introspect')
else:
self.tmpdir = tmpdir
class Unresolved(object):
def __init__(self, target):
self.target = target
class UnknownTypeError(Exception):
pass
class GDumpParser(object):
def __init__(self, transformer):
self._transformer = transformer
self._namespace = transformer.namespace
self._binary = None
self._get_type_functions = []
self._error_quark_functions = []
self._error_domains = {}
self._boxed_types = {}
self._private_internal_types = {}
# Public API
def init_parse(self):
"""Do parsing steps that don't involve the introspection binary
This does enough work that get_type_functions() can be called.
"""
# First pass: parsing
for node in self._namespace.itervalues():
if isinstance(node, ast.Function):
self._initparse_function(node)
if self._namespace.name == 'GObject' or self._namespace.name == 'GLib':
for node in self._namespace.itervalues():
if isinstance(node, ast.Record):
self._initparse_gobject_record(node)
def get_get_type_functions(self):
return self._get_type_functions
def get_error_quark_functions(self):
return self._error_quark_functions
def set_introspection_binary(self, binary):
self._binary = binary
def parse(self):
"""Do remaining parsing steps requiring introspection binary"""
# Get all the GObject data by passing our list of get_type
# functions to the compiled binary, returning an XML blob.
tree = self._execute_binary_get_tree()
root = tree.getroot()
for child in root:
if child.tag == 'error-quark':
self._introspect_error_quark(child)
else:
self._introspect_type(child)
# Pair up boxed types and class records
for name, boxed in self._boxed_types.iteritems():
self._pair_boxed_type(boxed)
for node in self._namespace.itervalues():
if isinstance(node, (ast.Class, ast.Interface)):
self._find_class_record(node)
# Clear the _get_type functions out of the namespace;
# Anyone who wants them can get them from the ast.Class/Interface/Boxed
to_remove = []
for name, node in self._namespace.iteritems():
if isinstance(node, ast.Registered) and node.get_type is not None:
get_type_name = node.get_type
if get_type_name == 'intern':
continue
assert get_type_name, node
(ns, name) = self._transformer.split_csymbol(get_type_name)
assert ns is self._namespace
get_type_func = self._namespace.get(name)
assert get_type_func, name
to_remove.append(get_type_func)
for node in to_remove:
self._namespace.remove(node)
# Helper functions
def _execute_binary_get_tree(self):
"""Load the library (or executable), returning an XML
blob containing data gleaned from GObject's primitive introspection."""
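        # Sketch of the exchange format (added comment; 'gtk_window_get_type'
        # and 'gtk_file_chooser_error_quark' are only hypothetical symbols):
        # the functions.txt written below holds one directive per line, e.g.
        #   get-type:gtk_window_get_type
        #   error-quark:gtk_file_chooser_error_quark
        # and the binary is invoked with --introspect-dump=<in_path>,<out_path>,
        # which produces the XML tree parsed at the end of this method.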
in_path = os.path.join(self._binary.tmpdir, 'functions.txt')
f = open(in_path, 'w')
for func in self._get_type_functions:
f.write('get-type:')
f.write(func)
f.write('\n')
for func in self._error_quark_functions:
f.write('error-quark:')
f.write(func)
f.write('\n')
f.close()
out_path = os.path.join(self._binary.tmpdir, 'dump.xml')
args = []
args.extend(self._binary.args)
args.append('--introspect-dump=%s,%s' % (in_path, out_path))
# Invoke the binary, having written our get_type functions to types.txt
try:
try:
subprocess.check_call(args, stdout=sys.stdout, stderr=sys.stderr)
except subprocess.CalledProcessError as e:
# Clean up temporaries
raise SystemExit(e)
return parse(out_path)
finally:
if not utils.have_debug_flag('save-temps'):
shutil.rmtree(self._binary.tmpdir)
# Parser
def _initparse_function(self, func):
symbol = func.symbol
if symbol.startswith('_'):
return
elif (symbol.endswith('_get_type') or symbol.endswith('_get_gtype')):
self._initparse_get_type_function(func)
elif symbol.endswith('_error_quark'):
self._initparse_error_quark_function(func)
def _initparse_get_type_function(self, func):
if func.symbol == 'g_variant_get_gtype':
# We handle variants internally, see _initparse_gobject_record
return True
if func.is_type_meta_function():
self._get_type_functions.append(func.symbol)
return True
return False
def _initparse_error_quark_function(self, func):
if (func.retval.type.ctype != 'GQuark'):
return False
self._error_quark_functions.append(func.symbol)
return True
def _initparse_gobject_record(self, record):
        if (record.name.startswith('ParamSpec')
                and record.name not in ('ParamSpecPool', 'ParamSpecClass', 'ParamSpecTypeInfo')):
parent = None
if record.name != 'ParamSpec':
parent = ast.Type(target_giname='GObject.ParamSpec')
prefix = to_underscores(record.name).lower()
node = ast.Class(record.name, parent,
ctype=record.ctype,
# GParamSpecXxx has g_type_name 'GParamXxx'
gtype_name=record.ctype.replace('Spec', ''),
get_type='intern',
c_symbol_prefix=prefix)
node.fundamental = True
if record.name == 'ParamSpec':
node.is_abstract = True
self._add_record_fields(node)
self._namespace.append(node, replace=True)
elif record.name == 'Variant':
self._boxed_types['GVariant'] = ast.Boxed('Variant',
gtype_name='GVariant',
get_type='intern',
c_symbol_prefix='variant')
elif record.name == 'InitiallyUnownedClass':
record.fields = self._namespace.get('ObjectClass').fields
record.disguised = False
# Introspection over the data we get from the dynamic
# GObject/GType system out of the binary
def _introspect_type(self, xmlnode):
if xmlnode.tag in ('enum', 'flags'):
self._introspect_enum(xmlnode)
elif xmlnode.tag == 'class':
self._introspect_object(xmlnode)
elif xmlnode.tag == 'interface':
self._introspect_interface(xmlnode)
elif xmlnode.tag == 'boxed':
self._introspect_boxed(xmlnode)
elif xmlnode.tag == 'fundamental':
self._introspect_fundamental(xmlnode)
else:
            raise ValueError("Unhandled introspection XML tag %s" % xmlnode.tag)
def _introspect_enum(self, xmlnode):
type_name = xmlnode.attrib['name']
(get_type, c_symbol_prefix) = self._split_type_and_symbol_prefix(xmlnode)
try:
enum_name = self._transformer.strip_identifier(type_name)
except TransformerException as e:
message.fatal(e)
# The scanned member values are more accurate than the values that the
# we dumped from GEnumValue.value because GEnumValue.value has the
# values as a 32-bit signed integer, even if they were unsigned
# in the source code.
previous_values = {}
previous = self._namespace.get(enum_name)
if isinstance(previous, (ast.Enum, ast.Bitfield)):
for member in previous.members:
previous_values[member.name] = member.value
members = []
for member in xmlnode.findall('member'):
# Keep the name closer to what we'd take from C by default;
# see http://bugzilla.gnome.org/show_bug.cgi?id=575613
name = member.attrib['nick'].replace('-', '_')
if name in previous_values:
value = previous_values[name]
else:
value = member.attrib['value']
members.append(ast.Member(name,
value,
member.attrib['name'],
member.attrib['nick']))
if xmlnode.tag == 'flags':
klass = ast.Bitfield
else:
klass = ast.Enum
node = klass(enum_name, type_name,
gtype_name=type_name,
c_symbol_prefix=c_symbol_prefix,
members=members,
get_type=xmlnode.attrib['get-type'])
self._namespace.append(node, replace=True)
def _split_type_and_symbol_prefix(self, xmlnode):
"""Infer the C symbol prefix from the _get_type function."""
get_type = xmlnode.attrib['get-type']
(ns, name) = self._transformer.split_csymbol(get_type)
assert ns is self._namespace
if name in ('get_type', '_get_gtype'):
message.fatal("""The GObject name %r isn't compatible
with the configured identifier prefixes:
%r
The class would have no name. Most likely you want to specify a
different --identifier-prefix.""" % (xmlnode.attrib['name'], self._namespace.identifier_prefixes))
if name.endswith('_get_type'):
type_suffix = '_get_type'
else:
type_suffix = '_get_gtype'
return (get_type, name[:-len(type_suffix)])
def _introspect_object(self, xmlnode):
type_name = xmlnode.attrib['name']
is_abstract = bool(xmlnode.attrib.get('abstract', False))
(get_type, c_symbol_prefix) = self._split_type_and_symbol_prefix(xmlnode)
try:
object_name = self._transformer.strip_identifier(type_name)
except TransformerException as e:
message.fatal(e)
node = ast.Class(object_name, None,
gtype_name=type_name,
get_type=get_type,
c_symbol_prefix=c_symbol_prefix,
is_abstract=is_abstract)
self._parse_parents(xmlnode, node)
self._introspect_properties(node, xmlnode)
self._introspect_signals(node, xmlnode)
self._introspect_implemented_interfaces(node, xmlnode)
self._add_record_fields(node)
self._namespace.append(node, replace=True)
def _introspect_interface(self, xmlnode):
type_name = xmlnode.attrib['name']
(get_type, c_symbol_prefix) = self._split_type_and_symbol_prefix(xmlnode)
try:
interface_name = self._transformer.strip_identifier(type_name)
except TransformerException as e:
message.fatal(e)
node = ast.Interface(interface_name, None,
gtype_name=type_name,
get_type=get_type,
c_symbol_prefix=c_symbol_prefix)
self._introspect_properties(node, xmlnode)
self._introspect_signals(node, xmlnode)
for child in xmlnode.findall('prerequisite'):
name = child.attrib['name']
prereq = ast.Type.create_from_gtype_name(name)
node.prerequisites.append(prereq)
record = self._namespace.get(node.name)
if isinstance(record, ast.Record):
node.ctype = record.ctype
else:
message.warn_node(node, "Couldn't find associated structure for '%r'" % (node.name, ))
# GtkFileChooserEmbed is an example of a private interface, we
# just filter them out
if xmlnode.attrib['get-type'].startswith('_'):
self._private_internal_types[type_name] = node
else:
self._namespace.append(node, replace=True)
## WORKAROUND ##
# https://bugzilla.gnome.org/show_bug.cgi?id=550616
def _introspect_boxed_gstreamer_workaround(self, xmlnode):
node = ast.Boxed('ParamSpecMiniObject', gtype_name='GParamSpecMiniObject',
get_type='gst_param_spec_mini_object_get_type',
c_symbol_prefix='param_spec_mini_object')
self._boxed_types[node.gtype_name] = node
def _introspect_boxed(self, xmlnode):
type_name = xmlnode.attrib['name']
# Work around GStreamer legacy naming issue
# https://bugzilla.gnome.org/show_bug.cgi?id=550616
if type_name == 'GParamSpecMiniObject':
self._introspect_boxed_gstreamer_workaround(xmlnode)
return
try:
name = self._transformer.strip_identifier(type_name)
except TransformerException as e:
message.fatal(e)
# This one doesn't go in the main namespace; we associate it with
# the struct or union
(get_type, c_symbol_prefix) = self._split_type_and_symbol_prefix(xmlnode)
node = ast.Boxed(name, gtype_name=type_name,
get_type=get_type,
c_symbol_prefix=c_symbol_prefix)
self._boxed_types[node.gtype_name] = node
def _introspect_implemented_interfaces(self, node, xmlnode):
gt_interfaces = []
for interface in xmlnode.findall('implements'):
gitype = ast.Type.create_from_gtype_name(interface.attrib['name'])
gt_interfaces.append(gitype)
node.interfaces = gt_interfaces
def _introspect_properties(self, node, xmlnode):
for pspec in xmlnode.findall('property'):
ctype = pspec.attrib['type']
flags = int(pspec.attrib['flags'])
readable = (flags & G_PARAM_READABLE) != 0
writable = (flags & G_PARAM_WRITABLE) != 0
construct = (flags & G_PARAM_CONSTRUCT) != 0
construct_only = (flags & G_PARAM_CONSTRUCT_ONLY) != 0
node.properties.append(ast.Property(
pspec.attrib['name'],
ast.Type.create_from_gtype_name(ctype),
readable, writable, construct, construct_only))
node.properties = node.properties
def _introspect_signals(self, node, xmlnode):
for signal_info in xmlnode.findall('signal'):
rctype = signal_info.attrib['return']
rtype = ast.Type.create_from_gtype_name(rctype)
return_ = ast.Return(rtype)
parameters = []
when = signal_info.attrib.get('when')
no_recurse = signal_info.attrib.get('no-recurse', '0') == '1'
detailed = signal_info.attrib.get('detailed', '0') == '1'
action = signal_info.attrib.get('action', '0') == '1'
no_hooks = signal_info.attrib.get('no-hooks', '0') == '1'
for i, parameter in enumerate(signal_info.findall('param')):
if i == 0:
argname = 'object'
else:
argname = 'p%s' % (i - 1, )
pctype = parameter.attrib['type']
ptype = ast.Type.create_from_gtype_name(pctype)
param = ast.Parameter(argname, ptype)
param.transfer = ast.PARAM_TRANSFER_NONE
parameters.append(param)
signal = ast.Signal(signal_info.attrib['name'], return_, parameters,
when=when, no_recurse=no_recurse, detailed=detailed,
action=action, no_hooks=no_hooks)
node.signals.append(signal)
node.signals = node.signals
def _parse_parents(self, xmlnode, node):
parents_str = xmlnode.attrib.get('parents', '')
if parents_str != '':
parent_types = map(lambda s: ast.Type.create_from_gtype_name(s),
parents_str.split(','))
else:
parent_types = []
node.parent_chain = parent_types
def _introspect_fundamental(self, xmlnode):
type_name = xmlnode.attrib['name']
is_abstract = bool(xmlnode.attrib.get('abstract', False))
(get_type, c_symbol_prefix) = self._split_type_and_symbol_prefix(xmlnode)
try:
fundamental_name = self._transformer.strip_identifier(type_name)
except TransformerException as e:
message.warn(e)
return
node = ast.Class(fundamental_name, None,
gtype_name=type_name,
get_type=get_type,
c_symbol_prefix=c_symbol_prefix,
is_abstract=is_abstract)
self._parse_parents(xmlnode, node)
node.fundamental = True
self._introspect_implemented_interfaces(node, xmlnode)
self._add_record_fields(node)
self._namespace.append(node, replace=True)
def _add_record_fields(self, node):
# add record fields
record = self._namespace.get(node.name)
if not isinstance(record, ast.Record):
return
node.ctype = record.ctype
node.fields = record.fields
for field in node.fields:
if isinstance(field, ast.Field):
# Object instance fields are assumed to be read-only
# (see also _find_class_record and transformer.py)
field.writable = False
def _introspect_error_quark(self, xmlnode):
symbol = xmlnode.attrib['function']
error_domain = xmlnode.attrib['domain']
function = self._namespace.get_by_symbol(symbol)
if function is None:
return
node = ast.ErrorQuarkFunction(function.name, function.retval,
function.parameters, function.throws,
function.symbol, error_domain)
self._namespace.append(node, replace=True)
def _pair_boxed_type(self, boxed):
try:
name = self._transformer.strip_identifier(boxed.gtype_name)
except TransformerException as e:
message.fatal(e)
pair_node = self._namespace.get(name)
if not pair_node:
# Keep the "bare" boxed instance
self._namespace.append(boxed)
elif isinstance(pair_node, (ast.Record, ast.Union)):
pair_node.add_gtype(boxed.gtype_name, boxed.get_type)
assert boxed.c_symbol_prefix is not None
pair_node.c_symbol_prefix = boxed.c_symbol_prefix
# Quick hack - reset the disguised flag; we're setting it
# incorrectly in the scanner
pair_node.disguised = False
else:
return False
def _find_class_record(self, cls):
pair_record = None
if isinstance(cls, ast.Class):
pair_record = self._namespace.get(cls.name + 'Class')
else:
for suffix in ('Iface', 'Interface'):
pair_record = self._namespace.get(cls.name + suffix)
if pair_record:
break
if not (pair_record and isinstance(pair_record, ast.Record)):
return
cls.glib_type_struct = pair_record.create_type()
cls.inherit_file_positions(pair_record)
pair_record.is_gtype_struct_for = cls.create_type()
|
gpl-2.0
|
jackTheRipper/iotrussia
|
web_server/lib/werkzeug-master/docs/_themes/werkzeug_theme_support.py
|
45
|
4812
|
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class WerkzeugStyle(Style):
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'
Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
Keyword.Namespace: "bold #004461", # class: 'kn'
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'
Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#004461", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "#1B5C66", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #004461", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
Number: "#990000", # class: 'm'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "#888", # class: 'go'
Generic.Prompt: "#745334", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
}
|
gpl-2.0
|
wxgeo/geophar
|
wxgeometrie/sympy/integrals/meijerint.py
|
3
|
76850
|
"""
Integrate functions by rewriting them as Meijer G-functions.
There are three user-visible functions that can be used by other parts of the
sympy library to solve various integration problems:
- meijerint_indefinite
- meijerint_definite
- meijerint_inversion
They can be used to compute, respectively, indefinite integrals, definite
integrals over intervals of the real line, and inverse Laplace-type integrals
(from c-I*oo to c+I*oo). See the respective docstrings for details.
The main references for this are:
[L] Luke, Y. L. (1969), The Special Functions and Their Approximations,
Volume 1
[R] Kelly B. Roach. Meijer G Function Representations.
In: Proceedings of the 1997 International Symposium on Symbolic and
Algebraic Computation, pages 205-211, New York, 1997. ACM.
[P] A. P. Prudnikov, Yu. A. Brychkov and O. I. Marichev (1990).
Integrals and Series: More Special Functions, Vol. 3,
Gordon and Breach Science Publisher
"""
from __future__ import print_function, division
from sympy.core import oo, S, pi, Expr
from sympy.core.exprtools import factor_terms
from sympy.core.function import expand, expand_mul, expand_power_base
from sympy.core.add import Add
from sympy.core.mul import Mul
from sympy.core.compatibility import range
from sympy.core.cache import cacheit
from sympy.core.symbol import Dummy, Wild
from sympy.simplify import hyperexpand, powdenest, collect
from sympy.simplify.fu import sincos_to_sum
from sympy.logic.boolalg import And, Or, BooleanAtom
from sympy.functions.special.delta_functions import Heaviside
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.piecewise import Piecewise, piecewise_fold
from sympy.functions.elementary.hyperbolic import \
_rewrite_hyperbolics_as_exp, HyperbolicFunction
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.functions.special.hyper import meijerg
from sympy.utilities.iterables import multiset_partitions, ordered
from sympy.utilities.misc import debug as _debug
from sympy.utilities import default_sort_key
# keep this at top for easy reference
z = Dummy('z')
def _has(res, *f):
# return True if res has f; in the case of Piecewise
# only return True if *all* pieces have f
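    # For example (illustrative): for res = Piecewise((sin(x), x > 0), (0, True)),
    # _has(res, sin) is False, since the second piece does not contain sin.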
res = piecewise_fold(res)
if getattr(res, 'is_Piecewise', False):
return all(_has(i, *f) for i in res.args)
return res.has(*f)
def _create_lookup_table(table):
""" Add formulae for the function -> meijerg lookup table. """
def wild(n):
return Wild(n, exclude=[z])
p, q, a, b, c = list(map(wild, 'pqabc'))
n = Wild('n', properties=[lambda x: x.is_Integer and x > 0])
t = p*z**q
def add(formula, an, ap, bm, bq, arg=t, fac=S(1), cond=True, hint=True):
table.setdefault(_mytype(formula, z), []).append((formula,
[(fac, meijerg(an, ap, bm, bq, arg))], cond, hint))
def addi(formula, inst, cond, hint=True):
table.setdefault(
_mytype(formula, z), []).append((formula, inst, cond, hint))
def constant(a):
return [(a, meijerg([1], [], [], [0], z)),
(a, meijerg([], [1], [0], [], z))]
table[()] = [(a, constant(a), True, True)]
# [P], Section 8.
from sympy import unpolarify, Function, Not
class IsNonPositiveInteger(Function):
@classmethod
def eval(cls, arg):
arg = unpolarify(arg)
if arg.is_Integer is True:
return arg <= 0
# Section 8.4.2
from sympy import (gamma, pi, cos, exp, re, sin, sinc, sqrt, sinh, cosh,
factorial, log, erf, erfc, erfi, polar_lift)
# TODO this needs more polar_lift (c/f entry for exp)
add(Heaviside(t - b)*(t - b)**(a - 1), [a], [], [], [0], t/b,
gamma(a)*b**(a - 1), And(b > 0))
add(Heaviside(b - t)*(b - t)**(a - 1), [], [a], [0], [], t/b,
gamma(a)*b**(a - 1), And(b > 0))
add(Heaviside(z - (b/p)**(1/q))*(t - b)**(a - 1), [a], [], [], [0], t/b,
gamma(a)*b**(a - 1), And(b > 0))
add(Heaviside((b/p)**(1/q) - z)*(b - t)**(a - 1), [], [a], [0], [], t/b,
gamma(a)*b**(a - 1), And(b > 0))
add((b + t)**(-a), [1 - a], [], [0], [], t/b, b**(-a)/gamma(a),
hint=Not(IsNonPositiveInteger(a)))
add(abs(b - t)**(-a), [1 - a], [(1 - a)/2], [0], [(1 - a)/2], t/b,
2*sin(pi*a/2)*gamma(1 - a)*abs(b)**(-a), re(a) < 1)
add((t**a - b**a)/(t - b), [0, a], [], [0, a], [], t/b,
b**(a - 1)*sin(a*pi)/pi)
# 12
def A1(r, sign, nu):
return pi**(-S(1)/2)*(-sign*nu/2)**(1 - 2*r)
def tmpadd(r, sgn):
# XXX the a**2 is bad for matching
add((sqrt(a**2 + t) + sgn*a)**b/(a**2 + t)**r,
[(1 + b)/2, 1 - 2*r + b/2], [],
[(b - sgn*b)/2], [(b + sgn*b)/2], t/a**2,
a**(b - 2*r)*A1(r, sgn, b))
tmpadd(0, 1)
tmpadd(0, -1)
tmpadd(S(1)/2, 1)
tmpadd(S(1)/2, -1)
# 13
def tmpadd(r, sgn):
add((sqrt(a + p*z**q) + sgn*sqrt(p)*z**(q/2))**b/(a + p*z**q)**r,
[1 - r + sgn*b/2], [1 - r - sgn*b/2], [0, S(1)/2], [],
p*z**q/a, a**(b/2 - r)*A1(r, sgn, b))
tmpadd(0, 1)
tmpadd(0, -1)
tmpadd(S(1)/2, 1)
tmpadd(S(1)/2, -1)
# (those after look obscure)
# Section 8.4.3
add(exp(polar_lift(-1)*t), [], [], [0], [])
# TODO can do sin^n, sinh^n by expansion ... where?
# 8.4.4 (hyperbolic functions)
add(sinh(t), [], [1], [S(1)/2], [1, 0], t**2/4, pi**(S(3)/2))
add(cosh(t), [], [S(1)/2], [0], [S(1)/2, S(1)/2], t**2/4, pi**(S(3)/2))
# Section 8.4.5
# TODO can do t + a. but can also do by expansion... (XXX not really)
add(sin(t), [], [], [S(1)/2], [0], t**2/4, sqrt(pi))
add(cos(t), [], [], [0], [S(1)/2], t**2/4, sqrt(pi))
# Section 8.4.6 (sinc function)
add(sinc(t), [], [], [0], [S(-1)/2], t**2/4, sqrt(pi)/2)
# Section 8.5.5
def make_log1(subs):
N = subs[n]
return [((-1)**N*factorial(N),
meijerg([], [1]*(N + 1), [0]*(N + 1), [], t))]
def make_log2(subs):
N = subs[n]
return [(factorial(N),
meijerg([1]*(N + 1), [], [], [0]*(N + 1), t))]
# TODO these only hold for positive p, and can be made more general
# but who uses log(x)*Heaviside(a-x) anyway ...
# TODO also it would be nice to derive them recursively ...
addi(log(t)**n*Heaviside(1 - t), make_log1, True)
addi(log(t)**n*Heaviside(t - 1), make_log2, True)
def make_log3(subs):
return make_log1(subs) + make_log2(subs)
addi(log(t)**n, make_log3, True)
addi(log(t + a),
constant(log(a)) + [(S(1), meijerg([1, 1], [], [1], [0], t/a))],
True)
addi(log(abs(t - a)), constant(log(abs(a))) +
[(pi, meijerg([1, 1], [S(1)/2], [1], [0, S(1)/2], t/a))],
True)
# TODO log(x)/(x+a) and log(x)/(x-1) can also be done. should they
# be derivable?
# TODO further formulae in this section seem obscure
# Sections 8.4.9-10
# TODO
# Section 8.4.11
from sympy import Ei, I, expint, Si, Ci, Shi, Chi, fresnels, fresnelc
addi(Ei(t),
constant(-I*pi) + [(S(-1), meijerg([], [1], [0, 0], [],
t*polar_lift(-1)))],
True)
# Section 8.4.12
add(Si(t), [1], [], [S(1)/2], [0, 0], t**2/4, sqrt(pi)/2)
add(Ci(t), [], [1], [0, 0], [S(1)/2], t**2/4, -sqrt(pi)/2)
# Section 8.4.13
add(Shi(t), [S(1)/2], [], [0], [S(-1)/2, S(-1)/2], polar_lift(-1)*t**2/4,
t*sqrt(pi)/4)
add(Chi(t), [], [S(1)/2, 1], [0, 0], [S(1)/2, S(1)/2], t**2/4, -
pi**S('3/2')/2)
# generalized exponential integral
add(expint(a, t), [], [a], [a - 1, 0], [], t)
# Section 8.4.14
add(erf(t), [1], [], [S(1)/2], [0], t**2, 1/sqrt(pi))
# TODO exp(-x)*erf(I*x) does not work
add(erfc(t), [], [1], [0, S(1)/2], [], t**2, 1/sqrt(pi))
# This formula for erfi(z) yields a wrong(?) minus sign
#add(erfi(t), [1], [], [S(1)/2], [0], -t**2, I/sqrt(pi))
add(erfi(t), [S(1)/2], [], [0], [-S(1)/2], -t**2, t/sqrt(pi))
# Fresnel Integrals
add(fresnels(t), [1], [], [S(3)/4], [0, S(1)/4], pi**2*t**4/16, S(1)/2)
add(fresnelc(t), [1], [], [S(1)/4], [0, S(3)/4], pi**2*t**4/16, S(1)/2)
##### bessel-type functions #####
from sympy import besselj, bessely, besseli, besselk
# Section 8.4.19
add(besselj(a, t), [], [], [a/2], [-a/2], t**2/4)
# all of the following are derivable
#add(sin(t)*besselj(a, t), [S(1)/4, S(3)/4], [], [(1+a)/2],
# [-a/2, a/2, (1-a)/2], t**2, 1/sqrt(2))
#add(cos(t)*besselj(a, t), [S(1)/4, S(3)/4], [], [a/2],
# [-a/2, (1+a)/2, (1-a)/2], t**2, 1/sqrt(2))
#add(besselj(a, t)**2, [S(1)/2], [], [a], [-a, 0], t**2, 1/sqrt(pi))
#add(besselj(a, t)*besselj(b, t), [0, S(1)/2], [], [(a + b)/2],
# [-(a+b)/2, (a - b)/2, (b - a)/2], t**2, 1/sqrt(pi))
# Section 8.4.20
add(bessely(a, t), [], [-(a + 1)/2], [a/2, -a/2], [-(a + 1)/2], t**2/4)
# TODO all of the following should be derivable
#add(sin(t)*bessely(a, t), [S(1)/4, S(3)/4], [(1 - a - 1)/2],
# [(1 + a)/2, (1 - a)/2], [(1 - a - 1)/2, (1 - 1 - a)/2, (1 - 1 + a)/2],
# t**2, 1/sqrt(2))
#add(cos(t)*bessely(a, t), [S(1)/4, S(3)/4], [(0 - a - 1)/2],
# [(0 + a)/2, (0 - a)/2], [(0 - a - 1)/2, (1 - 0 - a)/2, (1 - 0 + a)/2],
# t**2, 1/sqrt(2))
#add(besselj(a, t)*bessely(b, t), [0, S(1)/2], [(a - b - 1)/2],
# [(a + b)/2, (a - b)/2], [(a - b - 1)/2, -(a + b)/2, (b - a)/2],
# t**2, 1/sqrt(pi))
#addi(bessely(a, t)**2,
# [(2/sqrt(pi), meijerg([], [S(1)/2, S(1)/2 - a], [0, a, -a],
# [S(1)/2 - a], t**2)),
# (1/sqrt(pi), meijerg([S(1)/2], [], [a], [-a, 0], t**2))],
# True)
#addi(bessely(a, t)*bessely(b, t),
# [(2/sqrt(pi), meijerg([], [0, S(1)/2, (1 - a - b)/2],
# [(a + b)/2, (a - b)/2, (b - a)/2, -(a + b)/2],
# [(1 - a - b)/2], t**2)),
# (1/sqrt(pi), meijerg([0, S(1)/2], [], [(a + b)/2],
# [-(a + b)/2, (a - b)/2, (b - a)/2], t**2))],
# True)
# Section 8.4.21 ?
# Section 8.4.22
add(besseli(a, t), [], [(1 + a)/2], [a/2], [-a/2, (1 + a)/2], t**2/4, pi)
# TODO many more formulas. should all be derivable
# Section 8.4.23
add(besselk(a, t), [], [], [a/2, -a/2], [], t**2/4, S(1)/2)
# TODO many more formulas. should all be derivable
# Complete elliptic integrals K(z) and E(z)
from sympy import elliptic_k, elliptic_e
add(elliptic_k(t), [S.Half, S.Half], [], [0], [0], -t, S.Half)
add(elliptic_e(t), [S.Half, 3*S.Half], [], [0], [0], -t, -S.Half/2)
####################################################################
# First some helper functions.
####################################################################
from sympy.utilities.timeutils import timethis
timeit = timethis('meijerg')
def _mytype(f, x):
""" Create a hashable entity describing the type of f. """
if x not in f.free_symbols:
return ()
elif f.is_Function:
return (type(f),)
else:
types = [_mytype(a, x) for a in f.args]
res = []
for t in types:
res += list(t)
res.sort()
return tuple(res)
class _CoeffExpValueError(ValueError):
"""
Exception raised by _get_coeff_exp, for internal use only.
"""
pass
def _get_coeff_exp(expr, x):
"""
When expr is known to be of the form c*x**b, with c and/or b possibly 1,
return c, b.
>>> from sympy.abc import x, a, b
>>> from sympy.integrals.meijerint import _get_coeff_exp
>>> _get_coeff_exp(a*x**b, x)
(a, b)
>>> _get_coeff_exp(x, x)
(1, 1)
>>> _get_coeff_exp(2*x, x)
(2, 1)
>>> _get_coeff_exp(x**3, x)
(1, 3)
"""
from sympy import powsimp
(c, m) = expand_power_base(powsimp(expr)).as_coeff_mul(x)
if not m:
return c, S(0)
[m] = m
if m.is_Pow:
if m.base != x:
raise _CoeffExpValueError('expr not of form a*x**b')
return c, m.exp
elif m == x:
return c, S(1)
else:
raise _CoeffExpValueError('expr not of form a*x**b: %s' % expr)
def _exponents(expr, x):
"""
Find the exponents of ``x`` (not including zero) in ``expr``.
>>> from sympy.integrals.meijerint import _exponents
>>> from sympy.abc import x, y
>>> from sympy import sin
>>> _exponents(x, x)
{1}
>>> _exponents(x**2, x)
{2}
>>> _exponents(x**2 + x, x)
{1, 2}
>>> _exponents(x**3*sin(x + x**y) + 1/x, x)
{-1, 1, 3, y}
"""
def _exponents_(expr, x, res):
if expr == x:
res.update([1])
return
if expr.is_Pow and expr.base == x:
res.update([expr.exp])
return
for arg in expr.args:
_exponents_(arg, x, res)
res = set()
_exponents_(expr, x, res)
return res
def _functions(expr, x):
""" Find the types of functions in expr, to estimate the complexity. """
from sympy import Function
return set(e.func for e in expr.atoms(Function) if x in e.free_symbols)
def _find_splitting_points(expr, x):
"""
Find numbers a such that a linear substitution x -> x + a would
(hopefully) simplify expr.
>>> from sympy.integrals.meijerint import _find_splitting_points as fsp
>>> from sympy import sin
>>> from sympy.abc import a, x
>>> fsp(x, x)
{0}
>>> fsp((x-1)**3, x)
{1}
>>> fsp(sin(x+3)*x, x)
{-3, 0}
"""
p, q = [Wild(n, exclude=[x]) for n in 'pq']
def compute_innermost(expr, res):
if not isinstance(expr, Expr):
return
m = expr.match(p*x + q)
if m and m[p] != 0:
res.add(-m[q]/m[p])
return
if expr.is_Atom:
return
for arg in expr.args:
compute_innermost(arg, res)
innermost = set()
compute_innermost(expr, innermost)
return innermost
def _split_mul(f, x):
"""
Split expression ``f`` into fac, po, g, where fac is a constant factor,
    po = x**s for some s independent of x, and g is "the rest".
>>> from sympy.integrals.meijerint import _split_mul
>>> from sympy import sin
>>> from sympy.abc import s, x
>>> _split_mul((3*x)**s*sin(x**2)*x, x)
(3**s, x*x**s, sin(x**2))
"""
from sympy import polarify, unpolarify
fac = S(1)
po = S(1)
g = S(1)
f = expand_power_base(f)
args = Mul.make_args(f)
for a in args:
if a == x:
po *= x
elif x not in a.free_symbols:
fac *= a
else:
if a.is_Pow and x not in a.exp.free_symbols:
c, t = a.base.as_coeff_mul(x)
if t != (x,):
c, t = expand_mul(a.base).as_coeff_mul(x)
if t == (x,):
po *= x**a.exp
fac *= unpolarify(polarify(c**a.exp, subs=False))
continue
g *= a
return fac, po, g
def _mul_args(f):
"""
Return a list ``L`` such that Mul(*L) == f.
If f is not a Mul or Pow, L=[f].
If f=g**n for an integer n, L=[g]*n.
If f is a Mul, L comes from applying _mul_args to all factors of f.
"""
args = Mul.make_args(f)
gs = []
for g in args:
if g.is_Pow and g.exp.is_Integer:
n = g.exp
base = g.base
if n < 0:
n = -n
base = 1/base
gs += [base]*n
else:
gs.append(g)
return gs
def _mul_as_two_parts(f):
"""
Find all the ways to split f into a product of two terms.
Return None on failure.
Although the order is canonical from multiset_partitions, this is
not necessarily the best order to process the terms. For example,
if the case of len(gs) == 2 is removed and multiset is allowed to
sort the terms, some tests fail.
>>> from sympy.integrals.meijerint import _mul_as_two_parts
>>> from sympy import sin, exp, ordered
>>> from sympy.abc import x
>>> list(ordered(_mul_as_two_parts(x*sin(x)*exp(x))))
[(x, exp(x)*sin(x)), (x*exp(x), sin(x)), (x*sin(x), exp(x))]
"""
gs = _mul_args(f)
if len(gs) < 2:
return None
if len(gs) == 2:
return [tuple(gs)]
return [(Mul(*x), Mul(*y)) for (x, y) in multiset_partitions(gs, 2)]
def _inflate_g(g, n):
""" Return C, h such that h is a G function of argument z**n and
g = C*h. """
# TODO should this be a method of meijerg?
# See: [L, page 150, equation (5)]
def inflate(params, n):
""" (a1, .., ak) -> (a1/n, (a1+1)/n, ..., (ak + n-1)/n) """
res = []
for a in params:
for i in range(n):
res.append((a + i)/n)
return res
v = S(len(g.ap) - len(g.bq))
C = n**(1 + g.nu + v/2)
C /= (2*pi)**((n - 1)*g.delta)
return C, meijerg(inflate(g.an, n), inflate(g.aother, n),
inflate(g.bm, n), inflate(g.bother, n),
g.argument**n * n**(n*v))
def _flip_g(g):
""" Turn the G function into one of inverse argument
(i.e. G(1/x) -> G'(x)) """
# See [L], section 5.2
def tr(l):
return [1 - a for a in l]
return meijerg(tr(g.bm), tr(g.bother), tr(g.an), tr(g.aother), 1/g.argument)
def _inflate_fox_h(g, a):
r"""
Let d denote the integrand in the definition of the G function ``g``.
Consider the function H which is defined in the same way, but with
integrand d/Gamma(a*s) (contour conventions as usual).
If a is rational, the function H can be written as C*G, for a constant C
and a G-function G.
This function returns C, G.
"""
if a < 0:
return _inflate_fox_h(_flip_g(g), -a)
p = S(a.p)
q = S(a.q)
# We use the substitution s->qs, i.e. inflate g by q. We are left with an
# extra factor of Gamma(p*s), for which we use Gauss' multiplication
# theorem.
D, g = _inflate_g(g, q)
z = g.argument
D /= (2*pi)**((1 - p)/2)*p**(-S(1)/2)
z /= p**p
bs = [(n + 1)/p for n in range(p)]
return D, meijerg(g.an, g.aother, g.bm, list(g.bother) + bs, z)
_dummies = {}
def _dummy(name, token, expr, **kwargs):
"""
Return a dummy. This will return the same dummy if the same token+name is
requested more than once, and it is not already in expr.
This is for being cache-friendly.
"""
d = _dummy_(name, token, **kwargs)
if d in expr.free_symbols:
return Dummy(name, **kwargs)
return d
def _dummy_(name, token, **kwargs):
"""
Return a dummy associated to name and token. Same effect as declaring
it globally.
"""
global _dummies
if not (name, token) in _dummies:
_dummies[(name, token)] = Dummy(name, **kwargs)
return _dummies[(name, token)]
def _is_analytic(f, x):
""" Check if f(x), when expressed using G functions on the positive reals,
will in fact agree with the G functions almost everywhere """
from sympy import Heaviside, Abs
return not any(x in expr.free_symbols for expr in f.atoms(Heaviside, Abs))
def _condsimp(cond):
"""
Do naive simplifications on ``cond``.
Note that this routine is completely ad-hoc, simplification rules being
added as need arises rather than following any logical pattern.
>>> from sympy.integrals.meijerint import _condsimp as simp
>>> from sympy import Or, Eq, unbranched_argument as arg, And
>>> from sympy.abc import x, y, z
>>> simp(Or(x < y, z, Eq(x, y)))
z | (x <= y)
>>> simp(Or(x <= y, And(x < y, z)))
x <= y
"""
from sympy import (
symbols, Wild, Eq, unbranched_argument, exp_polar, pi, I,
arg, periodic_argument, oo, polar_lift)
from sympy.logic.boolalg import BooleanFunction
if not isinstance(cond, BooleanFunction):
return cond
cond = cond.func(*list(map(_condsimp, cond.args)))
change = True
p, q, r = symbols('p q r', cls=Wild)
rules = [
(Or(p < q, Eq(p, q)), p <= q),
# The next two obviously are instances of a general pattern, but it is
# easier to spell out the few cases we care about.
(And(abs(arg(p)) <= pi, abs(arg(p) - 2*pi) <= pi),
Eq(arg(p) - pi, 0)),
(And(abs(2*arg(p) + pi) <= pi, abs(2*arg(p) - pi) <= pi),
Eq(arg(p), 0)),
(And(abs(unbranched_argument(p)) <= pi,
abs(unbranched_argument(exp_polar(-2*pi*I)*p)) <= pi),
Eq(unbranched_argument(exp_polar(-I*pi)*p), 0)),
(And(abs(unbranched_argument(p)) <= pi/2,
abs(unbranched_argument(exp_polar(-pi*I)*p)) <= pi/2),
Eq(unbranched_argument(exp_polar(-I*pi/2)*p), 0)),
(Or(p <= q, And(p < q, r)), p <= q)
]
while change:
change = False
for fro, to in rules:
if fro.func != cond.func:
continue
for n, arg1 in enumerate(cond.args):
if r in fro.args[0].free_symbols:
m = arg1.match(fro.args[1])
num = 1
else:
num = 0
m = arg1.match(fro.args[0])
if not m:
continue
otherargs = [x.subs(m) for x in fro.args[:num] + fro.args[num + 1:]]
otherlist = [n]
for arg2 in otherargs:
for k, arg3 in enumerate(cond.args):
if k in otherlist:
continue
if arg2 == arg3:
otherlist += [k]
break
if isinstance(arg3, And) and arg2.args[1] == r and \
isinstance(arg2, And) and arg2.args[0] in arg3.args:
otherlist += [k]
break
if isinstance(arg3, And) and arg2.args[0] == r and \
isinstance(arg2, And) and arg2.args[1] in arg3.args:
otherlist += [k]
break
if len(otherlist) != len(otherargs) + 1:
continue
newargs = [arg_ for (k, arg_) in enumerate(cond.args)
if k not in otherlist] + [to.subs(m)]
cond = cond.func(*newargs)
change = True
break
# final tweak
def repl_eq(orig):
if orig.lhs == 0:
expr = orig.rhs
elif orig.rhs == 0:
expr = orig.lhs
else:
return orig
m = expr.match(arg(p)**q)
if not m:
m = expr.match(unbranched_argument(polar_lift(p)**q))
if not m:
if isinstance(expr, periodic_argument) and not expr.args[0].is_polar \
and expr.args[1] == oo:
return (expr.args[0] > 0)
return orig
return (m[p] > 0)
return cond.replace(
lambda expr: expr.is_Relational and expr.rel_op == '==',
repl_eq)
def _eval_cond(cond):
""" Re-evaluate the conditions. """
if isinstance(cond, bool):
return cond
return _condsimp(cond.doit())
####################################################################
# Now the "backbone" functions to do actual integration.
####################################################################
def _my_principal_branch(expr, period, full_pb=False):
""" Bring expr nearer to its principal branch by removing superfluous
factors.
This function does *not* guarantee to yield the principal branch,
to avoid introducing opaque principal_branch() objects,
unless full_pb=True. """
from sympy import principal_branch
res = principal_branch(expr, period)
if not full_pb:
res = res.replace(principal_branch, lambda x, y: x)
return res
def _rewrite_saxena_1(fac, po, g, x):
"""
Rewrite the integral fac*po*g dx, from zero to infinity, as
integral fac*G, where G has argument a*x. Note po=x**s.
Return fac, G.
"""
_, s = _get_coeff_exp(po, x)
a, b = _get_coeff_exp(g.argument, x)
period = g.get_period()
a = _my_principal_branch(a, period)
# We substitute t = x**b.
C = fac/(abs(b)*a**((s + 1)/b - 1))
# Absorb a factor of (at)**((1 + s)/b - 1).
def tr(l):
return [a + (1 + s)/b - 1 for a in l]
return C, meijerg(tr(g.an), tr(g.aother), tr(g.bm), tr(g.bother),
a*x)
def _check_antecedents_1(g, x, helper=False):
r"""
Return a condition under which the mellin transform of g exists.
Any power of x has already been absorbed into the G function,
so this is just int_0^\infty g dx.
See [L, section 5.6.1]. (Note that s=1.)
If ``helper`` is True, only check if the MT exists at infinity, i.e. if
int_1^\infty g dx exists.
"""
# NOTE if you update these conditions, please update the documentation as well
from sympy import Eq, Not, ceiling, Ne, re, unbranched_argument as arg
delta = g.delta
eta, _ = _get_coeff_exp(g.argument, x)
m, n, p, q = S([len(g.bm), len(g.an), len(g.ap), len(g.bq)])
xi = m + n - p
if p > q:
def tr(l):
return [1 - x for x in l]
return _check_antecedents_1(meijerg(tr(g.bm), tr(g.bother),
tr(g.an), tr(g.aother), x/eta),
x)
tmp = []
for b in g.bm:
tmp += [-re(b) < 1]
for a in g.an:
tmp += [1 < 1 - re(a)]
cond_3 = And(*tmp)
for b in g.bother:
tmp += [-re(b) < 1]
for a in g.aother:
tmp += [1 < 1 - re(a)]
cond_3_star = And(*tmp)
cond_4 = (-re(g.nu) + (q + 1 - p)/2 > q - p)
def debug(*msg):
_debug(*msg)
debug('Checking antecedents for 1 function:')
debug(' delta=%s, eta=%s, m=%s, n=%s, p=%s, q=%s'
% (delta, eta, m, n, p, q))
debug(' ap = %s, %s' % (list(g.an), list(g.aother)))
debug(' bq = %s, %s' % (list(g.bm), list(g.bother)))
debug(' cond_3=%s, cond_3*=%s, cond_4=%s' % (cond_3, cond_3_star, cond_4))
conds = []
# case 1
case1 = []
tmp1 = [1 <= n, p < q, 1 <= m]
tmp2 = [1 <= p, 1 <= m, Eq(q, p + 1), Not(And(Eq(n, 0), Eq(m, p + 1)))]
tmp3 = [1 <= p, Eq(q, p)]
for k in range(ceiling(delta/2) + 1):
tmp3 += [Ne(abs(arg(eta)), (delta - 2*k)*pi)]
tmp = [delta > 0, abs(arg(eta)) < delta*pi]
extra = [Ne(eta, 0), cond_3]
if helper:
extra = []
for t in [tmp1, tmp2, tmp3]:
case1 += [And(*(t + tmp + extra))]
conds += case1
debug(' case 1:', case1)
# case 2
extra = [cond_3]
if helper:
extra = []
case2 = [And(Eq(n, 0), p + 1 <= m, m <= q,
abs(arg(eta)) < delta*pi, *extra)]
conds += case2
debug(' case 2:', case2)
# case 3
extra = [cond_3, cond_4]
if helper:
extra = []
case3 = [And(p < q, 1 <= m, delta > 0, Eq(abs(arg(eta)), delta*pi),
*extra)]
case3 += [And(p <= q - 2, Eq(delta, 0), Eq(abs(arg(eta)), 0), *extra)]
conds += case3
debug(' case 3:', case3)
# TODO altered cases 4-7
    # extra case from wolfram functions site:
# (reproduced verbatim from Prudnikov, section 2.24.2)
# http://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/01/
case_extra = []
case_extra += [Eq(p, q), Eq(delta, 0), Eq(arg(eta), 0), Ne(eta, 0)]
if not helper:
case_extra += [cond_3]
s = []
for a, b in zip(g.ap, g.bq):
s += [b - a]
case_extra += [re(Add(*s)) < 0]
case_extra = And(*case_extra)
conds += [case_extra]
debug(' extra case:', [case_extra])
case_extra_2 = [And(delta > 0, abs(arg(eta)) < delta*pi)]
if not helper:
case_extra_2 += [cond_3]
case_extra_2 = And(*case_extra_2)
conds += [case_extra_2]
debug(' second extra case:', [case_extra_2])
# TODO This leaves only one case from the three listed by Prudnikov.
# Investigate if these indeed cover everything; if so, remove the rest.
return Or(*conds)
def _int0oo_1(g, x):
r"""
Evaluate int_0^\infty g dx using G functions,
assuming the necessary conditions are fulfilled.
>>> from sympy.abc import a, b, c, d, x, y
>>> from sympy import meijerg
>>> from sympy.integrals.meijerint import _int0oo_1
>>> _int0oo_1(meijerg([a], [b], [c], [d], x*y), x)
gamma(-a)*gamma(c + 1)/(y*gamma(-d)*gamma(b + 1))
"""
# See [L, section 5.6.1]. Note that s=1.
from sympy import gamma, gammasimp, unpolarify
eta, _ = _get_coeff_exp(g.argument, x)
res = 1/eta
# XXX TODO we should reduce order first
for b in g.bm:
res *= gamma(b + 1)
for a in g.an:
res *= gamma(1 - a - 1)
for b in g.bother:
res /= gamma(1 - b - 1)
for a in g.aother:
res /= gamma(a + 1)
return gammasimp(unpolarify(res))
def _rewrite_saxena(fac, po, g1, g2, x, full_pb=False):
"""
Rewrite the integral fac*po*g1*g2 from 0 to oo in terms of G functions
with argument c*x.
Return C, f1, f2 such that integral C f1 f2 from 0 to infinity equals
integral fac po g1 g2 from 0 to infinity.
>>> from sympy.integrals.meijerint import _rewrite_saxena
>>> from sympy.abc import s, t, m
>>> from sympy import meijerg
>>> g1 = meijerg([], [], [0], [], s*t)
>>> g2 = meijerg([], [], [m/2], [-m/2], t**2/4)
>>> r = _rewrite_saxena(1, t**0, g1, g2, t)
>>> r[0]
s/(4*sqrt(pi))
>>> r[1]
meijerg(((), ()), ((-1/2, 0), ()), s**2*t/4)
>>> r[2]
meijerg(((), ()), ((m/2,), (-m/2,)), t/4)
"""
from sympy.core.numbers import ilcm
def pb(g):
a, b = _get_coeff_exp(g.argument, x)
per = g.get_period()
return meijerg(g.an, g.aother, g.bm, g.bother,
_my_principal_branch(a, per, full_pb)*x**b)
_, s = _get_coeff_exp(po, x)
_, b1 = _get_coeff_exp(g1.argument, x)
_, b2 = _get_coeff_exp(g2.argument, x)
if (b1 < 0) == True:
b1 = -b1
g1 = _flip_g(g1)
if (b2 < 0) == True:
b2 = -b2
g2 = _flip_g(g2)
if not b1.is_Rational or not b2.is_Rational:
return
m1, n1 = b1.p, b1.q
m2, n2 = b2.p, b2.q
tau = ilcm(m1*n2, m2*n1)
r1 = tau//(m1*n2)
r2 = tau//(m2*n1)
C1, g1 = _inflate_g(g1, r1)
C2, g2 = _inflate_g(g2, r2)
g1 = pb(g1)
g2 = pb(g2)
fac *= C1*C2
a1, b = _get_coeff_exp(g1.argument, x)
a2, _ = _get_coeff_exp(g2.argument, x)
# arbitrarily tack on the x**s part to g1
# TODO should we try both?
exp = (s + 1)/b - 1
fac = fac/(abs(b) * a1**exp)
def tr(l):
return [a + exp for a in l]
g1 = meijerg(tr(g1.an), tr(g1.aother), tr(g1.bm), tr(g1.bother), a1*x)
g2 = meijerg(g2.an, g2.aother, g2.bm, g2.bother, a2*x)
return powdenest(fac, polar=True), g1, g2
def _check_antecedents(g1, g2, x):
""" Return a condition under which the integral theorem applies. """
from sympy import re, Eq, Ne, cos, I, exp, sin, sign, unpolarify
from sympy import arg as arg_, unbranched_argument as arg
# Yes, this is madness.
# XXX TODO this is a testing *nightmare*
# NOTE if you update these conditions, please update the documentation as well
# The following conditions are found in
# [P], Section 2.24.1
#
# They are also reproduced (verbatim!) at
# http://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/03/
#
# Note: k=l=r=alpha=1
sigma, _ = _get_coeff_exp(g1.argument, x)
omega, _ = _get_coeff_exp(g2.argument, x)
s, t, u, v = S([len(g1.bm), len(g1.an), len(g1.ap), len(g1.bq)])
m, n, p, q = S([len(g2.bm), len(g2.an), len(g2.ap), len(g2.bq)])
bstar = s + t - (u + v)/2
cstar = m + n - (p + q)/2
rho = g1.nu + (u - v)/2 + 1
mu = g2.nu + (p - q)/2 + 1
phi = q - p - (v - u)
eta = 1 - (v - u) - mu - rho
psi = (pi*(q - m - n) + abs(arg(omega)))/(q - p)
theta = (pi*(v - s - t) + abs(arg(sigma)))/(v - u)
_debug('Checking antecedents:')
_debug(' sigma=%s, s=%s, t=%s, u=%s, v=%s, b*=%s, rho=%s'
% (sigma, s, t, u, v, bstar, rho))
_debug(' omega=%s, m=%s, n=%s, p=%s, q=%s, c*=%s, mu=%s,'
% (omega, m, n, p, q, cstar, mu))
_debug(' phi=%s, eta=%s, psi=%s, theta=%s' % (phi, eta, psi, theta))
def _c1():
for g in [g1, g2]:
for i in g.an:
for j in g.bm:
diff = i - j
if diff.is_integer and diff.is_positive:
return False
return True
c1 = _c1()
c2 = And(*[re(1 + i + j) > 0 for i in g1.bm for j in g2.bm])
c3 = And(*[re(1 + i + j) < 1 + 1 for i in g1.an for j in g2.an])
c4 = And(*[(p - q)*re(1 + i - 1) - re(mu) > -S(3)/2 for i in g1.an])
c5 = And(*[(p - q)*re(1 + i) - re(mu) > -S(3)/2 for i in g1.bm])
c6 = And(*[(u - v)*re(1 + i - 1) - re(rho) > -S(3)/2 for i in g2.an])
c7 = And(*[(u - v)*re(1 + i) - re(rho) > -S(3)/2 for i in g2.bm])
c8 = (abs(phi) + 2*re((rho - 1)*(q - p) + (v - u)*(q - p) + (mu -
1)*(v - u)) > 0)
c9 = (abs(phi) - 2*re((rho - 1)*(q - p) + (v - u)*(q - p) + (mu -
1)*(v - u)) > 0)
c10 = (abs(arg(sigma)) < bstar*pi)
c11 = Eq(abs(arg(sigma)), bstar*pi)
c12 = (abs(arg(omega)) < cstar*pi)
c13 = Eq(abs(arg(omega)), cstar*pi)
# The following condition is *not* implemented as stated on the wolfram
# function site. In the book of Prudnikov there is an additional part
# (the And involving re()). However, I only have this book in Russian, and
# I don't read any Russian. The following condition is what other people
# have told me it means.
# Worryingly, it is different from the condition implemented in REDUCE.
# The REDUCE implementation:
# https://reduce-algebra.svn.sourceforge.net/svnroot/reduce-algebra/trunk/packages/defint/definta.red
# (search for tst14)
# The Wolfram alpha version:
# http://functions.wolfram.com/HypergeometricFunctions/MeijerG/21/02/03/03/0014/
z0 = exp(-(bstar + cstar)*pi*I)
zos = unpolarify(z0*omega/sigma)
zso = unpolarify(z0*sigma/omega)
if zos == 1/zso:
c14 = And(Eq(phi, 0), bstar + cstar <= 1,
Or(Ne(zos, 1), re(mu + rho + v - u) < 1,
re(mu + rho + q - p) < 1))
else:
def _cond(z):
'''Returns True if abs(arg(1-z)) < pi, avoiding arg(0).
Note: if `z` is 1 then arg is NaN. This raises a
TypeError on `NaN < pi`. Previously this gave `False` so
this behavior has been hardcoded here but someone should
check if this NaN is more serious! This NaN is triggered by
test_meijerint() in test_meijerint.py:
`meijerint_definite(exp(x), x, 0, I)`
'''
return z != 1 and abs(arg_(1 - z)) < pi
c14 = And(Eq(phi, 0), bstar - 1 + cstar <= 0,
Or(And(Ne(zos, 1), _cond(zos)),
And(re(mu + rho + v - u) < 1, Eq(zos, 1))))
c14_alt = And(Eq(phi, 0), cstar - 1 + bstar <= 0,
Or(And(Ne(zso, 1), _cond(zso)),
And(re(mu + rho + q - p) < 1, Eq(zso, 1))))
# Since r=k=l=1, in our case there is c14_alt which is the same as calling
# us with (g1, g2) = (g2, g1). The conditions below enumerate all cases
# (i.e. we don't have to try arguments reversed by hand), and indeed try
# all symmetric cases. (i.e. whenever there is a condition involving c14,
# there is also a dual condition which is exactly what we would get when g1,
# g2 were interchanged, *but c14 was unaltered*).
# Hence the following seems correct:
c14 = Or(c14, c14_alt)
'''
When `c15` is NaN (e.g. from `psi` being NaN as happens during
'test_issue_4992' and/or `theta` is NaN as in 'test_issue_6253',
both in `test_integrals.py`) the comparison to 0 formerly gave False
whereas now an error is raised. To keep the old behavior, the value
of NaN is replaced with False but perhaps a closer look at this condition
should be made: XXX how should conditions leading to c15=NaN be handled?
'''
try:
lambda_c = (q - p)*abs(omega)**(1/(q - p))*cos(psi) \
+ (v - u)*abs(sigma)**(1/(v - u))*cos(theta)
# the TypeError might be raised here, e.g. if lambda_c is NaN
if _eval_cond(lambda_c > 0) != False:
c15 = (lambda_c > 0)
else:
def lambda_s0(c1, c2):
return c1*(q - p)*abs(omega)**(1/(q - p))*sin(psi) \
+ c2*(v - u)*abs(sigma)**(1/(v - u))*sin(theta)
lambda_s = Piecewise(
((lambda_s0(+1, +1)*lambda_s0(-1, -1)),
And(Eq(arg(sigma), 0), Eq(arg(omega), 0))),
(lambda_s0(sign(arg(omega)), +1)*lambda_s0(sign(arg(omega)), -1),
And(Eq(arg(sigma), 0), Ne(arg(omega), 0))),
(lambda_s0(+1, sign(arg(sigma)))*lambda_s0(-1, sign(arg(sigma))),
And(Ne(arg(sigma), 0), Eq(arg(omega), 0))),
(lambda_s0(sign(arg(omega)), sign(arg(sigma))), True))
tmp = [lambda_c > 0,
And(Eq(lambda_c, 0), Ne(lambda_s, 0), re(eta) > -1),
And(Eq(lambda_c, 0), Eq(lambda_s, 0), re(eta) > 0)]
c15 = Or(*tmp)
except TypeError:
c15 = False
for cond, i in [(c1, 1), (c2, 2), (c3, 3), (c4, 4), (c5, 5), (c6, 6),
(c7, 7), (c8, 8), (c9, 9), (c10, 10), (c11, 11),
(c12, 12), (c13, 13), (c14, 14), (c15, 15)]:
_debug(' c%s:' % i, cond)
# We will return Or(*conds)
conds = []
def pr(count):
_debug(' case %s:' % count, conds[-1])
conds += [And(m*n*s*t != 0, bstar.is_positive is True, cstar.is_positive is True, c1, c2, c3, c10,
c12)] # 1
pr(1)
conds += [And(Eq(u, v), Eq(bstar, 0), cstar.is_positive is True, sigma.is_positive is True, re(rho) < 1,
c1, c2, c3, c12)] # 2
pr(2)
conds += [And(Eq(p, q), Eq(cstar, 0), bstar.is_positive is True, omega.is_positive is True, re(mu) < 1,
c1, c2, c3, c10)] # 3
pr(3)
conds += [And(Eq(p, q), Eq(u, v), Eq(bstar, 0), Eq(cstar, 0),
sigma.is_positive is True, omega.is_positive is True, re(mu) < 1, re(rho) < 1,
Ne(sigma, omega), c1, c2, c3)] # 4
pr(4)
conds += [And(Eq(p, q), Eq(u, v), Eq(bstar, 0), Eq(cstar, 0),
sigma.is_positive is True, omega.is_positive is True, re(mu + rho) < 1,
Ne(omega, sigma), c1, c2, c3)] # 5
pr(5)
conds += [And(p > q, s.is_positive is True, bstar.is_positive is True, cstar >= 0,
c1, c2, c3, c5, c10, c13)] # 6
pr(6)
conds += [And(p < q, t.is_positive is True, bstar.is_positive is True, cstar >= 0,
c1, c2, c3, c4, c10, c13)] # 7
pr(7)
conds += [And(u > v, m.is_positive is True, cstar.is_positive is True, bstar >= 0,
c1, c2, c3, c7, c11, c12)] # 8
pr(8)
conds += [And(u < v, n.is_positive is True, cstar.is_positive is True, bstar >= 0,
c1, c2, c3, c6, c11, c12)] # 9
pr(9)
conds += [And(p > q, Eq(u, v), Eq(bstar, 0), cstar >= 0, sigma.is_positive is True,
re(rho) < 1, c1, c2, c3, c5, c13)] # 10
pr(10)
conds += [And(p < q, Eq(u, v), Eq(bstar, 0), cstar >= 0, sigma.is_positive is True,
re(rho) < 1, c1, c2, c3, c4, c13)] # 11
pr(11)
conds += [And(Eq(p, q), u > v, bstar >= 0, Eq(cstar, 0), omega.is_positive is True,
re(mu) < 1, c1, c2, c3, c7, c11)] # 12
pr(12)
conds += [And(Eq(p, q), u < v, bstar >= 0, Eq(cstar, 0), omega.is_positive is True,
re(mu) < 1, c1, c2, c3, c6, c11)] # 13
pr(13)
conds += [And(p < q, u > v, bstar >= 0, cstar >= 0,
c1, c2, c3, c4, c7, c11, c13)] # 14
pr(14)
conds += [And(p > q, u < v, bstar >= 0, cstar >= 0,
c1, c2, c3, c5, c6, c11, c13)] # 15
pr(15)
conds += [And(p > q, u > v, bstar >= 0, cstar >= 0,
c1, c2, c3, c5, c7, c8, c11, c13, c14)] # 16
pr(16)
conds += [And(p < q, u < v, bstar >= 0, cstar >= 0,
c1, c2, c3, c4, c6, c9, c11, c13, c14)] # 17
pr(17)
conds += [And(Eq(t, 0), s.is_positive is True, bstar.is_positive is True, phi.is_positive is True, c1, c2, c10)] # 18
pr(18)
conds += [And(Eq(s, 0), t.is_positive is True, bstar.is_positive is True, phi.is_negative is True, c1, c3, c10)] # 19
pr(19)
conds += [And(Eq(n, 0), m.is_positive is True, cstar.is_positive is True, phi.is_negative is True, c1, c2, c12)] # 20
pr(20)
conds += [And(Eq(m, 0), n.is_positive is True, cstar.is_positive is True, phi.is_positive is True, c1, c3, c12)] # 21
pr(21)
conds += [And(Eq(s*t, 0), bstar.is_positive is True, cstar.is_positive is True,
c1, c2, c3, c10, c12)] # 22
pr(22)
conds += [And(Eq(m*n, 0), bstar.is_positive is True, cstar.is_positive is True,
c1, c2, c3, c10, c12)] # 23
pr(23)
# The following case is from [Luke1969]. As far as I can tell, it is *not*
# covered by Prudnikov's.
# Let G1 and G2 be the two G-functions. Suppose the integral exists from
# 0 to a > 0 (this is the easy part), that G1 decays exponentially at
# infinity, and that the Mellin transform of G2 exists.
# Then the integral exists.
mt1_exists = _check_antecedents_1(g1, x, helper=True)
mt2_exists = _check_antecedents_1(g2, x, helper=True)
conds += [And(mt2_exists, Eq(t, 0), u < s, bstar.is_positive is True, c10, c1, c2, c3)]
pr('E1')
conds += [And(mt2_exists, Eq(s, 0), v < t, bstar.is_positive is True, c10, c1, c2, c3)]
pr('E2')
conds += [And(mt1_exists, Eq(n, 0), p < m, cstar.is_positive is True, c12, c1, c2, c3)]
pr('E3')
conds += [And(mt1_exists, Eq(m, 0), q < n, cstar.is_positive is True, c12, c1, c2, c3)]
pr('E4')
# Let's short-circuit if this worked ...
# the rest is corner-cases and terrible to read.
r = Or(*conds)
if _eval_cond(r) != False:
return r
conds += [And(m + n > p, Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True, cstar.is_negative is True,
abs(arg(omega)) < (m + n - p + 1)*pi,
c1, c2, c10, c14, c15)] # 24
pr(24)
conds += [And(m + n > q, Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True, cstar.is_negative is True,
abs(arg(omega)) < (m + n - q + 1)*pi,
c1, c3, c10, c14, c15)] # 25
pr(25)
conds += [And(Eq(p, q - 1), Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True,
cstar >= 0, cstar*pi < abs(arg(omega)),
c1, c2, c10, c14, c15)] # 26
pr(26)
conds += [And(Eq(p, q + 1), Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True,
cstar >= 0, cstar*pi < abs(arg(omega)),
c1, c3, c10, c14, c15)] # 27
pr(27)
conds += [And(p < q - 1, Eq(t, 0), Eq(phi, 0), s.is_positive is True, bstar.is_positive is True,
cstar >= 0, cstar*pi < abs(arg(omega)),
abs(arg(omega)) < (m + n - p + 1)*pi,
c1, c2, c10, c14, c15)] # 28
pr(28)
conds += [And(
p > q + 1, Eq(s, 0), Eq(phi, 0), t.is_positive is True, bstar.is_positive is True, cstar >= 0,
cstar*pi < abs(arg(omega)),
abs(arg(omega)) < (m + n - q + 1)*pi,
c1, c3, c10, c14, c15)] # 29
pr(29)
conds += [And(Eq(n, 0), Eq(phi, 0), s + t > 0, m.is_positive is True, cstar.is_positive is True, bstar.is_negative is True,
abs(arg(sigma)) < (s + t - u + 1)*pi,
c1, c2, c12, c14, c15)] # 30
pr(30)
conds += [And(Eq(m, 0), Eq(phi, 0), s + t > v, n.is_positive is True, cstar.is_positive is True, bstar.is_negative is True,
abs(arg(sigma)) < (s + t - v + 1)*pi,
c1, c3, c12, c14, c15)] # 31
pr(31)
conds += [And(Eq(n, 0), Eq(phi, 0), Eq(u, v - 1), m.is_positive is True, cstar.is_positive is True,
bstar >= 0, bstar*pi < abs(arg(sigma)),
abs(arg(sigma)) < (bstar + 1)*pi,
c1, c2, c12, c14, c15)] # 32
pr(32)
conds += [And(Eq(m, 0), Eq(phi, 0), Eq(u, v + 1), n.is_positive is True, cstar.is_positive is True,
bstar >= 0, bstar*pi < abs(arg(sigma)),
abs(arg(sigma)) < (bstar + 1)*pi,
c1, c3, c12, c14, c15)] # 33
pr(33)
conds += [And(
Eq(n, 0), Eq(phi, 0), u < v - 1, m.is_positive is True, cstar.is_positive is True, bstar >= 0,
bstar*pi < abs(arg(sigma)),
abs(arg(sigma)) < (s + t - u + 1)*pi,
c1, c2, c12, c14, c15)] # 34
pr(34)
conds += [And(
Eq(m, 0), Eq(phi, 0), u > v + 1, n.is_positive is True, cstar.is_positive is True, bstar >= 0,
bstar*pi < abs(arg(sigma)),
abs(arg(sigma)) < (s + t - v + 1)*pi,
c1, c3, c12, c14, c15)] # 35
pr(35)
return Or(*conds)
# NOTE An alternative, but as far as I can tell weaker, set of conditions
# can be found in [L, section 5.6.2].
def _int0oo(g1, g2, x):
"""
Express the integral of g1*g2 from zero to infinity using a G function,
assuming the necessary conditions are fulfilled.
>>> from sympy.integrals.meijerint import _int0oo
>>> from sympy.abc import s, t, m
>>> from sympy import meijerg, S
>>> g1 = meijerg([], [], [-S(1)/2, 0], [], s**2*t/4)
>>> g2 = meijerg([], [], [m/2], [-m/2], t/4)
>>> _int0oo(g1, g2, t)
4*meijerg(((1/2, 0), ()), ((m/2,), (-m/2,)), s**(-2))/s**2
"""
# See: [L, section 5.6.2, equation (1)]
eta, _ = _get_coeff_exp(g1.argument, x)
omega, _ = _get_coeff_exp(g2.argument, x)
def neg(l):
return [-x for x in l]
a1 = neg(g1.bm) + list(g2.an)
a2 = list(g2.aother) + neg(g1.bother)
b1 = neg(g1.an) + list(g2.bm)
b2 = list(g2.bother) + neg(g1.aother)
return meijerg(a1, a2, b1, b2, omega/eta)/eta
def _rewrite_inversion(fac, po, g, x):
""" Absorb ``po`` == x**s into g. """
_, s = _get_coeff_exp(po, x)
a, b = _get_coeff_exp(g.argument, x)
def tr(l):
return [t + s/b for t in l]
return (powdenest(fac/a**(s/b), polar=True),
meijerg(tr(g.an), tr(g.aother), tr(g.bm), tr(g.bother), g.argument))
def _check_antecedents_inversion(g, x):
""" Check antecedents for the laplace inversion integral. """
from sympy import re, im, Or, And, Eq, exp, I, Add, nan, Ne
_debug('Checking antecedents for inversion:')
z = g.argument
_, e = _get_coeff_exp(z, x)
if e < 0:
_debug(' Flipping G.')
# We want to assume that argument gets large as |x| -> oo
return _check_antecedents_inversion(_flip_g(g), x)
def statement_half(a, b, c, z, plus):
coeff, exponent = _get_coeff_exp(z, x)
a *= exponent
b *= coeff**c
c *= exponent
conds = []
wp = b*exp(I*re(c)*pi/2)
wm = b*exp(-I*re(c)*pi/2)
if plus:
w = wp
else:
w = wm
conds += [And(Or(Eq(b, 0), re(c) <= 0), re(a) <= -1)]
conds += [And(Ne(b, 0), Eq(im(c), 0), re(c) > 0, re(w) < 0)]
conds += [And(Ne(b, 0), Eq(im(c), 0), re(c) > 0, re(w) <= 0,
re(a) <= -1)]
return Or(*conds)
def statement(a, b, c, z):
""" Provide a convergence statement for z**a * exp(b*z**c),
cf. the sphinx docs. """
return And(statement_half(a, b, c, z, True),
statement_half(a, b, c, z, False))
# Notations from [L], section 5.7-10
m, n, p, q = S([len(g.bm), len(g.an), len(g.ap), len(g.bq)])
tau = m + n - p
nu = q - m - n
rho = (tau - nu)/2
sigma = q - p
if sigma == 1:
epsilon = S(1)/2
elif sigma > 1:
epsilon = 1
else:
epsilon = nan
theta = ((1 - sigma)/2 + Add(*g.bq) - Add(*g.ap))/sigma
delta = g.delta
_debug(' m=%s, n=%s, p=%s, q=%s, tau=%s, nu=%s, rho=%s, sigma=%s' % (
m, n, p, q, tau, nu, rho, sigma))
_debug(' epsilon=%s, theta=%s, delta=%s' % (epsilon, theta, delta))
# First check if the computation is valid.
if not (g.delta >= e/2 or (p >= 1 and p >= q)):
_debug(' Computation not valid for these parameters.')
return False
# Now check if the inversion integral exists.
# Test "condition A"
for a in g.an:
for b in g.bm:
if (a - b).is_integer and a > b:
_debug(' Not a valid G function.')
return False
# There are two cases. If p >= q, we can directly use a slater expansion
# like [L], 5.2 (11). Note in particular that the asymptotics of such an
# expansion even hold when some of the parameters differ by integers, i.e.
# the formula itself would not be valid! (because G functions are continuous
# in their parameters)
# When p < q, we need to use the theorems of [L], 5.10.
if p >= q:
_debug(' Using asymptotic Slater expansion.')
return And(*[statement(a - 1, 0, 0, z) for a in g.an])
def E(z):
return And(*[statement(a - 1, 0, 0, z) for a in g.an])
def H(z):
return statement(theta, -sigma, 1/sigma, z)
def Hp(z):
return statement_half(theta, -sigma, 1/sigma, z, True)
def Hm(z):
return statement_half(theta, -sigma, 1/sigma, z, False)
# [L], section 5.10
conds = []
# Theorem 1
conds += [And(1 <= n, p < q, 1 <= m, rho*pi - delta >= pi/2, delta > 0,
E(z*exp(I*pi*(nu + 1))))]
# Theorem 2, statements (2) and (3)
conds += [And(p + 1 <= m, m + 1 <= q, delta > 0, delta < pi/2, n == 0,
(m - p + 1)*pi - delta >= pi/2,
Hp(z*exp(I*pi*(q - m))), Hm(z*exp(-I*pi*(q - m))))]
# Theorem 2, statement (5)
conds += [And(p < q, m == q, n == 0, delta > 0,
(sigma + epsilon)*pi - delta >= pi/2, H(z))]
# Theorem 3, statements (6) and (7)
conds += [And(Or(And(p <= q - 2, 1 <= tau, tau <= sigma/2),
And(p + 1 <= m + n, m + n <= (p + q)/2)),
delta > 0, delta < pi/2, (tau + 1)*pi - delta >= pi/2,
Hp(z*exp(I*pi*nu)), Hm(z*exp(-I*pi*nu)))]
# Theorem 4, statements (10) and (11)
conds += [And(p < q, 1 <= m, rho > 0, delta > 0, delta + rho*pi < pi/2,
(tau + epsilon)*pi - delta >= pi/2,
Hp(z*exp(I*pi*nu)), Hm(z*exp(-I*pi*nu)))]
# Trivial case
conds += [m == 0]
# TODO
# Theorem 5 is quite general
# Theorem 6 contains special cases for q=p+1
return Or(*conds)
def _int_inversion(g, x, t):
"""
Compute the Laplace inversion integral, assuming the formula applies.
"""
b, a = _get_coeff_exp(g.argument, x)
C, g = _inflate_fox_h(meijerg(g.an, g.aother, g.bm, g.bother, b/t**a), -a)
return C/t*g
####################################################################
# Finally, the real meat.
####################################################################
_lookup_table = None
@cacheit
@timeit
def _rewrite_single(f, x, recursive=True):
"""
Try to rewrite f as a sum of single G functions of the form
C*x**s*G(a*x**b), where b is a rational number and C is independent of x.
We guarantee that result.argument.as_coeff_mul(x) returns (a, (x**b,))
or (a, ()).
Returns a list of tuples (C, s, G) and a condition cond.
Returns None on failure.
"""
from sympy import polarify, unpolarify, oo, zoo, Tuple
global _lookup_table
if not _lookup_table:
_lookup_table = {}
_create_lookup_table(_lookup_table)
if isinstance(f, meijerg):
from sympy import factor
coeff, m = factor(f.argument, x).as_coeff_mul(x)
if len(m) > 1:
return None
m = m[0]
if m.is_Pow:
if m.base != x or not m.exp.is_Rational:
return None
elif m != x:
return None
return [(1, 0, meijerg(f.an, f.aother, f.bm, f.bother, coeff*m))], True
f_ = f
f = f.subs(x, z)
t = _mytype(f, z)
if t in _lookup_table:
l = _lookup_table[t]
for formula, terms, cond, hint in l:
subs = f.match(formula, old=True)
if subs:
subs_ = {}
for fro, to in subs.items():
subs_[fro] = unpolarify(polarify(to, lift=True),
exponents_only=True)
subs = subs_
if not isinstance(hint, bool):
hint = hint.subs(subs)
if hint == False:
continue
if not isinstance(cond, (bool, BooleanAtom)):
cond = unpolarify(cond.subs(subs))
if _eval_cond(cond) == False:
continue
if not isinstance(terms, list):
terms = terms(subs)
res = []
for fac, g in terms:
r1 = _get_coeff_exp(unpolarify(fac.subs(subs).subs(z, x),
exponents_only=True), x)
try:
g = g.subs(subs).subs(z, x)
except ValueError:
continue
# NOTE these substitutions can in principle introduce oo,
# zoo and other absurdities. It shouldn't matter,
# but better be safe.
if Tuple(*(r1 + (g,))).has(oo, zoo, -oo):
continue
g = meijerg(g.an, g.aother, g.bm, g.bother,
unpolarify(g.argument, exponents_only=True))
res.append(r1 + (g,))
if res:
return res, cond
# try recursive mellin transform
if not recursive:
return None
_debug('Trying recursive Mellin transform method.')
from sympy.integrals.transforms import (mellin_transform,
inverse_mellin_transform, IntegralTransformError,
MellinTransformStripError)
from sympy import oo, nan, zoo, simplify, cancel
def my_imt(F, s, x, strip):
""" Calling simplify() all the time is slow and not helpful, since
most of the time it only factors things in a way that has to be
un-done anyway. But sometimes it can remove apparent poles. """
# XXX should this be in inverse_mellin_transform?
try:
return inverse_mellin_transform(F, s, x, strip,
as_meijerg=True, needeval=True)
except MellinTransformStripError:
return inverse_mellin_transform(
simplify(cancel(expand(F))), s, x, strip,
as_meijerg=True, needeval=True)
f = f_
s = _dummy('s', 'rewrite-single', f)
# to avoid infinite recursion, we have to force the two g functions case
def my_integrator(f, x):
from sympy import Integral, hyperexpand
r = _meijerint_definite_4(f, x, only_double=True)
if r is not None:
res, cond = r
res = _my_unpolarify(hyperexpand(res, rewrite='nonrepsmall'))
return Piecewise((res, cond),
(Integral(f, (x, 0, oo)), True))
return Integral(f, (x, 0, oo))
try:
F, strip, _ = mellin_transform(f, x, s, integrator=my_integrator,
simplify=False, needeval=True)
g = my_imt(F, s, x, strip)
except IntegralTransformError:
g = None
if g is None:
# We try to find an expression by analytic continuation.
# (also if the dummy is already in the expression, there is no point in
# putting in another one)
a = _dummy_('a', 'rewrite-single')
if a not in f.free_symbols and _is_analytic(f, x):
try:
F, strip, _ = mellin_transform(f.subs(x, a*x), x, s,
integrator=my_integrator,
needeval=True, simplify=False)
g = my_imt(F, s, x, strip).subs(a, 1)
except IntegralTransformError:
g = None
if g is None or g.has(oo, nan, zoo):
_debug('Recursive Mellin transform failed.')
return None
args = Add.make_args(g)
res = []
for f in args:
c, m = f.as_coeff_mul(x)
if len(m) > 1:
raise NotImplementedError('Unexpected form...')
g = m[0]
a, b = _get_coeff_exp(g.argument, x)
res += [(c, 0, meijerg(g.an, g.aother, g.bm, g.bother,
unpolarify(polarify(
a, lift=True), exponents_only=True)
*x**b))]
_debug('Recursive Mellin transform worked:', g)
return res, True
def _rewrite1(f, x, recursive=True):
"""
Try to rewrite f using a (sum of) single G functions with argument a*x**b.
Return fac, po, g such that f = fac*po*g, fac is independent of x
and po = x**s.
Here g is a result from _rewrite_single.
Return None on failure.
"""
fac, po, g = _split_mul(f, x)
g = _rewrite_single(g, x, recursive)
if g:
return fac, po, g[0], g[1]
def _rewrite2(f, x):
"""
Try to rewrite f as a product of two G functions of arguments a*x**b.
Return fac, po, g1, g2 such that f = fac*po*g1*g2, where fac is
independent of x and po is x**s.
Here g1 and g2 are results of _rewrite_single.
Returns None on failure.
"""
fac, po, g = _split_mul(f, x)
if any(_rewrite_single(expr, x, False) is None for expr in _mul_args(g)):
return None
l = _mul_as_two_parts(g)
if not l:
return None
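# Heuristic ordering: try the splittings that look simplest first, i.e.
# those whose factors involve the fewest distinct exponents, special
# functions and splitting points.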
l = list(ordered(l, [
lambda p: max(len(_exponents(p[0], x)), len(_exponents(p[1], x))),
lambda p: max(len(_functions(p[0], x)), len(_functions(p[1], x))),
lambda p: max(len(_find_splitting_points(p[0], x)),
len(_find_splitting_points(p[1], x)))]))
for recursive in [False, True]:
for fac1, fac2 in l:
g1 = _rewrite_single(fac1, x, recursive)
g2 = _rewrite_single(fac2, x, recursive)
if g1 and g2:
cond = And(g1[1], g2[1])
if cond != False:
return fac, po, g1[0], g2[0], cond
def meijerint_indefinite(f, x):
"""
Compute an indefinite integral of ``f`` by rewriting it as a G function.
Examples
========
>>> from sympy.integrals.meijerint import meijerint_indefinite
>>> from sympy import sin
>>> from sympy.abc import x
>>> meijerint_indefinite(sin(x), x)
-cos(x)
"""
from sympy import hyper, meijerg
results = []
for a in sorted(_find_splitting_points(f, x) | {S(0)}, key=default_sort_key):
res = _meijerint_indefinite_1(f.subs(x, x + a), x)
if not res:
continue
res = res.subs(x, x - a)
if _has(res, hyper, meijerg):
results.append(res)
else:
return res
if f.has(HyperbolicFunction):
_debug('Try rewriting hyperbolics in terms of exp.')
rv = meijerint_indefinite(
_rewrite_hyperbolics_as_exp(f), x)
if rv:
if not type(rv) is list:
return collect(factor_terms(rv), rv.atoms(exp))
results.extend(rv)
if results:
return next(ordered(results))
def _meijerint_indefinite_1(f, x):
""" Helper that does not attempt any substitution. """
from sympy import Integral, piecewise_fold, nan, zoo
_debug('Trying to compute the indefinite integral of', f, 'wrt', x)
gs = _rewrite1(f, x)
if gs is None:
# Note: the code that calls us will do expand() and try again
return None
fac, po, gl, cond = gs
_debug(' could rewrite:', gs)
res = S(0)
for C, s, g in gl:
a, b = _get_coeff_exp(g.argument, x)
_, c = _get_coeff_exp(po, x)
c += s
# we do a substitution t=a*x**b, get integrand fac*t**rho*g
fac_ = fac * C / (b*a**((1 + c)/b))
rho = (c + 1)/b - 1
# we now use t**rho*G(params, t) = G(params + rho, t)
# [L, page 150, equation (4)]
# and integral G(params, t) dt = G(1, params+1, 0, t)
# (or a similar expression with 1 and 0 exchanged ... pick the one
# which yields a well-defined function)
# [R, section 5]
# (Note that this dummy will immediately go away again, so we
# can safely pass S(1) for ``expr``.)
t = _dummy('t', 'meijerint-indefinite', S(1))
def tr(p):
return [a + rho + 1 for a in p]
if any(b.is_integer and (b <= 0) == True for b in tr(g.bm)):
r = -meijerg(
tr(g.an), tr(g.aother) + [1], tr(g.bm) + [0], tr(g.bother), t)
else:
r = meijerg(
tr(g.an) + [1], tr(g.aother), tr(g.bm), tr(g.bother) + [0], t)
# The antiderivative is most often expected to be defined
# in the neighborhood of x = 0.
place = 0
if b < 0 or f.subs(x, 0).has(nan, zoo):
place = None
r = hyperexpand(r.subs(t, a*x**b), place=place)
# now substitute back
# Note: we really do want the powers of x to combine.
res += powdenest(fac_*r, polar=True)
def _clean(res):
"""This multiplies out superfluous powers of x we created, and chops off
constants:
>> _clean(x*(exp(x)/x - 1/x) + 3)
exp(x)
cancel is used before mul_expand since it is possible for an
expression to have an additive constant that doesn't become isolated
with simple expansion. Such a situation was identified in issue 6369:
>>> from sympy import sqrt, cancel
>>> from sympy.abc import x
>>> a = sqrt(2*x + 1)
>>> bad = (3*x*a**5 + 2*x - a**5 + 1)/a**2
>>> bad.expand().as_independent(x)[0]
0
>>> cancel(bad).expand().as_independent(x)[0]
1
"""
from sympy import cancel
res = expand_mul(cancel(res), deep=False)
return Add._from_args(res.as_coeff_add(x)[1])
res = piecewise_fold(res)
if res.is_Piecewise:
newargs = []
for expr, cond in res.args:
expr = _my_unpolarify(_clean(expr))
newargs += [(expr, cond)]
res = Piecewise(*newargs)
else:
res = _my_unpolarify(_clean(res))
return Piecewise((res, _my_unpolarify(cond)), (Integral(f, x), True))
@timeit
def meijerint_definite(f, x, a, b):
"""
Integrate ``f`` over the interval [``a``, ``b``], by rewriting it as a product
of two G functions, or as a single G function.
Return res, cond, where cond are convergence conditions.
Examples
========
>>> from sympy.integrals.meijerint import meijerint_definite
>>> from sympy import exp, oo
>>> from sympy.abc import x
>>> meijerint_definite(exp(-x**2), x, -oo, oo)
(sqrt(pi), True)
This function is implemented as a succession of functions
meijerint_definite, _meijerint_definite_2, _meijerint_definite_3,
_meijerint_definite_4. Each function in the list calls the next one
(presumably) several times. This means that calling meijerint_definite
can be very costly.
"""
# This consists of three steps:
# 1) Change the integration limits to 0, oo
# 2) Rewrite in terms of G functions
# 3) Evaluate the integral
#
# There are usually several ways of doing this, and we want to try all.
# This function does (1), calls _meijerint_definite_2 for step (2).
from sympy import arg, exp, I, And, DiracDelta, SingularityFunction
_debug('Integrating', f, 'wrt %s from %s to %s.' % (x, a, b))
if f.has(DiracDelta):
_debug('Integrand has DiracDelta terms - giving up.')
return None
if f.has(SingularityFunction):
_debug('Integrand has Singularity Function terms - giving up.')
return None
f_, x_, a_, b_ = f, x, a, b
# Let's use a dummy in case any of the boundaries has x.
d = Dummy('x')
f = f.subs(x, d)
x = d
if a == b:
return (S.Zero, True)
results = []
if a == -oo and b != oo:
return meijerint_definite(f.subs(x, -x), x, -b, -a)
elif a == -oo:
# Integrating -oo to oo. We need to find a place to split the integral.
_debug(' Integrating -oo to +oo.')
innermost = _find_splitting_points(f, x)
_debug(' Sensible splitting points:', innermost)
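# Splitting at c rewrites the integral over (-oo, oo) as two integrals
# over (0, oo): one of f(x + c) and one of f(c - x).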
for c in sorted(innermost, key=default_sort_key, reverse=True) + [S(0)]:
_debug(' Trying to split at', c)
if not c.is_real:
_debug(' Non-real splitting point.')
continue
res1 = _meijerint_definite_2(f.subs(x, x + c), x)
if res1 is None:
_debug(' But could not compute first integral.')
continue
res2 = _meijerint_definite_2(f.subs(x, c - x), x)
if res2 is None:
_debug(' But could not compute second integral.')
continue
res1, cond1 = res1
res2, cond2 = res2
cond = _condsimp(And(cond1, cond2))
if cond == False:
_debug(' But combined condition is always false.')
continue
res = res1 + res2
return res, cond
elif a == oo:
res = meijerint_definite(f, x, b, oo)
return -res[0], res[1]
elif (a, b) == (0, oo):
# This is a common case - try it directly first.
res = _meijerint_definite_2(f, x)
if res:
if _has(res[0], meijerg):
results.append(res)
else:
return res
else:
if b == oo:
for split in _find_splitting_points(f, x):
if (a - split >= 0) == True:
_debug('Trying x -> x + %s' % split)
res = _meijerint_definite_2(f.subs(x, x + split)
*Heaviside(x + split - a), x)
if res:
if _has(res[0], meijerg):
results.append(res)
else:
return res
f = f.subs(x, x + a)
b = b - a
a = 0
if b != oo:
phi = exp(I*arg(b))
b = abs(b)
f = f.subs(x, phi*x)
f *= Heaviside(b - x)*phi
b = oo
_debug('Changed limits to', a, b)
_debug('Changed function to', f)
res = _meijerint_definite_2(f, x)
if res:
if _has(res[0], meijerg):
results.append(res)
else:
return res
if f_.has(HyperbolicFunction):
_debug('Try rewriting hyperbolics in terms of exp.')
rv = meijerint_definite(
_rewrite_hyperbolics_as_exp(f_), x_, a_, b_)
if rv:
if not type(rv) is list:
rv = (collect(factor_terms(rv[0]), rv[0].atoms(exp)),) + rv[1:]
return rv
results.extend(rv)
if results:
return next(ordered(results))
def _guess_expansion(f, x):
""" Try to guess sensible rewritings for integrand f(x). """
from sympy import expand_trig
from sympy.functions.elementary.trigonometric import TrigonometricFunction
res = [(f, 'original integrand')]
orig = res[-1][0]
saw = {orig}
expanded = expand_mul(orig)
if expanded not in saw:
res += [(expanded, 'expand_mul')]
saw.add(expanded)
expanded = expand(orig)
if expanded not in saw:
res += [(expanded, 'expand')]
saw.add(expanded)
if orig.has(TrigonometricFunction, HyperbolicFunction):
expanded = expand_mul(expand_trig(orig))
if expanded not in saw:
res += [(expanded, 'expand_trig, expand_mul')]
saw.add(expanded)
if orig.has(cos, sin):
reduced = sincos_to_sum(orig)
if reduced not in saw:
res += [(reduced, 'trig power reduction')]
saw.add(reduced)
return res
def _meijerint_definite_2(f, x):
"""
Try to integrate f dx from zero to infinity.
The body of this function computes various 'simplifications'
f1, f2, ... of f (e.g. by calling expand_mul(), expand_trig()
- see _guess_expansion) and calls _meijerint_definite_3 with each of
these in succession.
If _meijerint_definite_3 succeeds with any of the simplified functions,
returns this result.
"""
# This function does preparation for (2), calls
# _meijerint_definite_3 for (2) and (3) combined.
# use a positive dummy - we integrate from 0 to oo
# XXX if a nonnegative symbol is used there will be test failures
dummy = _dummy('x', 'meijerint-definite2', f, positive=True)
f = f.subs(x, dummy)
x = dummy
if f == 0:
return S(0), True
for g, explanation in _guess_expansion(f, x):
_debug('Trying', explanation)
res = _meijerint_definite_3(g, x)
if res:
return res
def _meijerint_definite_3(f, x):
"""
Try to integrate f dx from zero to infinity.
This function calls _meijerint_definite_4 to try to compute the
integral. If this fails, it tries using linearity.
"""
res = _meijerint_definite_4(f, x)
if res and res[1] != False:
return res
if f.is_Add:
_debug('Expanding and evaluating all terms.')
ress = [_meijerint_definite_4(g, x) for g in f.args]
if all(r is not None for r in ress):
conds = []
res = S(0)
for r, c in ress:
res += r
conds += [c]
c = And(*conds)
if c != False:
return res, c
def _my_unpolarify(f):
from sympy import unpolarify
return _eval_cond(unpolarify(f))
@timeit
def _meijerint_definite_4(f, x, only_double=False):
"""
Try to integrate f dx from zero to infinity.
This function tries to apply the integration theorems found in literature,
i.e. it tries to rewrite f as either one or a product of two G-functions.
The parameter ``only_double`` is used internally in the recursive algorithm
to disable trying to rewrite f as a single G-function.
"""
# This function does (2) and (3)
_debug('Integrating', f)
# Try single G function.
if not only_double:
gs = _rewrite1(f, x, recursive=False)
if gs is not None:
fac, po, g, cond = gs
_debug('Could rewrite as single G function:', fac, po, g)
res = S(0)
for C, s, f in g:
if C == 0:
continue
C, f = _rewrite_saxena_1(fac*C, po*x**s, f, x)
res += C*_int0oo_1(f, x)
cond = And(cond, _check_antecedents_1(f, x))
if cond == False:
break
cond = _my_unpolarify(cond)
if cond == False:
_debug('But cond is always False.')
else:
_debug('Result before branch substitutions is:', res)
return _my_unpolarify(hyperexpand(res)), cond
# Try two G functions.
gs = _rewrite2(f, x)
if gs is not None:
for full_pb in [False, True]:
fac, po, g1, g2, cond = gs
_debug('Could rewrite as two G functions:', fac, po, g1, g2)
res = S(0)
for C1, s1, f1 in g1:
for C2, s2, f2 in g2:
r = _rewrite_saxena(fac*C1*C2, po*x**(s1 + s2),
f1, f2, x, full_pb)
if r is None:
_debug('Non-rational exponents.')
return
C, f1_, f2_ = r
_debug('Saxena substitution yielded:', C, f1_, f2_)
cond = And(cond, _check_antecedents(f1_, f2_, x))
if cond == False:
break
res += C*_int0oo(f1_, f2_, x)
else:
continue
break
cond = _my_unpolarify(cond)
if cond == False:
_debug('But cond is always False (full_pb=%s).' % full_pb)
else:
_debug('Result before branch substitutions is:', res)
if only_double:
return res, cond
return _my_unpolarify(hyperexpand(res)), cond
def meijerint_inversion(f, x, t):
r"""
Compute the inverse Laplace transform
:math:`\int_{c-i\infty}^{c+i\infty} f(x) e^{tx}\, dx`,
for real c larger than the real part of all singularities of f.
Note that ``t`` is always assumed real and positive.
Return None if the integral does not exist or could not be evaluated.
Examples
========
>>> from sympy.abc import x, t
>>> from sympy.integrals.meijerint import meijerint_inversion
>>> meijerint_inversion(1/x, x, t)
Heaviside(t)
"""
from sympy import I, Integral, exp, expand, log, Add, Mul, Heaviside
f_ = f
t_ = t
t = Dummy('t', polar=True) # We don't want sqrt(t**2) = abs(t) etc
f = f.subs(t_, t)
c = Dummy('c')
_debug('Laplace-inverting', f)
if not _is_analytic(f, x):
_debug('But expression is not analytic.')
return None
# We filter out exponentials here. If we are given an Add this will not
# work, but the calling code will take care of that.
shift = 0
if f.is_Mul:
args = list(f.args)
newargs = []
exponentials = []
while args:
arg = args.pop()
if isinstance(arg, exp):
arg2 = expand(arg)
if arg2.is_Mul:
args += arg2.args
continue
try:
a, b = _get_coeff_exp(arg.args[0], x)
except _CoeffExpValueError:
b = 0
if b == 1:
exponentials.append(a)
else:
newargs.append(arg)
elif arg.is_Pow:
arg2 = expand(arg)
if arg2.is_Mul:
args += arg2.args
continue
if x not in arg.base.free_symbols:
try:
a, b = _get_coeff_exp(arg.exp, x)
except _CoeffExpValueError:
b = 0
if b == 1:
exponentials.append(a*log(arg.base))
newargs.append(arg)
else:
newargs.append(arg)
shift = Add(*exponentials)
f = Mul(*newargs)
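# The exponential factors exp(a*x) collected above correspond, by the shift
# theorem, to a time shift of the inverse transform; the shift is re-applied
# near the end via res.subs(t, t + shift).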
gs = _rewrite1(f, x)
if gs is not None:
fac, po, g, cond = gs
_debug('Could rewrite as single G function:', fac, po, g)
res = S(0)
for C, s, f in g:
C, f = _rewrite_inversion(fac*C, po*x**s, f, x)
res += C*_int_inversion(f, x, t)
cond = And(cond, _check_antecedents_inversion(f, x))
if cond == False:
break
cond = _my_unpolarify(cond)
if cond == False:
_debug('But cond is always False.')
else:
_debug('Result before branch substitution:', res)
res = _my_unpolarify(hyperexpand(res))
if not res.has(Heaviside):
res *= Heaviside(t)
res = res.subs(t, t + shift)
if not isinstance(cond, bool):
cond = cond.subs(t, t + shift)
return Piecewise((res.subs(t, t_), cond),
(Integral(f_*exp(x*t), (x, c - oo*I, c + oo*I)).subs(t, t_), True))
|
gpl-2.0
|
hhru/ansible
|
test/units/TestFilters.py
|
107
|
5974
|
'''
Test bundled filters
'''
import os.path
import unittest, tempfile, shutil
from ansible import playbook, inventory, callbacks
import ansible.runner.filter_plugins.core
import ansible.runner.filter_plugins.mathstuff
INVENTORY = inventory.Inventory(['localhost'])
BOOK = '''
- hosts: localhost
vars:
var: { a: [1,2,3] }
tasks:
- template: src=%s dest=%s
'''
SRC = '''
-
{{ var|to_json }}
-
{{ var|to_nice_json }}
-
{{ var|to_yaml }}
-
{{ var|to_nice_yaml }}
'''
DEST = '''
-
{"a": [1, 2, 3]}
-
{
"a": [
1,
2,
3
]
}
-
a: [1, 2, 3]
-
a:
- 1
- 2
- 3
'''
class TestFilters(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp(dir='/tmp')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def temp(self, name, data=''):
'''write a temporary file and return the name'''
name = self.tmpdir + '/' + name
with open(name, 'w') as f:
f.write(data)
return name
def test_bool_none(self):
a = ansible.runner.filter_plugins.core.bool(None)
assert a == None
def test_bool_true(self):
a = ansible.runner.filter_plugins.core.bool(True)
assert a == True
def test_bool_yes(self):
a = ansible.runner.filter_plugins.core.bool('Yes')
assert a == True
def test_bool_no(self):
a = ansible.runner.filter_plugins.core.bool('Foo')
assert a == False
def test_quotes(self):
a = ansible.runner.filter_plugins.core.quote('ls | wc -l')
assert a == "'ls | wc -l'"
def test_fileglob(self):
pathname = os.path.join(os.path.dirname(__file__), '*')
a = ansible.runner.filter_plugins.core.fileglob(pathname)
assert __file__ in a
def test_regex(self):
a = ansible.runner.filter_plugins.core.regex('ansible', 'ansible',
match_type='findall')
assert a == True
def test_match_case_sensitive(self):
a = ansible.runner.filter_plugins.core.match('ansible', 'ansible')
assert a == True
def test_match_case_insensitive(self):
a = ansible.runner.filter_plugins.core.match('ANSIBLE', 'ansible',
True)
assert a == True
def test_match_no_match(self):
a = ansible.runner.filter_plugins.core.match(' ansible', 'ansible')
assert a == False
def test_search_case_sensitive(self):
a = ansible.runner.filter_plugins.core.search(' ansible ', 'ansible')
assert a == True
def test_search_case_insensitive(self):
a = ansible.runner.filter_plugins.core.search(' ANSIBLE ', 'ansible',
True)
assert a == True
def test_regex_replace_case_sensitive(self):
a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^a.*i(.*)$',
'a\\1')
assert a == 'able'
def test_regex_replace_case_insensitive(self):
a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^A.*I(.*)$',
'a\\1', True)
assert a == 'able'
def test_regex_replace_no_match(self):
a = ansible.runner.filter_plugins.core.regex_replace('ansible', '^b.*i(.*)$',
'a\\1')
assert a == 'ansible'
def test_to_uuid(self):
a = ansible.runner.filter_plugins.core.to_uuid('example.com')
assert a == 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe'
#def test_filters(self):
# this test is pretty low level using a playbook, hence I am disabling it for now -- MPD.
#return
#src = self.temp('src.j2', SRC)
#dest = self.temp('dest.txt')
#book = self.temp('book', BOOK % (src, dest))
#playbook.PlayBook(
# playbook = book,
# inventory = INVENTORY,
# transport = 'local',
# callbacks = callbacks.PlaybookCallbacks(),
# runner_callbacks = callbacks.DefaultRunnerCallbacks(),
# stats = callbacks.AggregateStats(),
#).run()
#out = open(dest).read()
#self.assertEqual(DEST, out)
def test_version_compare(self):
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(0, 1.1, 'lt', False))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.2, '<'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, '=='))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, '='))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.2, 'eq'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, 'gt'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '>'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, 'ne'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '!='))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.3, 1.2, '<>'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.1, 'ge'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.2, 1.1, '>='))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.1, 1.1, 'le'))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare(1.0, 1.1, '<='))
self.assertTrue(ansible.runner.filter_plugins.core.version_compare('12.04', 12, 'ge'))
def test_min(self):
a = ansible.runner.filter_plugins.mathstuff.min([3, 2, 5, 4])
assert a == 2
def test_max(self):
a = ansible.runner.filter_plugins.mathstuff.max([3, 2, 5, 4])
assert a == 5
|
gpl-3.0
|
gitprouser/appengine-bottle-skeleton
|
lib/setuptools/command/test.py
|
22
|
8857
|
import os
import operator
import sys
import contextlib
import itertools
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils import log
from unittest import TestLoader
from setuptools.extern import six
from setuptools.extern.six.moves import map, filter
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module, pattern=None):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self.fget(obj)
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
('test-suite=', 's',
"Test suite to run (e.g. 'some_module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite and self.test_module:
msg = "You may specify a module or a suite, but not both"
raise DistutilsOptionError(msg)
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
@NonDataProperty
def test_args(self):
return list(self._test_args())
def _test_args(self):
if self.verbose:
yield '--verbose'
if self.test_suite:
yield self.test_suite
def with_project_on_sys_path(self, func):
"""
Backward compatibility for project_on_sys_path context.
"""
with self.project_on_sys_path():
func()
@contextlib.contextmanager
def project_on_sys_path(self, include_dists=[]):
with_2to3 = six.PY3 and getattr(self.distribution, 'use_2to3', False)
if with_2to3:
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
project_path = normalize_path(ei_cmd.egg_base)
sys.path.insert(0, project_path)
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
with self.paths_on_pythonpath([project_path]):
yield
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
@staticmethod
@contextlib.contextmanager
def paths_on_pythonpath(paths):
"""
Add the indicated paths to the head of the PYTHONPATH environment
variable so that subprocesses will also see the packages at
these paths.
Do this in a context that restores the value on exit.
"""
nothing = object()
orig_pythonpath = os.environ.get('PYTHONPATH', nothing)
current_pythonpath = os.environ.get('PYTHONPATH', '')
try:
prefix = os.pathsep.join(paths)
to_join = filter(None, [prefix, current_pythonpath])
new_path = os.pathsep.join(to_join)
if new_path:
os.environ['PYTHONPATH'] = new_path
yield
finally:
if orig_pythonpath is nothing:
os.environ.pop('PYTHONPATH', None)
else:
os.environ['PYTHONPATH'] = orig_pythonpath
@staticmethod
def install_dists(dist):
"""
Install the requirements indicated by self.distribution and
return an iterable of the dists that were built.
"""
ir_d = dist.fetch_build_eggs(dist.install_requires or [])
tr_d = dist.fetch_build_eggs(dist.tests_require or [])
return itertools.chain(ir_d, tr_d)
def run(self):
installed_dists = self.install_dists(self.distribution)
cmd = ' '.join(self._argv)
if self.dry_run:
self.announce('skipping "%s" (dry run)' % cmd)
return
self.announce('running "%s"' % cmd)
paths = map(operator.attrgetter('location'), installed_dists)
with self.paths_on_pythonpath(paths):
with self.project_on_sys_path():
self.run_tests()
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if six.PY3 and getattr(self.distribution, 'use_2to3', False):
module = self.test_suite.split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
exit_kwarg = {} if sys.version_info < (2, 7) else {"exit": False}
test = unittest_main(
None, None, self._argv,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
**exit_kwarg
)
if not test.result.wasSuccessful():
msg = 'Test failed: %s' % test.result
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
@property
def _argv(self):
return ['unittest'] + self.test_args
@staticmethod
def _resolve_as_ep(val):
"""
Load the indicated attribute value, called, as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.resolve()()
|
apache-2.0
|
KuwaitNET/django-oscar-api
|
oscarapi/views/basket.py
|
1
|
6657
|
from django.utils.translation import ugettext_lazy as _
from oscar.apps.basket import signals
from oscar.core.loading import get_model
from rest_framework import status, generics, exceptions
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from oscarapi import serializers, permissions
from oscarapi.basket.operations import (
apply_offers,
get_basket
)
from oscarapi.views.mixin import PutIsPatchMixin
from oscarapi.views.utils import BasketPermissionMixin
__all__ = ('BasketView', 'LineList', 'LineDetail', 'add_product', 'add_voucher')
Basket = get_model('basket', 'Basket')
Line = get_model('basket', 'Line')
class BasketView(APIView):
"""
Api for retrieving a user's basket.
GET:
Retrieve your basket.
"""
def get(self, request, format=None):
basket = get_basket(request)
ser = serializers.BasketSerializer(basket,
context={'request': request})
return Response(ser.data)
@api_view(('POST',))
def add_product(request, format=None):
"""
Add a certain quantity of a product to the basket.
POST(url, quantity)
{
"url": "http://testserver.org/oscarapi/products/209/",
"quantity": 6
}
NOT IMPLEMENTED: LineAttributes, which are references to catalogue.Option.
To implement, make the serializer accept lists of option objects, which look
like this:
{
option: "http://testserver.org/oscarapi/options/1/,
value: "some value"
},
These should be passed to basket.add_product as a list of dictionaries.
"""
p_ser = serializers.AddProductSerializer(data=request.DATA,
context={'request': request})
if p_ser.is_valid():
basket = get_basket(request)
product = p_ser.object
quantity = p_ser.init_data.get('quantity')
availability = basket.strategy.fetch_for_product(product).availability
# check if product is available at all
if not availability.is_available_to_buy:
return Response(
{'reason': availability.message}, status=status.HTTP_406_NOT_ACCEPTABLE)
# check if we can buy this quantity
allowed, message = availability.is_purchase_permitted(quantity)
if not allowed:
return Response({'reason': message}, status=status.HTTP_406_NOT_ACCEPTABLE)
# check if there is a limit on amount
allowed, message = basket.is_quantity_allowed(quantity)
if not allowed:
return Response({'reason': message}, status=status.HTTP_406_NOT_ACCEPTABLE)
basket.add_product(p_ser.object, quantity=quantity)
apply_offers(request, basket)
ser = serializers.BasketSerializer(
basket, context={'request': request})
return Response(ser.data)
return Response({'reason': p_ser.errors}, status=status.HTTP_406_NOT_ACCEPTABLE)
@api_view(('POST',))
def add_voucher(request, format=None):
"""
Add a voucher to the basket.
POST(vouchercode)
{
"vouchercode": "kjadjhgadjgh7667"
}
Will return 200 and the voucher as JSON if successful.
If unsuccessful, will return 406 with the error.
"""
v_ser = serializers.VoucherAddSerializer(data=request.DATA,
context={'request': request})
if v_ser.is_valid():
basket = get_basket(request)
voucher = v_ser.object
basket.vouchers.add(voucher)
signals.voucher_addition.send(
sender=None, basket=basket, voucher=voucher)
# Recalculate discounts to see if the voucher gives any
apply_offers(request, basket)
discounts_after = basket.offer_applications
# Look for discounts from this new voucher
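# (for/else: the else branch only runs when the loop finds no discount from
# this voucher, in which case the voucher is removed again)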
for discount in discounts_after:
if discount['voucher'] and discount['voucher'] == voucher:
break
else:
basket.vouchers.remove(voucher)
return Response({'reason':_("Your basket does not qualify for a voucher discount")}, status=status.HTTP_406_NOT_ACCEPTABLE)
ser = serializers.VoucherSerializer(voucher, context={'request': request})
return Response(ser.data)
return Response(v_ser.errors, status=status.HTTP_406_NOT_ACCEPTABLE)
class LineList(BasketPermissionMixin, generics.ListCreateAPIView):
"""
Api for adding lines to a basket.
Permission will be checked,
Regular users may only access their own basket,
staff users may access any basket.
GET:
A list of basket lines.
POST(basket, line_reference, product, stockrecord,
quantity, price_currency, price_excl_tax, price_incl_tax):
Add a line to the basket, example::
{
"basket": "http://127.0.0.1:8000/oscarapi/baskets/100/",
"line_reference": "234_345",
"product": "http://127.0.0.1:8000/oscarapi/products/209/",
"stockrecord":
"http://127.0.0.1:8000/oscarapi/stockrecords/100/",
"quantity": 3,
"price_currency": "EUR",
"price_excl_tax": "100.0",
"price_incl_tax": "121.0"
}
"""
queryset = Line.objects.all()
serializer_class = serializers.LineSerializer
def get(self, request, pk=None, format=None):
if pk is not None:
self.check_basket_permission(request, pk)
self.queryset = self.queryset.filter(basket__id=pk)
elif not request.user.is_staff:
self.permission_denied(request)
return super(LineList, self).get(request, format)
def post(self, request, pk=None, format=None):
data_basket = self.get_data_basket(request.DATA, format)
self.check_basket_permission(request, basket=data_basket)
if pk is not None:
url_basket = self.check_basket_permission(request, basket_pk=pk)
if url_basket != data_basket:
raise exceptions.NotAcceptable(
_('Target basket inconsistent %s != %s') % (
url_basket.pk, data_basket.pk
)
)
elif not request.user.is_staff:
self.permission_denied(request)
return super(LineList, self).post(request, format=format)
class LineDetail(PutIsPatchMixin, generics.RetrieveUpdateDestroyAPIView):
queryset = Line.objects.all()
serializer_class = serializers.LineSerializer
permission_classes = (permissions.IsAdminUserOrRequestContainsLine,)
|
bsd-3-clause
|
flybird119/voltdb
|
tools/voter.d/voter.py
|
7
|
2792
|
# This file is part of VoltDB.
# Copyright (C) 2008-2015 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# All the commands supported by the Voter application.
import os
@VOLT.Command(description = 'Build the Voter application and catalog.',
options = VOLT.BooleanOption('-C', '--conditional', 'conditional',
'only build when the catalog file is missing'))
def build(runner):
if not runner.opts.conditional or not os.path.exists('voter.jar'):
runner.java.compile('obj', 'src/voter/*.java', 'src/voter/procedures/*.java')
runner.call('volt.compile', '-c', 'obj', '-o', 'voter.jar', 'ddl.sql')
@VOLT.Command(description = 'Clean the Voter build output.')
def clean(runner):
runner.shell('rm', '-rfv', 'obj', 'debugoutput', 'voter.jar', 'voltdbroot')
@VOLT.Server('create',
description = 'Start the Voter VoltDB server.',
command_arguments = 'voter.jar',
classpath = 'obj')
def server(runner):
runner.call('build', '-C')
runner.go()
@VOLT.Java('voter.AsyncBenchmark', classpath = 'obj',
description = 'Run the Voter asynchronous benchmark.')
def async(runner):
runner.call('build', '-C')
runner.go()
@VOLT.Java('voter.SyncBenchmark', classpath = 'obj',
description = 'Run the Voter synchronous benchmark.')
def sync(runner):
runner.call('build', '-C')
runner.go()
@VOLT.Java('voter.JDBCBenchmark', classpath = 'obj',
description = 'Run the Voter JDBC benchmark.')
def jdbc(runner):
runner.call('build', '-C')
runner.go()
@VOLT.Java('voter.SimpleBenchmark', classpath = 'obj',
description = 'Run the Voter simple benchmark.')
def simple(runner):
runner.call('build', '-C')
runner.go()
|
agpl-3.0
|
marxin/youtube-dl
|
youtube_dl/extractor/streetvoice.py
|
129
|
1677
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import unified_strdate
class StreetVoiceIE(InfoExtractor):
_VALID_URL = r'https?://(?:.+?\.)?streetvoice\.com/[^/]+/songs/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://streetvoice.com/skippylu/songs/94440/',
'md5': '15974627fc01a29e492c98593c2fd472',
'info_dict': {
'id': '94440',
'ext': 'mp3',
'filesize': 4167053,
'title': '輸',
'description': 'Crispy脆樂團 - 輸',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 260,
'upload_date': '20091018',
'uploader': 'Crispy脆樂團',
'uploader_id': '627810',
}
}, {
'url': 'http://tw.streetvoice.com/skippylu/songs/94440/',
'only_matching': True,
}]
def _real_extract(self, url):
song_id = self._match_id(url)
song = self._download_json(
'http://streetvoice.com/music/api/song/%s' % song_id, song_id)
title = song['name']
author = song['musician']['name']
return {
'id': song_id,
'url': song['file'],
'filesize': song.get('size'),
'title': title,
'description': '%s - %s' % (author, title),
'thumbnail': self._proto_relative_url(song.get('image'), 'http:'),
'duration': song.get('length'),
'upload_date': unified_strdate(song.get('created_at')),
'uploader': author,
'uploader_id': compat_str(song['musician']['id']),
}
|
unlicense
|
camilo-v/Neural-Networks
|
single-layer-networks/adaline/adaline.py
|
1
|
4336
|
#!/usr/local/bin/python3
# ---------------------------------------------------------------------------------------------------------------------
#
# Bioinformatics Research Group
# http://biorg.cis.fiu.edu/
# Florida International University
#
# This software is a "Camilo Valdes Work" under the terms of the United States Copyright Act. Please cite the
# author(s) in any work or product based on this material. Base implementation based on Sebastian Raschka at
# https://github.com/rasbt/python-machine-learning-book/.
#
# OBJECTIVE:
# The purpose of this program is to implement the Adaptive Linear Neuron (ADALINE) classifier.
#
# NOTES:
# Please see the dependencies section below for the required libraries (if any).
#
# DEPENDENCIES:
#
# • Pandas
# • Numpy
#
# The above libraries & modules are required. You can check the modules currently installed in your
# system by running: python -c "help('modules')"
#
# USAGE:
# Run the program with the "--help" flag to see usage instructions.
#
# AUTHOR: Camilo Valdes ([email protected])
# Bioinformatics Research Group,
# School of Computing and Information Sciences,
# Florida International University (FIU)
#
#
# ---------------------------------------------------------------------------------------------------------------------
# Python Modules
import numpy as np
class Adaline(object):
"""
    Adaline Classifier.
    This class implements the Adaptive Linear Neuron (ADALINE) learning algorithm.
Parameters:
eta (float): Learning rate (between 0.0 and 1.0)
n_iter (int): Number of passes over the entire dataset (epochs).
Attributes:
w_ (1-d array): Weights after fitting.
cost_ (list): Sum-of-squares cost function value in each epoch.
"""
def __init__(self, eta=0.01, n_iter=50):
"""
        Default initializer.
Args:
eta: The learning rate.
n_iter: number of iterations (epochs).
"""
self.eta = eta
self.n_iter = n_iter
def fit(self, X, y):
"""
Fit the Adaline instance with the training data.
Args:
X (numpy.ndarray): Feature matrix, where n_samples is the number of samples and n_features is the
number of features.
y (numpy.ndarray): Target values.
Returns:
            self: The fitted Adaline instance.
"""
self.w_ = np.array(np.zeros( 1 + X.shape[1] ))
self.w_ = self.w_[:, np.newaxis] # Adds a new axis -> 2D array. Required to update the weights.
self.cost_ = []
for i in range(self.n_iter):
output = self.activation( X )
errors = (y - output)
# Calculate the gradient based on the whole training dataset for weights 1 to m
# Note that np.asarray(self.w_[1:]) is required so that Numpy can see the vector of weights
# correctly and it can perform the dot product.
self.w_[1:] = np.add( np.asarray(self.w_[1:]), self.eta * X.T.dot( errors ) )
# Calculate the gradient based on the whole training dataset
self.w_[0] += self.eta * errors.sum()
cost = (errors**2).sum() / 2.0
self.cost_.append(cost)
return self
def net_input(self, X):
"""
Net input calculation for a given P.E.
Args:
X (numpy.ndarray): Feature matrix.
Returns:
numpy.ndarray Sum of net inputs
"""
return np.dot( X, self.w_[1:] ) + self.w_[0]
def activation(self, X):
"""
Method for computing the linear activation function.
Args:
X (numpy.ndarray): Feature matrix.
Returns:
numpy.ndarray Activation as calculated by the net input.
"""
return self.net_input(X)
def predict(self, X):
"""
Estimate the class label for a given pattern
Args:
X (numpy.ndarray): Feature matrix.
Returns:
ndarray: A Numpy array value with the expected (predicted) label of the pattern.
"""
return np.where(self.activation(X) >= 0.0, 1, -1)
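# Minimal usage sketch (added for illustration; not in the original module).
# It fits the Adaline class above on a tiny toy dataset. Note that y is passed
# as a column vector so its shape matches the 2-D weight array used in fit();
# in practice, features should also be standardized before fitting.
if __name__ == '__main__':
    X_toy = np.array([[0.0, 0.1],
                      [0.2, 0.3],
                      [0.9, 1.0],
                      [1.1, 0.8]])
    y_toy = np.array([[-1.0], [-1.0], [1.0], [1.0]])  # column vector of {-1, +1} labels
    model = Adaline(eta=0.01, n_iter=20).fit(X_toy, y_toy)
    print("Final cost:", model.cost_[-1])
    print("Predictions:", model.predict(X_toy).ravel())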
|
gpl-3.0
|
armani-dev/android_kernel_xiaomi_armani_OLD
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
|
11088
|
3246
|
# Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
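# Illustrative sketch (added; not part of the original helper module): how a
# perf-script event handler typically registers and renders a flag field with
# the functions above. The event and field names are made up, and the module
# targets the Python 2 interpreter embedded in perf (keys() returns a list).
if __name__ == '__main__':
    define_flag_field("irq__softirq_entry", "vec", "|")
    define_flag_value("irq__softirq_entry", "vec", 1, "TIMER")
    define_flag_value("irq__softirq_entry", "vec", 2, "NET_TX")
    # A value with both bits set renders as "TIMER | NET_TX".
    print(flag_str("irq__softirq_entry", "vec", 3))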
|
gpl-2.0
|
facebook/fbthrift
|
thrift/compiler/test/fixtures/includes/gen-py/module/ttypes.py
|
1
|
7111
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import six
import sys
from thrift.util.Recursive import fix_spec
from thrift.Thrift import TType, TMessageType, TPriority, TRequestContext, TProcessorEventHandler, TServerInterface, TProcessor, TException, TApplicationException, UnimplementedTypedef
from thrift.protocol.TProtocol import TProtocolException
import includes.ttypes
import transitive.ttypes
import pprint
import warnings
from thrift import Thrift
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
fastproto = None
try:
from thrift.protocol import fastproto
except ImportError:
pass
all_structs = []
UTF8STRINGS = bool(0) or sys.version_info.major >= 3
__all__ = ['UTF8STRINGS', 'MyStruct']
class MyStruct:
"""
Attributes:
- MyIncludedField
- MyOtherIncludedField
- MyIncludedInt
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.MyIncludedField = includes.ttypes.Included()
self.MyIncludedField.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.MyOtherIncludedField = includes.ttypes.Included()
self.MyOtherIncludedField.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.MyIncludedInt = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('MyStruct')
if self.MyIncludedField != None:
oprot.writeFieldBegin('MyIncludedField', TType.STRUCT, 1)
self.MyIncludedField.write(oprot)
oprot.writeFieldEnd()
if self.MyOtherIncludedField != None:
oprot.writeFieldBegin('MyOtherIncludedField', TType.STRUCT, 2)
self.MyOtherIncludedField.write(oprot)
oprot.writeFieldEnd()
if self.MyIncludedInt != None:
oprot.writeFieldBegin('MyIncludedInt', TType.I64, 3)
oprot.writeI64(self.MyIncludedInt)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = []
padding = ' ' * 4
if self.MyIncludedField is not None:
value = pprint.pformat(self.MyIncludedField, indent=0)
value = padding.join(value.splitlines(True))
L.append(' MyIncludedField=%s' % (value))
if self.MyOtherIncludedField is not None:
value = pprint.pformat(self.MyOtherIncludedField, indent=0)
value = padding.join(value.splitlines(True))
L.append(' MyOtherIncludedField=%s' % (value))
if self.MyIncludedInt is not None:
value = pprint.pformat(self.MyIncludedInt, indent=0)
value = padding.join(value.splitlines(True))
L.append(' MyIncludedInt=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
if not six.PY2:
__hash__ = object.__hash__
all_structs.append(MyStruct)
MyStruct.thrift_spec = (
None, # 0
(1, TType.STRUCT, 'MyIncludedField', [includes.ttypes.Included, includes.ttypes.Included.thrift_spec, False], includes.ttypes.Included(**{
"MyIntField" : 2,
"MyTransitiveField" : transitive.ttypes.Foo(**{
"a" : 2,
}),
}), 2, ), # 1
(2, TType.STRUCT, 'MyOtherIncludedField', [includes.ttypes.Included, includes.ttypes.Included.thrift_spec, False], None, 2, ), # 2
(3, TType.I64, 'MyIncludedInt', None, 42, 2, ), # 3
)
MyStruct.thrift_struct_annotations = {
}
MyStruct.thrift_field_annotations = {
}
def MyStruct__init__(self, MyIncludedField=MyStruct.thrift_spec[1][4], MyOtherIncludedField=None, MyIncludedInt=MyStruct.thrift_spec[3][4],):
if MyIncludedField is self.thrift_spec[1][4]:
MyIncludedField = includes.ttypes.Included(**{
"MyIntField" : 2,
"MyTransitiveField" : transitive.ttypes.Foo(**{
"a" : 2,
}),
})
self.MyIncludedField = MyIncludedField
self.MyOtherIncludedField = MyOtherIncludedField
if MyIncludedInt is self.thrift_spec[3][4]:
MyIncludedInt = 42
self.MyIncludedInt = MyIncludedInt
MyStruct.__init__ = MyStruct__init__
def MyStruct__setstate__(self, state):
state.setdefault('MyIncludedField', includes.ttypes.Included(**{
"MyIntField" : 2,
"MyTransitiveField" : transitive.ttypes.Foo(**{
"a" : 2,
}),
}))
state.setdefault('MyOtherIncludedField', None)
state.setdefault('MyIncludedInt', 42)
self.__dict__ = state
MyStruct.__getstate__ = lambda self: self.__dict__.copy()
MyStruct.__setstate__ = MyStruct__setstate__
fix_spec(all_structs)
del all_structs
|
apache-2.0
|
bsdman/p2pool
|
p2pool/test/bitcoin/test_data.py
|
272
|
4635
|
import unittest
from p2pool.bitcoin import data, networks
from p2pool.util import pack
class Test(unittest.TestCase):
def test_header_hash(self):
assert data.hash256(data.block_header_type.pack(dict(
version=1,
previous_block=0x000000000000038a2a86b72387f93c51298298a732079b3b686df3603d2f6282,
merkle_root=0x37a43a3b812e4eb665975f46393b4360008824aab180f27d642de8c28073bc44,
timestamp=1323752685,
bits=data.FloatingInteger(437159528),
nonce=3658685446,
))) == 0x000000000000003aaaf7638f9f9c0d0c60e8b0eb817dcdb55fd2b1964efc5175
def test_header_hash_litecoin(self):
assert networks.nets['litecoin'].POW_FUNC(data.block_header_type.pack(dict(
version=1,
previous_block=0xd928d3066613d1c9dd424d5810cdd21bfeef3c698977e81ec1640e1084950073,
merkle_root=0x03f4b646b58a66594a182b02e425e7b3a93c8a52b600aa468f1bc5549f395f16,
timestamp=1327807194,
bits=data.FloatingInteger(0x1d01b56f),
nonce=20736,
))) < 2**256//2**30
def test_tx_hash(self):
assert data.hash256(data.tx_type.pack(dict(
version=1,
tx_ins=[dict(
previous_output=None,
sequence=None,
script='70736a0468860e1a0452389500522cfabe6d6d2b2f33cf8f6291b184f1b291d24d82229463fcec239afea0ee34b4bfc622f62401000000000000004d696e656420627920425443204775696c6420ac1eeeed88'.decode('hex'),
)],
tx_outs=[dict(
value=5003880250,
script=data.pubkey_hash_to_script2(pack.IntType(160).unpack('ca975b00a8c203b8692f5a18d92dc5c2d2ebc57b'.decode('hex'))),
)],
lock_time=0,
))) == 0xb53802b2333e828d6532059f46ecf6b313a42d79f97925e457fbbfda45367e5c
def test_address_to_pubkey_hash(self):
assert data.address_to_pubkey_hash('1KUCp7YP5FP8ViRxhfszSUJCTAajK6viGy', networks.nets['bitcoin']) == pack.IntType(160).unpack('ca975b00a8c203b8692f5a18d92dc5c2d2ebc57b'.decode('hex'))
def test_merkle_hash(self):
assert data.merkle_hash([
0xb53802b2333e828d6532059f46ecf6b313a42d79f97925e457fbbfda45367e5c,
0x326dfe222def9cf571af37a511ccda282d83bedcc01dabf8aa2340d342398cf0,
0x5d2e0541c0f735bac85fa84bfd3367100a3907b939a0c13e558d28c6ffd1aea4,
0x8443faf58aa0079760750afe7f08b759091118046fe42794d3aca2aa0ff69da2,
0x4d8d1c65ede6c8eab843212e05c7b380acb82914eef7c7376a214a109dc91b9d,
0x1d750bc0fa276f89db7e6ed16eb1cf26986795121f67c03712210143b0cb0125,
0x5179349931d714d3102dfc004400f52ef1fed3b116280187ca85d1d638a80176,
0xa8b3f6d2d566a9239c9ad9ae2ed5178dee4a11560a8dd1d9b608fd6bf8c1e75,
0xab4d07cd97f9c0c4129cff332873a44efdcd33bdbfc7574fe094df1d379e772f,
0xf54a7514b1de8b5d9c2a114d95fba1e694b6e3e4a771fda3f0333515477d685b,
0x894e972d8a2fc6c486da33469b14137a7f89004ae07b95e63923a3032df32089,
0x86cdde1704f53fce33ab2d4f5bc40c029782011866d0e07316d695c41e32b1a0,
0xf7cf4eae5e497be8215778204a86f1db790d9c27fe6a5b9f745df5f3862f8a85,
0x2e72f7ddf157d64f538ec72562a820e90150e8c54afc4d55e0d6e3dbd8ca50a,
0x9f27471dfbc6ce3cbfcf1c8b25d44b8d1b9d89ea5255e9d6109e0f9fd662f75c,
0x995f4c9f78c5b75a0c19f0a32387e9fa75adaa3d62fba041790e06e02ae9d86d,
0xb11ec2ad2049aa32b4760d458ee9effddf7100d73c4752ea497e54e2c58ba727,
0xa439f288fbc5a3b08e5ffd2c4e2d87c19ac2d5e4dfc19fabfa33c7416819e1ec,
0x3aa33f886f1357b4bbe81784ec1cf05873b7c5930ab912ee684cc6e4f06e4c34,
0xcab9a1213037922d94b6dcd9c567aa132f16360e213c202ee59f16dde3642ac7,
0xa2d7a3d2715eb6b094946c6e3e46a88acfb37068546cabe40dbf6cd01a625640,
0x3d02764f24816aaa441a8d472f58e0f8314a70d5b44f8a6f88cc8c7af373b24e,
0xcc5adf077c969ebd78acebc3eb4416474aff61a828368113d27f72ad823214d0,
0xf2d8049d1971f02575eb37d3a732d46927b6be59a18f1bd0c7f8ed123e8a58a,
0x94ffe8d46a1accd797351894f1774995ed7df3982c9a5222765f44d9c3151dbb,
0x82268fa74a878636261815d4b8b1b01298a8bffc87336c0d6f13ef6f0373f1f0,
0x73f441f8763dd1869fe5c2e9d298b88dc62dc8c75af709fccb3622a4c69e2d55,
0xeb78fc63d4ebcdd27ed618fd5025dc61de6575f39b2d98e3be3eb482b210c0a0,
0x13375a426de15631af9afdf00c490e87cc5aab823c327b9856004d0b198d72db,
0x67d76a64fa9b6c5d39fde87356282ef507b3dec1eead4b54e739c74e02e81db4,
]) == 0x37a43a3b812e4eb665975f46393b4360008824aab180f27d642de8c28073bc44
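# Standard unittest entry point (added for convenience; the original module is
# normally collected by p2pool's external test runner).
if __name__ == '__main__':
    unittest.main()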
|
gpl-3.0
|
thedrow/django
|
django/contrib/auth/forms.py
|
35
|
14207
|
from __future__ import unicode_literals
from django import forms
from django.contrib.auth import (
authenticate, get_user_model, password_validation,
)
from django.contrib.auth.hashers import (
UNUSABLE_PASSWORD_PREFIX, identify_hasher,
)
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMultiAlternatives
from django.forms.utils import flatatt
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.html import format_html, format_html_join
from django.utils.http import urlsafe_base64_encode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext, ugettext_lazy as _
class ReadOnlyPasswordHashWidget(forms.Widget):
def render(self, name, value, attrs):
encoded = value
final_attrs = self.build_attrs(attrs)
if not encoded or encoded.startswith(UNUSABLE_PASSWORD_PREFIX):
summary = mark_safe("<strong>%s</strong>" % ugettext("No password set."))
else:
try:
hasher = identify_hasher(encoded)
except ValueError:
summary = mark_safe("<strong>%s</strong>" % ugettext(
"Invalid password format or unknown hashing algorithm."))
else:
summary = format_html_join('',
"<strong>{}</strong>: {} ",
((ugettext(key), value)
for key, value in hasher.safe_summary(encoded).items())
)
return format_html("<div{}>{}</div>", flatatt(final_attrs), summary)
class ReadOnlyPasswordHashField(forms.Field):
widget = ReadOnlyPasswordHashWidget
def __init__(self, *args, **kwargs):
kwargs.setdefault("required", False)
super(ReadOnlyPasswordHashField, self).__init__(*args, **kwargs)
def bound_data(self, data, initial):
# Always return initial because the widget doesn't
# render an input field.
return initial
def has_changed(self, initial, data):
return False
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as above, for verification."))
class Meta:
model = User
fields = ("username",)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
password = ReadOnlyPasswordHashField(label=_("Password"),
help_text=_("Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"../password/\">this form</a>."))
class Meta:
model = User
fields = '__all__'
def __init__(self, *args, **kwargs):
super(UserChangeForm, self).__init__(*args, **kwargs)
f = self.fields.get('user_permissions')
if f is not None:
f.queryset = f.queryset.select_related('content_type')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = forms.CharField(max_length=254)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct %(username)s and password. "
"Note that both fields may be case-sensitive."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
The 'request' parameter is set for custom auth use by subclasses.
The form data comes in via the standard 'data' kwarg.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
UserModel = get_user_model()
self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
if self.fields['username'].label is None:
self.fields['username'].label = capfirst(self.username_field.verbose_name)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
else:
self.confirm_login_allowed(self.user_cache)
return self.cleaned_data
def confirm_login_allowed(self, user):
"""
Controls whether the given User may log in. This is a policy setting,
independent of end-user authentication. This default behavior is to
allow login by active users, and reject login by inactive users.
If the given user cannot log in, this method should raise a
``forms.ValidationError``.
If the given user may log in, this method should return None.
"""
if not user.is_active:
raise forms.ValidationError(
self.error_messages['inactive'],
code='inactive',
)
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
class PasswordResetForm(forms.Form):
email = forms.EmailField(label=_("Email"), max_length=254)
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name=None):
"""
Sends a django.core.mail.EmailMultiAlternatives to `to_email`.
"""
subject = loader.render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
body = loader.render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
if html_email_template_name is not None:
html_email = loader.render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, 'text/html')
email_message.send()
def get_users(self, email):
"""Given an email, return matching user(s) who should receive a reset.
This allows subclasses to more easily customize the default policies
that prevent inactive users and users with unusable passwords from
resetting their password.
"""
active_users = get_user_model()._default_manager.filter(
email__iexact=email, is_active=True)
return (u for u in active_users if u.has_usable_password())
def save(self, domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=default_token_generator,
from_email=None, request=None, html_email_template_name=None):
"""
Generates a one-use only link for resetting password and sends to the
user.
"""
email = self.cleaned_data["email"]
for user in self.get_users(email):
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
context = {
'email': user.email,
'domain': domain,
'site_name': site_name,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'user': user,
'token': token_generator.make_token(user),
'protocol': 'https' if use_https else 'http',
}
self.send_mail(subject_template_name, email_template_name,
context, from_email, user.email,
html_email_template_name=html_email_template_name)
class SetPasswordForm(forms.Form):
"""
    A form that lets a user set their password without entering the old
    password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
new_password1 = forms.CharField(label=_("New password"),
widget=forms.PasswordInput,
help_text=password_validation.password_validators_help_text_html())
new_password2 = forms.CharField(label=_("New password confirmation"),
widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
self.user = user
super(SetPasswordForm, self).__init__(*args, **kwargs)
def clean_new_password2(self):
password1 = self.cleaned_data.get('new_password1')
password2 = self.cleaned_data.get('new_password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
password_validation.validate_password(password2, self.user)
return password2
def save(self, commit=True):
password = self.cleaned_data["new_password1"]
self.user.set_password(password)
password_validation.password_changed(password, self.user)
if commit:
self.user.save()
return self.user
class PasswordChangeForm(SetPasswordForm):
"""
A form that lets a user change their password by entering their old
password.
"""
error_messages = dict(SetPasswordForm.error_messages, **{
'password_incorrect': _("Your old password was entered incorrectly. "
"Please enter it again."),
})
old_password = forms.CharField(label=_("Old password"),
widget=forms.PasswordInput)
field_order = ['old_password', 'new_password1', 'new_password2']
def clean_old_password(self):
"""
Validates that the old_password field is correct.
"""
old_password = self.cleaned_data["old_password"]
if not self.user.check_password(old_password):
raise forms.ValidationError(
self.error_messages['password_incorrect'],
code='password_incorrect',
)
return old_password
class AdminPasswordChangeForm(forms.Form):
"""
A form used to change the password of a user in the admin interface.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
required_css_class = 'required'
password1 = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput,
help_text=password_validation.password_validators_help_text_html(),
)
password2 = forms.CharField(
label=_("Password (again)"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as above, for verification."),
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
password_validation.validate_password(password2, self.user)
return password2
def save(self, commit=True):
"""
Saves the new password.
"""
password = self.cleaned_data["password1"]
self.user.set_password(password)
password_validation.password_changed(password, self.user)
if commit:
self.user.save()
return self.user
def _get_changed_data(self):
data = super(AdminPasswordChangeForm, self).changed_data
for name in self.fields.keys():
if name not in data:
return []
return ['password']
changed_data = property(_get_changed_data)
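# Illustrative usage sketch, kept as a comment because it belongs in project
# code rather than in this module (the view and template names below are
# placeholders, not part of Django):
#
#     from django.shortcuts import redirect, render
#     from django.contrib.auth.forms import UserCreationForm
#
#     def register(request):
#         form = UserCreationForm(request.POST or None)
#         if request.method == 'POST' and form.is_valid():
#             form.save()  # creates the user with the validated password
#             return redirect('login')
#         return render(request, 'registration/register.html', {'form': form})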
|
bsd-3-clause
|
kevinlondon/youtube-dl
|
youtube_dl/extractor/m6.py
|
147
|
1952
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class M6IE(InfoExtractor):
IE_NAME = 'm6'
_VALID_URL = r'http://(?:www\.)?m6\.fr/[^/]+/videos/(?P<id>\d+)-[^\.]+\.html'
_TEST = {
'url': 'http://www.m6.fr/emission-les_reines_du_shopping/videos/11323908-emeline_est_la_reine_du_shopping_sur_le_theme_ma_fete_d_8217_anniversaire.html',
'md5': '242994a87de2c316891428e0176bcb77',
'info_dict': {
'id': '11323908',
'ext': 'mp4',
'title': 'Emeline est la Reine du Shopping sur le thème « Ma fête d’anniversaire ! »',
'description': 'md5:1212ae8fb4b7baa4dc3886c5676007c2',
'duration': 100,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
rss = self._download_xml('http://ws.m6.fr/v1/video/info/m6/bonus/%s' % video_id, video_id,
'Downloading video RSS')
title = rss.find('./channel/item/title').text
description = rss.find('./channel/item/description').text
thumbnail = rss.find('./channel/item/visuel_clip_big').text
duration = int(rss.find('./channel/item/duration').text)
view_count = int(rss.find('./channel/item/nombre_vues').text)
formats = []
for format_id in ['lq', 'sd', 'hq', 'hd']:
video_url = rss.find('./channel/item/url_video_%s' % format_id)
if video_url is None:
continue
formats.append({
'url': video_url.text,
'format_id': format_id,
})
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
'formats': formats,
}
|
unlicense
|
PetePriority/home-assistant
|
homeassistant/components/cover/__init__.py
|
1
|
9420
|
"""
Support for Cover devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover/
"""
from datetime import timedelta
import functools as ft
import logging
import voluptuous as vol
from homeassistant.loader import bind_hass
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.config_validation import ( # noqa
PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE)
import homeassistant.helpers.config_validation as cv
from homeassistant.components import group
from homeassistant.helpers import intent
from homeassistant.const import (
SERVICE_OPEN_COVER, SERVICE_CLOSE_COVER, SERVICE_SET_COVER_POSITION,
SERVICE_STOP_COVER, SERVICE_OPEN_COVER_TILT, SERVICE_CLOSE_COVER_TILT,
SERVICE_STOP_COVER_TILT, SERVICE_SET_COVER_TILT_POSITION, STATE_OPEN,
STATE_CLOSED, STATE_OPENING, STATE_CLOSING, ATTR_ENTITY_ID)
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'cover'
DEPENDENCIES = ['group']
SCAN_INTERVAL = timedelta(seconds=15)
GROUP_NAME_ALL_COVERS = 'all covers'
ENTITY_ID_ALL_COVERS = group.ENTITY_ID_FORMAT.format('all_covers')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
DEVICE_CLASSES = [
'damper',
'garage', # Garage door control
'window', # Window control
]
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES))
SUPPORT_OPEN = 1
SUPPORT_CLOSE = 2
SUPPORT_SET_POSITION = 4
SUPPORT_STOP = 8
SUPPORT_OPEN_TILT = 16
SUPPORT_CLOSE_TILT = 32
SUPPORT_STOP_TILT = 64
SUPPORT_SET_TILT_POSITION = 128
ATTR_CURRENT_POSITION = 'current_position'
ATTR_CURRENT_TILT_POSITION = 'current_tilt_position'
ATTR_POSITION = 'position'
ATTR_TILT_POSITION = 'tilt_position'
INTENT_OPEN_COVER = 'HassOpenCover'
INTENT_CLOSE_COVER = 'HassCloseCover'
COVER_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
})
COVER_SET_COVER_POSITION_SCHEMA = COVER_SERVICE_SCHEMA.extend({
vol.Required(ATTR_POSITION):
vol.All(vol.Coerce(int), vol.Range(min=0, max=100)),
})
COVER_SET_COVER_TILT_POSITION_SCHEMA = COVER_SERVICE_SCHEMA.extend({
vol.Required(ATTR_TILT_POSITION):
vol.All(vol.Coerce(int), vol.Range(min=0, max=100)),
})
@bind_hass
def is_closed(hass, entity_id=None):
"""Return if the cover is closed based on the statemachine."""
entity_id = entity_id or ENTITY_ID_ALL_COVERS
return hass.states.is_state(entity_id, STATE_CLOSED)
async def async_setup(hass, config):
"""Track states and offer events for covers."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_COVERS)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_OPEN_COVER, COVER_SERVICE_SCHEMA,
'async_open_cover'
)
component.async_register_entity_service(
SERVICE_CLOSE_COVER, COVER_SERVICE_SCHEMA,
'async_close_cover'
)
component.async_register_entity_service(
SERVICE_SET_COVER_POSITION, COVER_SET_COVER_POSITION_SCHEMA,
'async_set_cover_position'
)
component.async_register_entity_service(
SERVICE_STOP_COVER, COVER_SERVICE_SCHEMA,
'async_stop_cover'
)
component.async_register_entity_service(
SERVICE_OPEN_COVER_TILT, COVER_SERVICE_SCHEMA,
'async_open_cover_tilt'
)
component.async_register_entity_service(
SERVICE_CLOSE_COVER_TILT, COVER_SERVICE_SCHEMA,
'async_close_cover_tilt'
)
component.async_register_entity_service(
SERVICE_STOP_COVER_TILT, COVER_SERVICE_SCHEMA,
'async_stop_cover_tilt'
)
component.async_register_entity_service(
SERVICE_SET_COVER_TILT_POSITION, COVER_SET_COVER_TILT_POSITION_SCHEMA,
'async_set_cover_tilt_position'
)
hass.helpers.intent.async_register(intent.ServiceIntentHandler(
INTENT_OPEN_COVER, DOMAIN, SERVICE_OPEN_COVER,
"Opened {}"))
hass.helpers.intent.async_register(intent.ServiceIntentHandler(
INTENT_CLOSE_COVER, DOMAIN, SERVICE_CLOSE_COVER,
"Closed {}"))
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class CoverDevice(Entity):
    """Representation of a cover."""
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
pass
@property
def current_cover_tilt_position(self):
"""Return current position of cover tilt.
None is unknown, 0 is closed, 100 is fully open.
"""
pass
@property
def state(self):
"""Return the state of the cover."""
if self.is_opening:
return STATE_OPENING
if self.is_closing:
return STATE_CLOSING
closed = self.is_closed
if closed is None:
return None
return STATE_CLOSED if closed else STATE_OPEN
@property
def state_attributes(self):
"""Return the state attributes."""
data = {}
current = self.current_cover_position
if current is not None:
data[ATTR_CURRENT_POSITION] = self.current_cover_position
current_tilt = self.current_cover_tilt_position
if current_tilt is not None:
data[ATTR_CURRENT_TILT_POSITION] = self.current_cover_tilt_position
return data
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP
if self.current_cover_position is not None:
supported_features |= SUPPORT_SET_POSITION
if self.current_cover_tilt_position is not None:
supported_features |= (
SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_STOP_TILT |
SUPPORT_SET_TILT_POSITION)
return supported_features
@property
def is_opening(self):
"""Return if the cover is opening or not."""
pass
@property
def is_closing(self):
"""Return if the cover is closing or not."""
pass
@property
def is_closed(self):
"""Return if the cover is closed or not."""
raise NotImplementedError()
def open_cover(self, **kwargs):
"""Open the cover."""
raise NotImplementedError()
def async_open_cover(self, **kwargs):
"""Open the cover.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(ft.partial(self.open_cover, **kwargs))
def close_cover(self, **kwargs):
"""Close cover."""
raise NotImplementedError()
def async_close_cover(self, **kwargs):
"""Close cover.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(ft.partial(self.close_cover, **kwargs))
def set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
pass
def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(
ft.partial(self.set_cover_position, **kwargs))
def stop_cover(self, **kwargs):
"""Stop the cover."""
pass
def async_stop_cover(self, **kwargs):
"""Stop the cover.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(ft.partial(self.stop_cover, **kwargs))
def open_cover_tilt(self, **kwargs):
"""Open the cover tilt."""
pass
def async_open_cover_tilt(self, **kwargs):
"""Open the cover tilt.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(
ft.partial(self.open_cover_tilt, **kwargs))
def close_cover_tilt(self, **kwargs):
"""Close the cover tilt."""
pass
def async_close_cover_tilt(self, **kwargs):
"""Close the cover tilt.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(
ft.partial(self.close_cover_tilt, **kwargs))
def set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
pass
def async_set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(
ft.partial(self.set_cover_tilt_position, **kwargs))
    def stop_cover_tilt(self, **kwargs):
        """Stop the cover tilt."""
pass
    def async_stop_cover_tilt(self, **kwargs):
        """Stop the cover tilt.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(
ft.partial(self.stop_cover_tilt, **kwargs))
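# Minimal illustrative platform entity (added as a sketch; real platforms live
# in their own modules under homeassistant/components/cover/). It shows the
# smallest surface a concrete cover needs on top of CoverDevice: is_closed
# plus the open/close handlers; everything else falls back to the base class.
class DemoCover(CoverDevice):
    """Cover that only tracks an in-memory open/closed flag."""

    def __init__(self, name):
        """Initialize the demo cover as closed."""
        self._name = name
        self._closed = True

    @property
    def name(self):
        """Return the name of the cover."""
        return self._name

    @property
    def is_closed(self):
        """Return True if the cover is closed."""
        return self._closed

    def open_cover(self, **kwargs):
        """Open the cover."""
        self._closed = False

    def close_cover(self, **kwargs):
        """Close the cover."""
        self._closed = True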
|
apache-2.0
|
rjdp/EE-dbmigrate
|
ee/cli/plugins/stack_migrate.py
|
3
|
5210
|
from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from ee.core.mysql import EEMysql
from ee.core.logging import Log
from ee.core.variables import EEVariables
from ee.core.aptget import EEAptGet
from ee.core.shellexec import EEShellExec
from ee.core.apt_repo import EERepo
from ee.core.services import EEService
import configparser
import os
class EEStackMigrateController(CementBaseController):
class Meta:
label = 'migrate'
stacked_on = 'stack'
stacked_type = 'nested'
description = ('Migrate stack safely')
arguments = [
(['--mariadb'],
dict(help="Migrate database to MariaDB",
action='store_true')),
# (['--PHP'],
# dict(help="update to html site", action='store_true')),
]
@expose(hide=True)
def migrate_mariadb(self):
# Backup all database
EEMysql.backupAll(self)
# Add MariaDB repo
Log.info(self, "Adding repository for MariaDB, please wait...")
mysql_pref = ("Package: *\nPin: origin mirror.aarnet.edu.au"
"\nPin-Priority: 1000\n")
with open('/etc/apt/preferences.d/'
'MariaDB.pref', 'w') as mysql_pref_file:
mysql_pref_file.write(mysql_pref)
EERepo.add(self, repo_url=EEVariables.ee_mysql_repo)
Log.debug(self, 'Adding key for {0}'
.format(EEVariables.ee_mysql_repo))
EERepo.add_key(self, '0xcbcb082a1bb943db',
keyserver="keyserver.ubuntu.com")
config = configparser.ConfigParser()
config.read(os.path.expanduser("~")+'/.my.cnf')
try:
chars = config['client']['password']
except Exception as e:
Log.error(self, "Error: process exited with error %s"
% e)
Log.debug(self, "Pre-seeding MariaDB")
Log.debug(self, "echo \"mariadb-server-10.0 "
"mysql-server/root_password "
"password \" | "
"debconf-set-selections")
EEShellExec.cmd_exec(self, "echo \"mariadb-server-10.0 "
"mysql-server/root_password "
"password {chars}\" | "
"debconf-set-selections"
.format(chars=chars),
log=False)
Log.debug(self, "echo \"mariadb-server-10.0 "
"mysql-server/root_password_again "
"password \" | "
"debconf-set-selections")
EEShellExec.cmd_exec(self, "echo \"mariadb-server-10.0 "
"mysql-server/root_password_again "
"password {chars}\" | "
"debconf-set-selections"
.format(chars=chars),
log=False)
# Install MariaDB
apt_packages = EEVariables.ee_mysql
# If PHP is installed then install php5-mysql
if EEAptGet.is_installed(self, "php5-fpm"):
apt_packages = apt_packages + ["php5-mysql"]
# If mail server is installed then install dovecot-sql and postfix-sql
if EEAptGet.is_installed(self, "dovecot-core"):
apt_packages = apt_packages + ["dovecot-mysql", "postfix-mysql",
"libclass-dbi-mysql-perl"]
Log.info(self, "Updating apt-cache, please wait...")
EEAptGet.update(self)
Log.info(self, "Installing MariaDB, please wait...")
EEAptGet.remove(self, ["mysql-common", "libmysqlclient18"])
EEAptGet.auto_remove(self)
EEAptGet.install(self, apt_packages)
# Restart dovecot and postfix if installed
if EEAptGet.is_installed(self, "dovecot-core"):
EEService.restart_service(self, 'dovecot')
EEService.restart_service(self, 'postfix')
@expose(hide=True)
def default(self):
        if not self.app.pargs.mariadb:
self.app.args.print_help()
if self.app.pargs.mariadb:
            if EEVariables.ee_mysql_host != "localhost":
Log.error(self, "Remote MySQL found, EasyEngine will not "
"install MariaDB")
if EEShellExec.cmd_exec(self, "mysqladmin ping") and (not
EEAptGet.is_installed(self, 'mariadb-server')):
Log.info(self, "If your database size is big, "
"migration may take some time.")
Log.info(self, "During migration non nginx-cached parts of "
"your site may remain down")
start_migrate = input("Type \"mariadb\" to continue:")
if start_migrate != "mariadb":
Log.error(self, "Not starting migration")
self.migrate_mariadb()
else:
                Log.error(self, "Your current MySQL is not alive or "
                                "you already installed MariaDB")
|
mit
|
fairbird/OpenPLI-BlackHole
|
lib/python/Blackhole/BhBlue.py
|
1
|
16097
|
from Screens.Screen import Screen
from enigma import iServiceInformation, eTimer
from Components.ActionMap import ActionMap
from Components.Label import Label
from Components.ScrollLabel import ScrollLabel
from Components.MenuList import MenuList
from Components.Sources.List import List
from Components.Pixmap import MultiPixmap
from Components.ConfigList import ConfigListScreen
from Components.config import config, ConfigSubsection, ConfigText, getConfigListEntry, ConfigSelection, NoSave
from Screens.MessageBox import MessageBox
from Tools.Directories import fileExists
from ServiceReference import ServiceReference
from os import system, listdir, chdir, getcwd, rename as os_rename
from BhEpgPanel import DeliteEpgPanel
from BhSettings import DeliteSettings
from BhInfo import DeliteInfo
from BhUtils import BhU_get_Version, BhU_check_proc_version
import socket
config.delite = ConfigSubsection()
config.delite.fp = ConfigText(default='')
class DeliteBluePanel(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self['lab1'] = Label(_('xx CAMs Installed'))
self['lab2'] = Label(_('Set Default CAM'))
self['lab3'] = Label(_('Active CAM'))
self['Ilab1'] = Label()
self['Ilab2'] = Label()
self['Ilab3'] = Label()
self['Ilab4'] = Label()
self['key_red'] = Label(_('Epg Panel'))
self['key_green'] = Label(_('Autocam'))
self['key_yellow'] = Label(_('Sys Info'))
self['key_blue'] = Label(_('Extra Settings'))
self['activecam'] = Label()
self['Ecmtext'] = ScrollLabel()
self.emlist = []
self.populate_List()
self['list'] = MenuList(self.emlist)
totcam = str(len(self.emlist))
self['lab1'].setText(totcam + ' ' + _('CAMs Installed'))
self.onShow.append(self.updateBP)
self['myactions'] = ActionMap(['ColorActions', 'OkCancelActions', 'DirectionActions'], {'ok': self.keyOk,
'cancel': self.close,
'green': self.autoCam,
'red': self.keyRed,
'yellow': self.nInfo,
'blue': self.Settings,
'up': self['Ecmtext'].pageUp,
'down': self['Ecmtext'].pageDown}, -1)
def nInfo(self):
self.session.open(DeliteInfo)
def Settings(self):
self.session.open(DeliteSettings)
def autoCam(self):
self.session.open(DeliteAutocamMan)
def keyRed(self):
self.session.open(DeliteEpgPanel)
def populate_List(self):
self.camnames = {}
cams = listdir('/usr/camscript')
for fil in cams:
if fil.find('Ncam_') != -1:
f = open('/usr/camscript/' + fil, 'r')
for line in f.readlines():
if line.find('CAMNAME=') != -1:
line = line.strip()
cn = line[9:-1]
self.emlist.append(cn)
self.camnames[cn] = '/usr/camscript/' + fil
f.close()
if fileExists('/etc/BhCamConf') == False:
out = open('/etc/BhCamConf', 'w')
out.write('delcurrent|/usr/camscript/Ncam_Ci.sh\n')
out.write('deldefault|/usr/camscript/Ncam_Ci.sh\n')
out.close()
def updateBP(self):
name = 'N/A'
provider = 'N/A'
aspect = 'N/A'
videosize = 'N/A'
myserviceinfo = ''
myservice = self.session.nav.getCurrentService()
if myservice is not None:
myserviceinfo = myservice.info()
if self.session.nav.getCurrentlyPlayingServiceReference():
name = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference()).getServiceName()
provider = self.getServiceInfoValue(iServiceInformation.sProvider, myserviceinfo)
aspect = self.getServiceInfoValue(iServiceInformation.sAspect, myserviceinfo)
if aspect in (1, 2, 5, 6, 9, 10, 13, 14):
aspect = '4:3'
else:
aspect = '16:9'
if myserviceinfo:
width = myserviceinfo and myserviceinfo.getInfo(iServiceInformation.sVideoWidth) or -1
height = myserviceinfo and myserviceinfo.getInfo(iServiceInformation.sVideoHeight) or -1
if width != -1 and height != -1:
videosize = '%dx%d' % (width, height)
self['Ilab1'].setText(_('Name: ') + name)
self['Ilab2'].setText(_('Provider: ') + provider)
self['Ilab3'].setText(_('Aspect Ratio: ') + aspect)
self['Ilab4'].setText(_('Videosize: ') + videosize)
self.currentcam = '/usr/camscript/Ncam_Ci.sh'
self.defaultcam = '/usr/camscript/Ncam_Ci.sh'
f = open('/etc/BhCamConf', 'r')
for line in f.readlines():
parts = line.strip().split('|')
if parts[0] == 'delcurrent':
self.currentcam = parts[1]
elif parts[0] == 'deldefault':
self.defaultcam = parts[1]
f.close()
defCamname = 'Common Interface'
curCamname = 'Common Interface'
for c in self.camnames.keys():
if self.camnames[c] == self.defaultcam:
defCamname = c
if self.camnames[c] == self.currentcam:
curCamname = c
pos = 0
for x in self.emlist:
if x == defCamname:
self['list'].moveToIndex(pos)
break
pos += 1
mytext = ''
if fileExists('/tmp/ecm.info'):
f = open('/tmp/ecm.info', 'r')
for line in f.readlines():
line = line.replace('\n', '')
line = line.strip()
if len(line) > 3:
mytext = mytext + line + '\n'
f.close()
if len(mytext) < 5:
mytext = '\n\n ' + _('Ecm Info not available.')
self['activecam'].setText(curCamname)
self['Ecmtext'].setText(mytext)
def getServiceInfoValue(self, what, myserviceinfo):
if myserviceinfo is None:
return ''
else:
v = myserviceinfo.getInfo(what)
if v == -2:
v = myserviceinfo.getInfoString(what)
elif v == -1:
v = 'N/A'
return v
def keyOk(self):
self.sel = self['list'].getCurrent()
self.newcam = self.camnames[self.sel]
inme = open('/etc/BhCamConf', 'r')
out = open('/etc/BhCamConf.tmp', 'w')
for line in inme.readlines():
if line.find('delcurrent') == 0:
line = 'delcurrent|' + self.newcam + '\n'
elif line.find('deldefault') == 0:
line = 'deldefault|' + self.newcam + '\n'
out.write(line)
out.close()
inme.close()
os_rename('/etc/BhCamConf.tmp', '/etc/BhCamConf')
out = open('/etc/CurrentBhCamName', 'w')
out.write(self.sel)
out.close()
cmd = 'cp -f ' + self.newcam + ' /usr/bin/StartBhCam'
system(cmd)
mydata = 'STOP_CAMD,' + self.currentcam
client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client_socket.connect('/tmp/Blackhole.socket')
client_socket.send(mydata)
client_socket.close()
mydata = 'NEW_CAMD,' + self.newcam
client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
client_socket.connect('/tmp/Blackhole.socket')
client_socket.send(mydata)
client_socket.close()
self.session.openWithCallback(self.myclose, Nab_DoStartCam, self.sel)
def checkKern(self):
mycheck = 0
deversion = BhU_get_Version()
if deversion == BhU_check_proc_version():
mycheck = 1
else:
nobox = self.session.open(MessageBox, _('Sorry: Wrong image in flash found. You have to install in flash Black Hole image v. ') + deversion, MessageBox.TYPE_INFO)
nobox.setTitle(_('Info'))
self.myclose()
return mycheck
def myclose(self):
self.close()
class Nab_DoStartCam(Screen):
skin = '\n\t<screen position="390,100" size="484,250" title="Black Hole" flags="wfNoBorder">\n\t\t<widget name="connect" position="0,0" size="484,250" zPosition="-1" pixmaps="skin_default/startcam_1.png,skin_default/startcam_2.png,skin_default/startcam_3.png,skin_default/startcam_4.png" transparent="1" />\n\t\t<widget name="lab1" position="10,180" halign="center" size="460,60" zPosition="1" font="Regular;20" valign="top" transparent="1" />\n\t</screen>'
def __init__(self, session, title):
Screen.__init__(self, session)
msg = _('Please wait while starting\n') + title + '...'
self['connect'] = MultiPixmap()
self['lab1'] = Label(msg)
self.activityTimer = eTimer()
self.activityTimer.timeout.get().append(self.updatepix)
self.onShow.append(self.startShow)
self.onClose.append(self.delTimer)
def startShow(self):
self.curpix = 0
self.count = 0
self['connect'].setPixmapNum(0)
self.activityTimer.start(10)
def updatepix(self):
self.activityTimer.stop()
if self.curpix > 2:
self.curpix = 0
if self.count > 7:
self.curpix = 3
self['connect'].setPixmapNum(self.curpix)
if self.count == 20:
self.hide()
self.close()
self.activityTimer.start(140)
self.curpix += 1
self.count += 1
def delTimer(self):
del self.activityTimer
class DeliteAutocamMan(Screen):
skin = '\n\t<screen position="240,120" size="800,520" title="Black Hole Autocam Manager">\n\t\t<widget name="defaultcam" position="10,10" size="780,30" font="Regular;24" halign="center" valign="center" backgroundColor="#9f1313" />\n\t\t<widget source="list" render="Listbox" position="20,60" size="760,400" scrollbarMode="showOnDemand" >\n\t\t\t<convert type="StringList" />\n\t\t</widget>\n \t\t<ePixmap pixmap="skin_default/buttons/red.png" position="200,480" size="140,40" alphatest="on" />\n\t\t<ePixmap pixmap="skin_default/buttons/yellow.png" position="440,480" size="140,40" alphatest="on" />\n\t\t<widget name="key_red" position="200,480" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />\n\t\t<widget name="key_yellow" position="440,480" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />\n \t</screen>'
def __init__(self, session):
Screen.__init__(self, session)
self['key_red'] = Label(_('Add'))
self['key_yellow'] = Label(_('Delete'))
self['defaultcam'] = Label(_('Default Cam:'))
self.emlist = []
self.camnames = {}
self.list = []
self['list'] = List(self.list)
self['actions'] = ActionMap(['WizardActions', 'ColorActions'], {'ok': self.close,
'back': self.close,
'red': self.addtocam,
'yellow': self.deltocam})
self.updateList()
def addtocam(self):
self.session.openWithCallback(self.updateList, DeliteSetupAutocam)
def updateList(self):
self.list = []
cams = listdir('/usr/camscript')
for fil in cams:
if fil.find('Ncam_') != -1:
f = open('/usr/camscript/' + fil, 'r')
for line in f.readlines():
if line.find('CAMNAME=') != -1:
line = line.strip()
cn = line[9:-1]
self.emlist.append(cn)
self.camnames[cn] = '/usr/camscript/' + fil
f.close()
f = open('/etc/BhCamConf', 'r')
for line in f.readlines():
parts = line.strip().split('|')
if parts[0] == 'delcurrent':
continue
elif parts[0] == 'deldefault':
defaultcam = self.GetCamName(parts[1])
self['defaultcam'].setText(_('Default Cam: ') + defaultcam)
else:
text = parts[2] + '\t' + self.GetCamName(parts[1])
res = (text, parts[0])
self.list.append(res)
f.close()
self['list'].list = self.list
def GetCamName(self, cam):
activeCam = ''
for c in self.camnames.keys():
if self.camnames[c] == cam:
activeCam = c
return activeCam
def deltocam(self):
mysel = self['list'].getCurrent()
if mysel:
mysel = mysel[1]
out = open('/etc/BhCamConf.tmp', 'w')
f = open('/etc/BhCamConf', 'r')
for line in f.readlines():
parts = line.strip().split('|')
if parts[0] != mysel:
out.write(line)
f.close()
out.close()
os_rename('/etc/BhCamConf.tmp', '/etc/BhCamConf')
self.updateList()
class DeliteSetupAutocam(Screen, ConfigListScreen):
skin = '\n\t<screen position="240,190" size="800,340" title="Black Hole Autocam Setup">\n\t\t<widget name="config" position="10,20" size="780,280" scrollbarMode="showOnDemand" />\n\t\t<ePixmap pixmap="skin_default/buttons/red.png" position="330,270" size="140,40" alphatest="on" />\n\t\t<widget name="key_red" position="330,270" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />\n\t</screen>'
def __init__(self, session):
Screen.__init__(self, session)
self.list = []
ConfigListScreen.__init__(self, self.list)
self['key_red'] = Label(_('Save'))
self['actions'] = ActionMap(['WizardActions', 'ColorActions'], {'red': self.saveMyconf,
'back': self.close})
self.updateList()
def updateList(self):
mychoices = []
self.chname = 'Unknown'
self.chref = 'Unknown'
myservice = self.session.nav.getCurrentService()
if myservice is not None:
myserviceinfo = myservice.info()
if self.session.nav.getCurrentlyPlayingServiceReference():
self.chname = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference()).getServiceName()
self.chref = self.session.nav.getCurrentlyPlayingServiceReference().toString()
cams = listdir('/usr/camscript')
for fil in cams:
if fil.find('Ncam_') != -1:
f = open('/usr/camscript/' + fil, 'r')
for line in f.readlines():
if line.find('CAMNAME=') != -1:
line = line.strip()
cn = line[9:-1]
cn2 = '/usr/camscript/' + fil
res = (cn2, cn)
mychoices.append(res)
f.close()
self.autocam_file = NoSave(ConfigSelection(choices=mychoices))
res = getConfigListEntry(self.chname, self.autocam_file)
self.list.append(res)
self['config'].list = self.list
self['config'].l.setList(self.list)
def saveMyconf(self):
check = True
f = open('/etc/BhCamConf', 'r')
for line in f.readlines():
parts = line.strip().split('|')
if parts[0] == self.chref:
check = False
f.close()
if check == True:
line = self.chref + '|' + self.autocam_file.value + '|' + self.chname + '\n'
out = open('/etc/BhCamConf', 'a')
out.write(line)
out.close()
self.close()
class DeliteBp:
def __init__(self):
self['DeliteBp'] = ActionMap(['InfobarExtensions'], {'DeliteBpshow': self.showDeliteBp})
def showDeliteBp(self):
self.session.openWithCallback(self.callNabAction, DeliteBluePanel)
def callNabAction(self, *args):
if len(args):
actionmap, context, action = args
actionmap.action(context, action)
|
gpl-2.0
|
rgommers/statsmodels
|
statsmodels/sandbox/tsa/examples/try_ld_nitime.py
|
34
|
1354
|
'''Levinson Durbin recursion adjusted from nitime
'''
from statsmodels.compat.python import range
import numpy as np
from statsmodels.tsa.stattools import acovf
def levinson_durbin_nitime(s, order=10, isacov=False):
'''Levinson-Durbin recursion for autoregressive processes
'''
#from nitime
## if sxx is not None and type(sxx) == np.ndarray:
## sxx_m = sxx[:order+1]
## else:
## sxx_m = ut.autocov(s)[:order+1]
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:order+1] #not tested
phi = np.zeros((order+1, order+1), 'd')
sig = np.zeros(order+1)
# initial points for the recursion
phi[1,1] = sxx_m[1]/sxx_m[0]
sig[1] = sxx_m[0] - phi[1,1]*sxx_m[1]
for k in range(2,order+1):
phi[k,k] = (sxx_m[k]-np.dot(phi[1:k,k-1], sxx_m[1:k][::-1]))/sig[k-1]
for j in range(1,k):
phi[j,k] = phi[j,k-1] - phi[k,k]*phi[k-j,k-1]
sig[k] = sig[k-1]*(1 - phi[k,k]**2)
    sigma_v = sig[-1]; arcoefs = phi[1:,-1]
    pacf = np.diag(phi).copy(); pacf[0] = 1.  # partial autocorrelations (lag 0 is 1 by definition)
    return sigma_v, arcoefs, pacf, phi #return everything
import nitime.utils as ut
sxx=None
order = 10
npts = 2048*10
sigma = 1
drop_transients = 1024
coefs = np.array([0.9, -0.5])
# Generate AR(2) time series
X, v, _ = ut.ar_generator(npts, sigma, coefs, drop_transients)
s = X
import statsmodels.api as sm
sm.tsa.stattools.pacf(X)
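# Quick illustrative check (added; not in the original sandbox script): run the
# recursion above on the simulated AR(2) series and compare the leading
# estimated AR coefficients with the ones used by ut.ar_generator. This
# exercises the acovf-based branch marked "not tested" above.
sigma_v, arcoefs, pacf_est, phi_full = levinson_durbin_nitime(s, order=order)
print('generator coefs:         %s' % (coefs,))
print('estimated AR coefs[:2]:  %s' % (arcoefs[:2],))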
|
bsd-3-clause
|
alikins/subscription-manager
|
src/subscription_manager/managerlib.py
|
2
|
32414
|
#
# common calls to get product and entitlement info for gui/tui
#
# Copyright (c) 2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import gettext
import glob
import logging
import os
import re
import shutil
import stat
import syslog
from rhsm.config import initConfig
from rhsm.certificate import Key, CertificateException, create_from_pem
import subscription_manager.cache as cache
from subscription_manager.cert_sorter import StackingGroupSorter, ComplianceManager
from subscription_manager import identity
from subscription_manager.facts import Facts
from subscription_manager.injection import require, CERT_SORTER, \
IDENTITY, ENTITLEMENT_STATUS_CACHE, \
PROD_STATUS_CACHE, ENT_DIR, PROD_DIR, CP_PROVIDER, OVERRIDE_STATUS_CACHE, \
POOLTYPE_CACHE
from subscription_manager import isodate
from subscription_manager.jsonwrapper import PoolWrapper
from subscription_manager.repolib import RepoActionInvoker
from subscription_manager import utils
# FIXME FIXME
from subscription_manager.identity import ConsumerIdentity
from dateutil.tz import tzlocal
log = logging.getLogger('rhsm-app.' + __name__)
_ = gettext.gettext
cfg = initConfig()
ENT_CONFIG_DIR = cfg.get('rhsm', 'entitlementCertDir')
# Expected permissions for identity certificates:
ID_CERT_PERMS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP
def system_log(message, priority=syslog.LOG_NOTICE):
utils.system_log(message, priority)
# FIXME: move me to identity.py
def persist_consumer_cert(consumerinfo):
"""
Calls the consumerIdentity, persists and gets consumer info
"""
cert_dir = cfg.get('rhsm', 'consumerCertDir')
if not os.path.isdir(cert_dir):
os.mkdir(cert_dir)
# unsure if this could be injected?
consumer = identity.ConsumerIdentity(consumerinfo['idCert']['key'],
consumerinfo['idCert']['cert'])
consumer.write()
consumer_info = {"consumer_name": consumer.getConsumerName(),
"uuid": consumer.getConsumerId()}
log.info("Consumer created: %s" % consumer_info)
system_log("Registered system with identity: %s" % consumer.getConsumerId())
return consumer_info
class CertificateFetchError(Exception):
def __init__(self, errors):
self.errors = errors
def __str__(self, reason=""):
msg = 'Entitlement Certificate(s) update failed due to the following reasons:\n' + \
'\n'.join(self.errors)
return msg
def fetch_certificates(certlib):
# Force fetch all certs
result = certlib.update()
exceptions = result.exceptions()
if exceptions:
raise CertificateFetchError(exceptions)
return True
class PoolFilter(object):
"""
Helper to filter a list of pools.
"""
# Although sorter isn't necessarily required, when present it allows
# us to not filter out yellow packages when "has no overlap" is selected
def __init__(self, product_dir, entitlement_dir, sorter=None):
self.product_directory = product_dir
self.entitlement_directory = entitlement_dir
self.sorter = sorter
def filter_product_ids(self, pools, product_ids):
"""
Filter a list of pools and return just those that provide products
in the requested list of product ids. Both the top level product
and all provided products will be checked.
"""
matched_pools = []
for pool in pools:
if pool['productId'] in product_ids:
log.debug("pool matches: %s" % pool['productId'])
matched_pools.append(pool)
continue
for provided in pool['providedProducts']:
if provided['productId'] in product_ids:
log.debug("pool provides: %s" % provided['productId'])
matched_pools.append(pool)
break
return matched_pools
def filter_out_uninstalled(self, pools):
"""
Filter the given list of pools, return only those which provide
a product installed on this system.
"""
installed_products = self.product_directory.list()
matched_data_dict = {}
for d in pools:
for product in installed_products:
productid = product.products[0].id
# we only need one matched item per pool id, so add to dict to keep unique:
# Build a list of provided product IDs for comparison:
provided_ids = [p['productId'] for p in d['providedProducts']]
if str(productid) in provided_ids or \
str(productid) == d['productId']:
matched_data_dict[d['id']] = d
return matched_data_dict.values()
def filter_out_installed(self, pools):
"""
Filter the given list of pools, return only those which do not provide
a product installed on this system.
"""
installed_products = self.product_directory.list()
matched_data_dict = {}
for d in pools:
matched_data_dict[d['id']] = d
provided_ids = [p['productId'] for p in d['providedProducts']]
for product in installed_products:
productid = product.products[0].id
# we only need one matched item per pool id, so add to dict to keep unique:
if str(productid) in provided_ids or \
str(productid) == d['productId']:
del matched_data_dict[d['id']]
break
return matched_data_dict.values()
def filter_product_name(self, pools, contains_text):
"""
Filter the given list of pools, removing those whose product name
does not contain the given text.
"""
lowered = contains_text.lower()
filtered_pools = []
for pool in pools:
if lowered in pool['productName'].lower():
filtered_pools.append(pool)
else:
for provided in pool['providedProducts']:
if lowered in provided['productName'].lower():
filtered_pools.append(pool)
break
return filtered_pools
def _get_entitled_product_ids(self):
entitled_products = []
for cert in self.entitlement_directory.list():
for product in cert.products:
entitled_products.append(product.id)
return entitled_products
def _get_entitled_product_to_cert_map(self):
entitled_products_to_certs = {}
for cert in self.entitlement_directory.list():
for product in cert.products:
prod_id = product.id
if prod_id not in entitled_products_to_certs:
entitled_products_to_certs[prod_id] = set()
entitled_products_to_certs[prod_id].add(cert)
return entitled_products_to_certs
def _dates_overlap(self, pool, certs):
pool_start = isodate.parse_date(pool['startDate'])
pool_end = isodate.parse_date(pool['endDate'])
for cert in certs:
cert_range = cert.valid_range
if cert_range.has_date(pool_start) or cert_range.has_date(pool_end):
return True
return False
def filter_out_overlapping(self, pools):
entitled_product_ids_to_certs = self._get_entitled_product_to_cert_map()
filtered_pools = []
for pool in pools:
provided_ids = set([p['productId'] for p in pool['providedProducts']])
wrapped_pool = PoolWrapper(pool)
# NOTE: We may have to check for other types or handle the case of a product with no type in the future
if wrapped_pool.get_product_attributes('type')['type'] == 'SVC':
provided_ids.add(pool['productId'])
overlap = 0
possible_overlap_pids = provided_ids.intersection(entitled_product_ids_to_certs.keys())
for productid in possible_overlap_pids:
if self._dates_overlap(pool, entitled_product_ids_to_certs[productid]) \
and productid not in self.sorter.partially_valid_products:
overlap += 1
else:
break
if overlap != len(provided_ids) or wrapped_pool.get_stacking_id() in self.sorter.partial_stacks:
filtered_pools.append(pool)
return filtered_pools
def filter_out_non_overlapping(self, pools):
not_overlapping = self.filter_out_overlapping(pools)
return [pool for pool in pools if pool not in not_overlapping]
def filter_subscribed_pools(self, pools, subscribed_pool_ids,
compatible_pools):
"""
Filter the given list of pools, removing those for which the system
already has a subscription, unless the pool can be subscribed to again
(ie has multi-entitle).
"""
resubscribeable_pool_ids = [pool['id'] for pool in
compatible_pools.values()]
filtered_pools = []
for pool in pools:
if (pool['id'] not in subscribed_pool_ids) or \
(pool['id'] in resubscribeable_pool_ids):
filtered_pools.append(pool)
return filtered_pools
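# Illustrative sketch of the product-id filter above (the two directory
# arguments are not used by filter_product_ids, so None stands in for them;
# the pool dicts are made-up minimal examples):
#
#     pf = PoolFilter(product_dir=None, entitlement_dir=None)
#     pools = [{'productId': 'RH001', 'providedProducts': []},
#              {'productId': 'RH002',
#               'providedProducts': [{'productId': 'RH001'}]}]
#     pf.filter_product_ids(pools, ['RH001'])  # both pools are returned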
def list_pools(uep, consumer_uuid, facts, list_all=False, active_on=None, filter_string=None):
"""
Wrapper around the UEP call to fetch pools, which forces a facts update
if anything has changed before making the request. This ensures the
rule checks server side will have the most up to date info about the
consumer possible.
"""
facts.update_check(uep, consumer_uuid)
profile_mgr = cache.ProfileManager()
profile_mgr.update_check(uep, consumer_uuid)
owner = uep.getOwner(consumer_uuid)
ownerid = owner['key']
return uep.getPoolsList(consumer=consumer_uuid, listAll=list_all,
active_on=active_on, owner=ownerid, filter_string=filter_string)
# TODO: This method is morphing the actual pool json and returning a new
# dict which does not contain all the pool info. Not sure if this is really
# necessary. Also some "view" specific things going on in here.
def get_available_entitlements(facts, get_all=False, active_on=None,
overlapping=False, uninstalled=False, text=None, filter_string=None):
"""
Returns a list of entitlement pools from the server.
Facts will be updated if appropriate before making the request, to ensure
the rules on the server will pass if appropriate.
The 'all' setting can be used to return all pools, even if the rules do
not pass. (i.e. show pools that are incompatible for your hardware)
"""
columns = [
'id',
'quantity',
'consumed',
'endDate',
'productName',
'providedProducts',
'productId',
'attributes',
'pool_type',
'service_level',
'service_type',
'suggested',
'contractNumber',
'management_enabled'
]
pool_stash = PoolStash(Facts(require(ENT_DIR), require(PROD_DIR)))
dlist = pool_stash.get_filtered_pools_list(active_on, not get_all,
overlapping, uninstalled, text, filter_string)
for pool in dlist:
pool_wrapper = PoolWrapper(pool)
pool['providedProducts'] = pool_wrapper.get_provided_products()
if allows_multi_entitlement(pool):
pool['multi-entitlement'] = "Yes"
else:
pool['multi-entitlement'] = "No"
support_attrs = pool_wrapper.get_product_attributes("support_level",
"support_type")
pool['service_level'] = support_attrs['support_level']
pool['service_type'] = support_attrs['support_type']
pool['suggested'] = pool_wrapper.get_suggested_quantity()
pool['pool_type'] = pool_wrapper.get_pool_type()
pool['management_enabled'] = pool_wrapper.management_enabled()
if pool['suggested'] is None:
pool['suggested'] = ""
# no default, so default is None if key not found
data = [_sub_dict(pool, columns) for pool in dlist]
for d in data:
if int(d['quantity']) < 0:
d['quantity'] = _('Unlimited')
else:
d['quantity'] = str(int(d['quantity']) - int(d['consumed']))
d['endDate'] = format_date(isodate.parse_date(d['endDate']))
del d['consumed']
return data
class MergedPools(object):
"""
Class to track the view of merged pools for the same product.
Used to view total entitlement information across all pools for a
particular product.
"""
def __init__(self, product_id, product_name):
self.product_id = product_id
self.product_name = product_name
self.bundled_products = 0
self.quantity = 0 # how many entitlements were purchased
self.consumed = 0 # how many are in use
self.pools = []
def add_pool(self, pool):
# TODO: check if product id and name match?
self.consumed += pool['consumed']
# we want to add the quantity for this pool
# the total. if the pool is unlimited, the
# resulting quantity will be set to -1 and
# subsequent added pools will not change that.
if pool['quantity'] == -1:
self.quantity = -1
elif self.quantity != -1:
self.quantity += pool['quantity']
self.pools.append(pool)
# This is a little tricky, technically speaking, subscriptions
# decide what products they provide, so it *could* vary in some
# edge cases from one sub to another even though they are for the
# same product. For now we'll just set this value each time a pool
# is added and hope they are consistent.
self.bundled_products = len(pool['providedProducts'])
def _virt_physical_sorter(self, pool):
"""
Used to sort the pools, return Physical or Virt depending on
the value or existence of the virt_only attribute.
Returning numeric values to simulate the behavior we want.
"""
for attr in pool['attributes']:
if attr['name'] == 'virt_only' and attr['value'] == 'true':
return 1
return 2
def sort_virt_to_top(self):
"""
Prioritizes virt pools to the front of the list, if any are present.
Used by contract selector to show these first in the list.
"""
self.pools.sort(key=self._virt_physical_sorter)
def merge_pools(pools):
"""
Merges the given pools into a data structure representing the totals
for a particular product, across all pools for that product.
This provides an overview for a given product, how many total entitlements
you have available and in use across all subscriptions for that product.
Returns a dict mapping product ID to MergedPools object.
"""
# Map product ID to MergedPools object:
merged_pools = {}
for pool in pools:
if not pool['productId'] in merged_pools:
merged_pools[pool['productId']] = MergedPools(pool['productId'],
pool['productName'])
merged_pools[pool['productId']].add_pool(pool)
# Just return a list of the MergedPools objects, without the product ID
# key hashing:
return merged_pools
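# Sketch of what the merge produces (made-up pool dicts carrying only the
# keys MergedPools.add_pool() reads):
#
#     pools = [{'productId': 'RH001', 'productName': 'RHEL', 'quantity': 10,
#               'consumed': 1, 'providedProducts': []},
#              {'productId': 'RH001', 'productName': 'RHEL', 'quantity': -1,
#               'consumed': 2, 'providedProducts': []}]
#     merged = merge_pools(pools)['RH001']
#     # merged.consumed == 3; merged.quantity == -1 (an unlimited pool wins)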
class MergedPoolsStackingGroupSorter(StackingGroupSorter):
"""
Sorts a list of MergedPool objects by stacking_id.
"""
def __init__(self, merged_pools):
StackingGroupSorter.__init__(self, merged_pools)
def _get_stacking_id(self, merged_pool):
return PoolWrapper(merged_pool.pools[0]).get_stacking_id()
def _get_identity_name(self, merged_pool):
return merged_pool.pools[0]['productName']
class PoolStash(object):
"""
Object used to fetch pools from the server, sort them into compatible,
incompatible, and installed lists. Also does filtering based on name.
"""
def __init__(self, facts):
self.identity = require(IDENTITY)
self.facts = facts
self.sorter = None
# Pools which passed rules server side for this consumer:
self.compatible_pools = {}
# Pools which failed a rule check server side:
self.incompatible_pools = {}
# Pools for which we already have an entitlement:
self.subscribed_pool_ids = []
# All pools:
self.all_pools = {}
def all_pools_size(self):
return len(self.all_pools)
def refresh(self, active_on):
"""
Refresh the list of pools from the server, active on the given date.
"""
if active_on:
self.sorter = ComplianceManager(active_on)
else:
self.sorter = require(CERT_SORTER)
self.all_pools = {}
self.compatible_pools = {}
log.debug("Refreshing pools from server...")
for pool in list_pools(require(CP_PROVIDER).get_consumer_auth_cp(),
self.identity.uuid, self.facts, active_on=active_on):
self.compatible_pools[pool['id']] = pool
self.all_pools[pool['id']] = pool
# Filter the list of all pools, removing those we know are compatible.
# Sadly this currently requires a second query to the server.
self.incompatible_pools = {}
for pool in list_pools(require(CP_PROVIDER).get_consumer_auth_cp(),
self.identity.uuid, self.facts, list_all=True, active_on=active_on):
if not pool['id'] in self.compatible_pools:
self.incompatible_pools[pool['id']] = pool
self.all_pools[pool['id']] = pool
self.subscribed_pool_ids = self._get_subscribed_pool_ids()
# In the gui, cache all pool types so when we attach new ones
# we can avoid more api calls
require(POOLTYPE_CACHE).update_from_pools(self.all_pools)
log.debug("found %s pools:" % len(self.all_pools))
log.debug(" %s compatible" % len(self.compatible_pools))
log.debug(" %s incompatible" % len(self.incompatible_pools))
log.debug(" %s already subscribed" % len(self.subscribed_pool_ids))
def get_filtered_pools_list(self, active_on, incompatible,
overlapping, uninstalled, text, filter_string):
"""
Used for CLI --available filtering
cuts down on api calls
"""
self.all_pools = {}
self.compatible_pools = {}
if active_on and overlapping:
self.sorter = ComplianceManager(active_on)
elif not active_on and overlapping:
self.sorter = require(CERT_SORTER)
if incompatible:
for pool in list_pools(require(CP_PROVIDER).get_consumer_auth_cp(),
self.identity.uuid, self.facts, active_on=active_on, filter_string=filter_string):
self.compatible_pools[pool['id']] = pool
else: # --all has been used
for pool in list_pools(require(CP_PROVIDER).get_consumer_auth_cp(),
self.identity.uuid, self.facts, list_all=True, active_on=active_on, filter_string=filter_string):
self.all_pools[pool['id']] = pool
return self._filter_pools(incompatible, overlapping, uninstalled, False, text)
def _get_subscribed_pool_ids(self):
return [ent.pool.id for ent in require(ENT_DIR).list()]
def _filter_pools(self, incompatible, overlapping, uninstalled, subscribed,
text):
"""
Return a list of pool hashes, filtered according to the given options.
This method does not actually hit the server, filtering is done in
memory.
"""
log.debug("Filtering %d total pools" % len(self.all_pools))
if not incompatible:
pools = self.all_pools.values()
else:
pools = self.compatible_pools.values()
log.debug("\tRemoved %d incompatible pools" %
len(self.incompatible_pools))
pool_filter = PoolFilter(require(PROD_DIR),
require(ENT_DIR), self.sorter)
# Filter out products that are not installed if necessary:
if uninstalled:
prev_length = len(pools)
pools = pool_filter.filter_out_uninstalled(pools)
log.debug("\tRemoved %d pools for not installed products" %
(prev_length - len(pools)))
if overlapping:
prev_length = len(pools)
pools = pool_filter.filter_out_overlapping(pools)
log.debug("\tRemoved %d pools overlapping existing entitlements" %
(prev_length - len(pools)))
# Filter by product name if necessary:
if text:
prev_length = len(pools)
pools = pool_filter.filter_product_name(pools, text)
log.debug("\tRemoved %d pools not matching the search string" %
(prev_length - len(pools)))
if subscribed:
prev_length = len(pools)
pools = pool_filter.filter_subscribed_pools(pools,
self.subscribed_pool_ids, self.compatible_pools)
log.debug("\tRemoved %d pools that we're already subscribed to" %
(prev_length - len(pools)))
log.debug("\t%d pools to display, %d filtered out" % (len(pools),
len(self.all_pools) - len(pools)))
return pools
def merge_pools(self, incompatible=False, overlapping=False,
uninstalled=False, subscribed=False, text=None):
"""
Return a merged view of pools filtered according to the given options.
Pools for the same product will be merged into a MergedPool object.
Arguments turn on filters, so setting one to True will reduce the
number of results.
"""
pools = self._filter_pools(incompatible, overlapping, uninstalled,
subscribed, text)
merged_pools = merge_pools(pools)
return merged_pools
def lookup_provided_products(self, pool_id):
"""
Return a list of tuples (product name, product id) for all products
provided for a given pool. If we do not actually have any info on this
pool, return None.
"""
pool = self.all_pools.get(pool_id)
if pool is None:
log.debug("pool id %s not found in all_pools", pool_id)
return None
provided_products = []
for product in pool['providedProducts']:
provided_products.append((product['productName'], product['productId']))
return provided_products
class ImportFileExtractor(object):
"""
Responsible for checking an import file and pulling cert and key from it.
An import file may include only the certificate, but may also include its
key.
An import file is processed looking for:
-----BEGIN <TAG>-----
<CONTENT>
..
-----END <TAG>-----
and will only process if it finds CERTIFICATE or KEY in the <TAG> text.
For example the following would locate a key and cert.
-----BEGIN CERTIFICATE-----
<CERT_CONTENT>
-----END CERTIFICATE-----
-----BEGIN PUBLIC KEY-----
<KEY_CONTENT>
-----END PUBLIC KEY-----
"""
_REGEX_START_GROUP = "start"
_REGEX_CONTENT_GROUP = "content"
_REGEX_END_GROUP = "end"
_REGEX = "(?P<%s>[-]*BEGIN[\w\ ]*[-]*)(?P<%s>[^-]*)(?P<%s>[-]*END[\w\ ]*[-]*)" % \
(_REGEX_START_GROUP, _REGEX_CONTENT_GROUP, _REGEX_END_GROUP)
_PATTERN = re.compile(_REGEX)
_CERT_DICT_TAG = "CERTIFICATE"
_KEY_DICT_TAG = "KEY"
_ENT_DICT_TAG = "ENTITLEMENT"
_SIG_DICT_TAG = "RSA SIGNATURE"
def __init__(self, cert_file_path):
self.path = cert_file_path
self.file_name = os.path.basename(cert_file_path)
content = self._read(cert_file_path)
self.parts = self._process_content(content)
def _read(self, file_path):
fd = open(file_path, "r")
file_content = fd.read()
fd.close()
return file_content
def _process_content(self, content):
part_dict = {}
matches = self._PATTERN.finditer(content)
for match in matches:
start = match.group(self._REGEX_START_GROUP)
meat = match.group(self._REGEX_CONTENT_GROUP)
end = match.group(self._REGEX_END_GROUP)
dict_key = None
            if self._KEY_DICT_TAG in start:
                dict_key = self._KEY_DICT_TAG
            elif self._CERT_DICT_TAG in start:
                dict_key = self._CERT_DICT_TAG
            elif self._ENT_DICT_TAG in start:
                dict_key = self._ENT_DICT_TAG
            elif self._SIG_DICT_TAG in start:
                dict_key = self._SIG_DICT_TAG
if dict_key is None:
continue
part_dict[dict_key] = start + meat + end
return part_dict
def contains_key_content(self):
return self._KEY_DICT_TAG in self.parts
def get_key_content(self):
key_content = None
if self._KEY_DICT_TAG in self.parts:
key_content = self.parts[self._KEY_DICT_TAG]
return key_content
def get_cert_content(self):
cert_content = None
if self._CERT_DICT_TAG in self.parts:
cert_content = self.parts[self._CERT_DICT_TAG]
if self._ENT_DICT_TAG in self.parts:
cert_content = cert_content + os.linesep + self.parts[self._ENT_DICT_TAG]
if self._SIG_DICT_TAG in self.parts:
cert_content = cert_content + os.linesep + self.parts[self._SIG_DICT_TAG]
return cert_content
def verify_valid_entitlement(self):
"""
Verify that a valid entitlement was processed.
@return: True if valid, False otherwise.
"""
try:
cert = self.get_cert()
# Don't want to check class explicitly, instead we'll look for
# order info, which only an entitlement cert could have:
if not hasattr(cert, 'order'):
return False
except CertificateException:
return False
ent_key = Key(self.get_key_content())
if ent_key.bogus():
return False
return True
# TODO: rewrite to use certlib.EntitlementCertBundleInstall?
def write_to_disk(self):
"""
Write/copy cert to the entitlement cert dir.
"""
self._ensure_entitlement_dir_exists()
dest_file_path = os.path.join(ENT_CONFIG_DIR,
self._create_filename_from_cert_serial_number())
# Write the key/cert content to new files
log.debug("Writing certificate file: %s" % (dest_file_path))
cert_content = self.get_cert_content()
self._write_file(dest_file_path, cert_content)
if self.contains_key_content():
dest_key_file_path = self._get_key_path_from_dest_cert_path(dest_file_path)
log.debug("Writing key file: %s" % (dest_key_file_path))
self._write_file(dest_key_file_path, self.get_key_content())
def _write_file(self, target_path, content):
new_file = open(target_path, "w")
try:
new_file.write(content)
finally:
new_file.close()
def _ensure_entitlement_dir_exists(self):
if not os.access(ENT_CONFIG_DIR, os.R_OK):
os.mkdir(ENT_CONFIG_DIR)
def _get_key_path_from_dest_cert_path(self, dest_cert_path):
file_parts = os.path.splitext(dest_cert_path)
return file_parts[0] + "-key" + file_parts[1]
def _create_filename_from_cert_serial_number(self):
"create from serial"
ent_cert = self.get_cert()
return "%s.pem" % (ent_cert.serial)
def get_cert(self):
cert_content = self.get_cert_content()
ent_cert = create_from_pem(cert_content)
return ent_cert
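# Illustration of the splitting performed by _process_content() above
# (payloads shortened; a real import file carries base64 blocks):
#
#     -----BEGIN CERTIFICATE-----
#     <cert body>
#     -----END CERTIFICATE-----
#     -----BEGIN RSA SIGNATURE-----
#     <signature body>
#     -----END RSA SIGNATURE-----
#
# parses into {'CERTIFICATE': ..., 'RSA SIGNATURE': ...}, and
# get_cert_content() then rejoins the certificate, entitlement and signature
# sections with os.linesep when writing the cert back to disk.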
def _sub_dict(datadict, subkeys, default=None):
"""Return a dict that is a subset of datadict matching only the keys in subkeys"""
return dict([(k, datadict.get(k, default)) for k in subkeys])
def format_date(dt):
if dt:
try:
return dt.astimezone(tzlocal()).strftime("%x")
except ValueError:
log.warn("Datetime does not contain timezone information")
return dt.strftime("%x")
else:
return ""
def unregister(uep, consumer_uuid):
"""
Shared logic for un-registration.
"""
uep.unregisterConsumer(consumer_uuid)
log.info("Successfully un-registered.")
system_log("Unregistered machine with identity: %s" % consumer_uuid)
clean_all_data(backup=False)
# FIXME: move me to identity.py
def check_identity_cert_perms():
"""
Ensure the identity certs on this system have the correct permissions, and
fix them if not.
"""
certs = [identity.ConsumerIdentity.keypath(), identity.ConsumerIdentity.certpath()]
for cert in certs:
if not os.path.exists(cert):
# Only relevant if these files exist.
continue
statinfo = os.stat(cert)
if statinfo[stat.ST_UID] != 0 or statinfo[stat.ST_GID] != 0:
os.chown(cert, 0, 0)
log.warn("Corrected incorrect ownership of %s." % cert)
mode = stat.S_IMODE(statinfo[stat.ST_MODE])
if mode != ID_CERT_PERMS:
os.chmod(cert, ID_CERT_PERMS)
log.warn("Corrected incorrect permissions on %s." % cert)
def clean_all_data(backup=True):
consumer_dir = cfg.get('rhsm', 'consumerCertDir')
if backup:
if consumer_dir[-1] == "/":
consumer_dir_backup = consumer_dir[0:-1] + ".old"
else:
consumer_dir_backup = consumer_dir + ".old"
# Delete backup dir if it exists:
shutil.rmtree(consumer_dir_backup, ignore_errors=True)
# Copy current consumer dir:
log.debug("Backing up %s to %s.", consumer_dir, consumer_dir_backup)
shutil.copytree(consumer_dir, consumer_dir_backup)
# FIXME FIXME
# Delete current consumer certs:
for path in [ConsumerIdentity.keypath(), ConsumerIdentity.certpath()]:
if (os.path.exists(path)):
log.debug("Removing identity cert: %s" % path)
os.remove(path)
require(IDENTITY).reload()
# Delete all entitlement certs rather than the directory itself:
ent_cert_dir = cfg.get('rhsm', 'entitlementCertDir')
if os.path.exists(ent_cert_dir):
for f in glob.glob("%s/*.pem" % ent_cert_dir):
certpath = os.path.join(ent_cert_dir, f)
log.debug("Removing entitlement cert: %s" % f)
os.remove(certpath)
else:
log.warn("Entitlement cert directory does not exist: %s" % ent_cert_dir)
cache.ProfileManager.delete_cache()
cache.InstalledProductsManager.delete_cache()
Facts.delete_cache()
# Must also delete in-memory cache
require(ENTITLEMENT_STATUS_CACHE).delete_cache()
require(PROD_STATUS_CACHE).delete_cache()
require(OVERRIDE_STATUS_CACHE).delete_cache()
RepoActionInvoker.delete_repo_file()
log.info("Cleaned local data")
def valid_quantity(quantity):
if not quantity:
return False
try:
return int(quantity) > 0
except ValueError:
return False
def allows_multi_entitlement(pool):
"""
Determine if this pool allows multi-entitlement based on the pool's
top-level product's multi-entitlement attribute.
"""
for attribute in pool['productAttributes']:
if attribute['name'] == "multi-entitlement" and \
utils.is_true_value(attribute['value']):
return True
return False
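# Example of the attribute check above (a made-up pool carrying only the key
# this function reads; is_true_value comes from subscription_manager.utils):
#
#     pool = {'productAttributes': [{'name': 'multi-entitlement',
#                                    'value': 'true'}]}
#     allows_multi_entitlement(pool)   # -> True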
|
gpl-2.0
|
Belxjander/Kirito
|
Python-3.5.0-main/Lib/test/test_frame.py
|
12
|
4476
|
import gc
import sys
import types
import unittest
import weakref
from test import support
class ClearTest(unittest.TestCase):
"""
Tests for frame.clear().
"""
def inner(self, x=5, **kwargs):
1/0
def outer(self, **kwargs):
try:
self.inner(**kwargs)
except ZeroDivisionError as e:
exc = e
return exc
def clear_traceback_frames(self, tb):
"""
Clear all frames in a traceback.
"""
while tb is not None:
tb.tb_frame.clear()
tb = tb.tb_next
def test_clear_locals(self):
class C:
pass
c = C()
wr = weakref.ref(c)
exc = self.outer(c=c)
del c
support.gc_collect()
# A reference to c is held through the frames
self.assertIsNot(None, wr())
self.clear_traceback_frames(exc.__traceback__)
support.gc_collect()
# The reference was released by .clear()
self.assertIs(None, wr())
def test_clear_generator(self):
endly = False
def g():
nonlocal endly
try:
yield
inner()
finally:
endly = True
gen = g()
next(gen)
self.assertFalse(endly)
# Clearing the frame closes the generator
gen.gi_frame.clear()
self.assertTrue(endly)
def test_clear_executing(self):
# Attempting to clear an executing frame is forbidden.
try:
1/0
except ZeroDivisionError as e:
f = e.__traceback__.tb_frame
with self.assertRaises(RuntimeError):
f.clear()
with self.assertRaises(RuntimeError):
f.f_back.clear()
def test_clear_executing_generator(self):
# Attempting to clear an executing generator frame is forbidden.
endly = False
def g():
nonlocal endly
try:
1/0
except ZeroDivisionError as e:
f = e.__traceback__.tb_frame
with self.assertRaises(RuntimeError):
f.clear()
with self.assertRaises(RuntimeError):
f.f_back.clear()
yield f
finally:
endly = True
gen = g()
f = next(gen)
self.assertFalse(endly)
# Clearing the frame closes the generator
f.clear()
self.assertTrue(endly)
@support.cpython_only
def test_clear_refcycles(self):
# .clear() doesn't leave any refcycle behind
with support.disable_gc():
class C:
pass
c = C()
wr = weakref.ref(c)
exc = self.outer(c=c)
del c
self.assertIsNot(None, wr())
self.clear_traceback_frames(exc.__traceback__)
self.assertIs(None, wr())
class FrameLocalsTest(unittest.TestCase):
"""
Tests for the .f_locals attribute.
"""
def make_frames(self):
def outer():
x = 5
y = 6
def inner():
z = x + 2
1/0
t = 9
return inner()
try:
outer()
except ZeroDivisionError as e:
tb = e.__traceback__
frames = []
while tb:
frames.append(tb.tb_frame)
tb = tb.tb_next
return frames
def test_locals(self):
f, outer, inner = self.make_frames()
outer_locals = outer.f_locals
self.assertIsInstance(outer_locals.pop('inner'), types.FunctionType)
self.assertEqual(outer_locals, {'x': 5, 'y': 6})
inner_locals = inner.f_locals
self.assertEqual(inner_locals, {'x': 5, 'z': 7})
def test_clear_locals(self):
# Test f_locals after clear() (issue #21897)
f, outer, inner = self.make_frames()
outer.clear()
inner.clear()
self.assertEqual(outer.f_locals, {})
self.assertEqual(inner.f_locals, {})
def test_locals_clear_locals(self):
# Test f_locals before and after clear() (to exercise caching)
f, outer, inner = self.make_frames()
outer.f_locals
inner.f_locals
outer.clear()
inner.clear()
self.assertEqual(outer.f_locals, {})
self.assertEqual(inner.f_locals, {})
if __name__ == "__main__":
unittest.main()
|
gpl-3.0
|
tensor-tang/Paddle
|
python/paddle/distributed/launch_ps.py
|
2
|
5430
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import unicode_literals
import subprocess
import sys
import os
import copy
from argparse import ArgumentParser, REMAINDER
def parse_args():
# Optional arguments for the launch helper
parser = ArgumentParser(description="Distributed training")
parser.add_argument(
"--cluster_node_ips",
type=str,
default="127.0.0.1",
help="Paddle cluster nodes ips, such as 192.168.0.16,192.168.0.17..")
parser.add_argument(
"--node_ip",
type=str,
default="127.0.0.1",
help="The current node ip. ")
parser.add_argument(
"--start_port",
type=int,
default=6170,
help="The trainer's start port on a single node")
parser.add_argument(
"--print_config",
type=bool,
default=True,
help="Print the config or not")
parser.add_argument(
"--endpoints", type=str, default="", help="User defined endpoints")
parser.add_argument(
"--worker_num", type=int, default=2, help="number of workers")
parser.add_argument(
"--server_num", type=int, default=2, help="number of servers")
parser.add_argument(
"--log_dir",
default="logs",
type=str,
help="The path for each process's log.If it's not setted, the log will printed to default pipe."
)
# positional
parser.add_argument(
"training_script",
type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
def start_procs(args):
worker_num = args.worker_num
server_num = args.server_num
start_port = args.start_port
default_env = os.environ.copy()
current_env = copy.copy(default_env)
current_env.pop("http_proxy", None)
current_env.pop("https_proxy", None)
procs = []
cmds = []
log_fns = []
ports = range(start_port, start_port + server_num, 1)
default_endpoints = ",".join(["127.0.0.1:" + str(x) for x in ports])
user_endpoints = ""
if args.endpoints == "":
user_endpoints = default_endpoints
else:
user_endpoints = args.endpoints
user_endpoints_ips = [x.split(":")[0] for x in user_endpoints.split(",")]
user_endpoints_port = [x.split(":")[1] for x in user_endpoints.split(",")]
for i in range(server_num):
current_env.update({
"PADDLE_TRAINERS_NUM": str(server_num),
"PADDLE_PORT": ",".join(user_endpoints_port),
#"POD_IP": user_endpoints_ips[i],
"CURRENT_ENDPOINT":
user_endpoints_ips[i] + ":" + user_endpoints_port[i],
"PADDLE_PSERVERS": ",".join(user_endpoints_ips),
"PADDLE_TRAINING_ROLE": "PSERVER"
})
cmd = [sys.executable, "-u", args.training_script
] + args.training_script_args
cmds.append(cmd)
print(cmd)
if args.log_dir is not None:
os.system("mkdir -p {}".format(args.log_dir))
fn = open("%s/serverlog.%d" % (args.log_dir, i), "w")
log_fns.append(fn)
proc = subprocess.Popen(cmd, env=current_env, stdout=fn, stderr=fn)
else:
proc = subprocess.Popen(cmd, env=current_env)
procs.append(proc)
for i in range(worker_num):
current_env.update({
"PADDLE_PSERVERS": ",".join(user_endpoints_ips),
"PADDLE_PORT": ",".join(user_endpoints_port),
"PADDLE_TRAINERS_NUM": str(worker_num),
"PADDLE_TRAINING_ROLE": "TRAINER",
"PADDLE_TRAINER_ID": str(i)
})
cmd = [sys.executable, "-u", args.training_script
] + args.training_script_args
print(cmd)
cmds.append(cmd)
if args.log_dir is not None:
os.system("mkdir -p {}".format(args.log_dir))
fn = open("%s/workerlog.%d" % (args.log_dir, i), "w")
log_fns.append(fn)
proc = subprocess.Popen(cmd, env=current_env, stdout=fn, stderr=fn)
else:
proc = subprocess.Popen(cmd, env=current_env)
procs.append(proc)
for i in range(0, len(procs)):
proc = procs[i]
proc.wait()
if len(log_fns) > 0:
log_fns[i].close()
if proc.returncode != 0:
raise subprocess.CalledProcessError(
returncode=procs[i].returncode, cmd=cmds[i])
def launch():
args = parse_args()
if args.print_config:
start_procs(args)
# server num, worker num
if __name__ == "__main__":
launch()
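# Example invocation (illustrative; "train.py" is a placeholder for a user
# training script):
#
#     python launch_ps.py --server_num 2 --worker_num 2 --start_port 6170 \
#         train.py --some_flag
#
# start_procs() then spawns server_num + worker_num local processes, setting
# PADDLE_TRAINING_ROLE to PSERVER or TRAINER plus the endpoint variables
# assembled above for each of them.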
|
apache-2.0
|
sbidoul/buildbot
|
master/buildbot/process/debug.py
|
11
|
2024
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from buildbot.util import service
class DebugServices(service.ReconfigurableServiceMixin, service.AsyncMultiService):
name = 'debug_services'
def __init__(self):
service.AsyncMultiService.__init__(self)
self.debug_port = None
self.debug_password = None
self.debug_registration = None
self.manhole = None
@defer.inlineCallbacks
def reconfigServiceWithBuildbotConfig(self, new_config):
if new_config.manhole != self.manhole:
if self.manhole:
yield self.manhole.disownServiceParent()
self.manhole = None
if new_config.manhole:
self.manhole = new_config.manhole
yield self.manhole.setServiceParent(self)
# chain up
yield service.ReconfigurableServiceMixin.reconfigServiceWithBuildbotConfig(self,
new_config)
@defer.inlineCallbacks
def stopService(self):
# manhole will get stopped as a sub-service
yield service.AsyncMultiService.stopService(self)
# clean up
if self.manhole:
self.manhole = None
|
gpl-2.0
|
int19h/PTVS
|
Python/Tests/TestData/VirtualEnv/env/Lib/encodings/ascii.py
|
858
|
1248
|
""" Python 'ascii' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.ascii_encode
decode = codecs.ascii_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.ascii_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.ascii_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
class StreamConverter(StreamWriter,StreamReader):
encode = codecs.ascii_decode
decode = codecs.ascii_encode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='ascii',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
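if __name__ == "__main__":
    # Minimal self-check (illustrative, not part of the codec machinery):
    # round-trip a plain ASCII string through the incremental classes above.
    _enc = IncrementalEncoder()
    _dec = IncrementalDecoder()
    assert _dec.decode(_enc.encode("hello")) == "hello"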
|
apache-2.0
|
landism/pants
|
tests/python/pants_test/backend/jvm/tasks/test_detect_duplicates.py
|
8
|
10101
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.tasks.detect_duplicates import DuplicateDetector
from pants.base.exceptions import TaskError
from pants.java.jar.jar_dependency import JarDependency
from pants.java.jar.jar_dependency_utils import M2Coordinate, ResolvedJar
from pants.util.contextutil import open_zip
from pants.util.dirutil import safe_mkdir, safe_mkdir_for, touch
from pants_test.jvm.jvm_task_test_base import JvmTaskTestBase
class DuplicateDetectorTest(JvmTaskTestBase):
@classmethod
def task_type(cls):
return DuplicateDetector
def setUp(self):
super(DuplicateDetectorTest, self).setUp()
self.classes_dir = os.path.join(self.test_workdir, 'classes')
safe_mkdir(self.classes_dir)
def generate_class(name):
path = os.path.join(self.classes_dir, name)
touch(path)
return path
test_class_path = generate_class('com/twitter/Test.class')
duplicate_class_path = generate_class('com/twitter/commons/Duplicate.class')
unique_class_path = generate_class('org/apache/Unique.class')
unicode_class_path = generate_class('cucumber/api/java/zh_cn/假如.class')
def generate_jar(path, *class_name):
jar_path = os.path.join(self.test_workdir, 'jars', path)
safe_mkdir_for(jar_path)
with open_zip(jar_path, 'w') as zipfile:
for clazz in class_name:
zipfile.write(clazz, os.path.relpath(clazz, self.classes_dir))
return jar_path
self.test_jar = generate_jar('test.jar', test_class_path, duplicate_class_path)
self.dups_jar = generate_jar('dups.jar', duplicate_class_path, unique_class_path)
self.no_dups_jar = generate_jar('no_dups.jar', unique_class_path)
self.unicode_jar = generate_jar('unicode_class.jar', unicode_class_path)
def resolved_jarlib(name, jar_path):
resolved_jar = ResolvedJar(M2Coordinate(org='org.example', name=name, rev='0.0.1'),
cache_path=jar_path,
pants_path=jar_path)
jar_dep = JarDependency(org='org.example', name=name, rev='0.0.1')
jar_library = self.make_target(spec='3rdparty:{}'.format(name),
target_type=JarLibrary,
jars=[jar_dep])
return jar_library, resolved_jar
self.test_jarlib, self.test_resolved_jar = resolved_jarlib('test', self.test_jar)
self.dups_jarlib, self.dups_resolved_jar = resolved_jarlib('dups', self.dups_jar)
self.no_dups_jarlib, self.no_dups_resolved_jar = resolved_jarlib('no_dups', self.no_dups_jar)
self.unicode_jarlib, self.unicode_resolved_jar = resolved_jarlib('unicode', self.unicode_jar)
def _setup_external_duplicate(self):
jvm_binary = self.make_target(spec='src/java/com/twitter:thing',
target_type=JvmBinary,
dependencies=[self.test_jarlib, self.dups_jarlib])
context = self.context(target_roots=[jvm_binary])
task = self.create_task(context)
classpath = self.get_runtime_classpath(context)
classpath.add_jars_for_targets([self.test_jarlib], 'default', [self.test_resolved_jar])
classpath.add_jars_for_targets([self.dups_jarlib], 'default', [self.dups_resolved_jar])
return task, jvm_binary
def test_duplicate_found_external(self):
self.set_options(fail_fast=False)
task, jvm_binary = self._setup_external_duplicate()
conflicts_by_binary = task.execute()
expected = {
jvm_binary: {
('org.example-dups-0.0.1.jar', 'org.example-test-0.0.1.jar'):
{'com/twitter/commons/Duplicate.class'}
}
}
self.assertEqual(expected, conflicts_by_binary)
def test_duplicate_skip(self):
self.set_options(fail_fast=False, skip=True)
task, _ = self._setup_external_duplicate()
conflicts_by_binary = task.execute()
self.assertEqual(None, conflicts_by_binary)
def test_duplicate_excluded_file(self):
self.set_options(fail_fast=False, excludes=[], exclude_files=['Duplicate.class'])
task, jvm_binary = self._setup_external_duplicate()
conflicts_by_binary = task.execute()
self.assertEqual({}, conflicts_by_binary)
def _setup_internal_duplicate(self):
java_library = self.make_target(spec='src/java/com/twitter:lib',
target_type=JavaLibrary,
sources=[])
jvm_binary = self.make_target(spec='src/java/com/twitter:thing',
target_type=JvmBinary,
dependencies=[java_library])
context = self.context(target_roots=[jvm_binary])
task = self.create_task(context)
classpath = self.get_runtime_classpath(context)
classpath.add_for_target(java_library, [('default', self.classes_dir)])
classpath.add_for_target(jvm_binary, [('default', self.classes_dir)])
return task, jvm_binary
def test_duplicate_found_internal(self):
self.set_options(fail_fast=False)
task, jvm_binary = self._setup_internal_duplicate()
conflicts_by_binary = task.execute()
expected = {
jvm_binary: {
('src/java/com/twitter:lib', 'src/java/com/twitter:thing'):
{'com/twitter/Test.class',
'com/twitter/commons/Duplicate.class',
'org/apache/Unique.class',
'cucumber/api/java/zh_cn/假如.class'}
}
}
self.assertEqual(expected, conflicts_by_binary)
def test_duplicate_excluded_internal(self):
self.set_options(fail_fast=False, excludes=[], exclude_files=['Duplicate.class', '假如.class'])
task, jvm_binary = self._setup_internal_duplicate()
conflicts_by_binary = task.execute()
expected = {
jvm_binary: {
('src/java/com/twitter:lib', 'src/java/com/twitter:thing'):
{'com/twitter/Test.class',
'org/apache/Unique.class'}
}
}
self.assertEqual(expected, conflicts_by_binary)
def test_duplicate_found_mixed(self):
self.set_options(fail_fast=False)
jvm_binary = self.make_target(spec='src/java/com/twitter:thing',
target_type=JvmBinary,
dependencies=[self.test_jarlib])
context = self.context(target_roots=[jvm_binary])
task = self.create_task(context)
classpath = self.get_runtime_classpath(context)
classpath.add_for_target(jvm_binary, [('default', self.classes_dir)])
classpath.add_jars_for_targets([self.test_jarlib], 'default', [self.test_resolved_jar])
conflicts_by_binary = task.execute()
expected = {
jvm_binary: {
('org.example-test-0.0.1.jar', 'src/java/com/twitter:thing'):
{'com/twitter/Test.class', 'com/twitter/commons/Duplicate.class'}
}
}
self.assertEqual(expected, conflicts_by_binary)
def test_duplicate_not_found(self):
self.set_options(fail_fast=True)
jvm_binary = self.make_target(spec='src/java/com/twitter:thing',
target_type=JvmBinary,
dependencies=[self.no_dups_jarlib,
self.unicode_jarlib])
context = self.context(target_roots=[jvm_binary])
task = self.create_task(context)
classpath = self.get_runtime_classpath(context)
classpath.add_jars_for_targets([self.no_dups_jarlib], 'default', [self.no_dups_resolved_jar])
classpath.add_jars_for_targets([self.unicode_jarlib], 'default', [self.unicode_resolved_jar])
conflicts_by_binary = task.execute()
self.assertEqual({}, conflicts_by_binary)
def test_fail_fast_error_raised(self):
self.set_options(fail_fast=True)
jvm_binary = self.make_target(spec='src/java/com/twitter:thing',
target_type=JvmBinary,
dependencies=[self.test_jarlib])
context = self.context(target_roots=[jvm_binary])
task = self.create_task(context)
classpath = self.get_runtime_classpath(context)
classpath.add_for_target(jvm_binary, [('default', self.classes_dir)])
classpath.add_jars_for_targets([self.test_jarlib], 'default', [self.test_resolved_jar])
with self.assertRaises(TaskError):
task.execute()
def test_is_excluded_default(self):
task = self.create_task(self.context())
self.assertFalse(task._is_excluded('foo'))
self.assertFalse(task._is_excluded('foo/BCKEY.DSA'))
    # excluded_files: No directory
self.assertTrue(task._is_excluded('.DS_Store'))
# excluded_files: Mixed case
self.assertTrue(task._is_excluded('NOTICE.txt'))
# excluded_files: Leading directory
self.assertTrue(task._is_excluded('/foo/bar/dependencies'))
# excluded_dirs:
self.assertTrue(task._is_excluded('META-INF/services/foo'))
# excluded_patterns:
self.assertTrue(task._is_excluded('META-INF/BCKEY.RSA'))
def test_is_excluded_pattern(self):
self.set_options(exclude_patterns=[r'.*/garbage\.'])
task = self.create_task(self.context())
self.assertTrue(task._is_excluded('foo/garbage.txt'))
def test_is_excluded_files(self):
self.set_options(excludes=None, exclude_files=['bckey.dsa'])
task = self.create_task(self.context())
self.assertTrue(task._is_excluded('foo/BCKEY.DSA'))
# Defaults are now overridden
self.assertFalse(task._is_excluded('NOTICE.txt'))
def test_is_excluded_files_again(self):
self.set_options(exclude_dirs=['org/duplicated'])
task = self.create_task(self.context())
self.assertTrue(task._is_excluded('org/duplicated/FOO'))
# Defaults are now overridden
self.assertFalse(task._is_excluded('META-INF/services/foo'))
|
apache-2.0
|
mlperf/inference_results_v0.5
|
open/NVIDIA/code/gnmt/tensorrt/GNMT.py
|
1
|
4828
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os, sys
import shutil
sys.path.insert(0, os.getcwd())
from code.common import logging, dict_get, run_command
g_calibration_cache = "code/gnmt/tensorrt/data/Int8CalibrationCache"
class GNMTBuilder():
def __init__(self, args):
self.args = args
self.name = "gnmt"
self.system_id = args["system_id"]
self.scenario = args["scenario"]
self.engine_dir = "./build/engines/{:}/{:}/{:}".format(self.system_id, self.name, self.scenario)
self.device_type = "gpu"
self.precision = args["precision"]
self.precision_flag = ""
if (self.precision == "int8") or (self.precision == "fp16"):
self.precision_flag = "-t fp16"
if dict_get(args, "enable_int8_generator", default=False):
self.precision_flag += " --int8Generator --calibration_cache {}".format(g_calibration_cache)
def build_engines(self):
if self.scenario == "MultiStream":
raise NotImplementedError("GNMT MultiStream scenario is not yet implemented")
elif self.scenario == "SingleStream":
batch_sizes = [ self.args["batch_size"] ]
elif self.scenario == "Offline":
batch_sizes = [ self.args["batch_size"] ]
elif self.scenario == "Server":
batch_sizes = self.args["batch_sizes"]
else:
raise ValueError("Invalid scenario: {:}".format(self.scenario))
beam_size = dict_get(self.args, "beam_size", default=10)
seq_len_slots = dict_get(self.args, "seq_len_slots", default=1)
mpbs_flag = ""
if "max_persistent_bs" in self.args:
mpbs_flag = "--max_persistent_bs {:}".format(self.args["max_persistent_bs"])
if not os.path.exists(self.engine_dir):
os.makedirs(self.engine_dir)
for batch_size in batch_sizes:
engine_name = "{:}/{:}-{:}-gpu-b{:}-{:}.plan".format(self.engine_dir, self.name, self.scenario, batch_size, self.precision)
logging.info("Building {:}".format(engine_name))
cmd = "build/bin/GNMT/gnmt --seq_len_slots {seq_len_slots} --bm {beam_size} --build_only --bs {batch_size} --store_engine {engine_name} {max_persistent_bs} {precision}".format(
seq_len_slots=seq_len_slots,
beam_size=beam_size,
batch_size=batch_size,
engine_name=engine_name,
max_persistent_bs=mpbs_flag,
precision=self.precision_flag,
)
run_command(cmd)
def calibrate(self):
beam_size = 1
batch_size = 1
num_batches = 64
output_dir_1 = "calib_phase_1"
output_dir_2 = "calib_phase_2"
try:
phase_1 = "./build/bin/GNMT/gnmt --calibration_phase 1 --input_file {input} --num_batches {num_batches} --bm {beam_size} --bs {batch_size} --output_dir {output_dir}".format(
input="build/inference/calibration/translation/calibration_data.tok.bpe.en",
num_batches = num_batches,
beam_size = beam_size,
batch_size = batch_size,
output_dir = output_dir_1
)
run_command(phase_1)
# Get the directory to raw output
calib_data = None
for d in os.listdir(output_dir_1):
if "gnmt_tensors" in d:
calib_data = os.path.join(output_dir_1, d)
            assert calib_data is not None
phase_2 = "./build/bin/GNMT/gnmt --calibration_phase 2 --bm {beam_size} --calibration_data {input} --calibration_cache {calib_cache} --output_dir {output_dir}".format(
input = calib_data,
beam_size = beam_size,
batch_size = batch_size,
calib_cache = g_calibration_cache,
output_dir = output_dir_2
)
run_command(phase_2)
# Remove the generated intermediate output files
finally:
if os.path.exists(output_dir_1):
shutil.rmtree(output_dir_1)
if os.path.exists(output_dir_2):
shutil.rmtree(output_dir_2)
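# Illustrative use of the builder above (the args values are made up; the
# keys are the ones read in __init__() and build_engines()):
#
#     builder = GNMTBuilder({"system_id": "T4x1", "scenario": "Offline",
#                            "precision": "fp16", "batch_size": 64})
#     builder.build_engines()   # shells out to build/bin/GNMT/gnmt via run_command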
|
apache-2.0
|
nerzhul/ansible
|
lib/ansible/modules/cloud/digital_ocean/digital_ocean_tag.py
|
47
|
7481
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: digital_ocean_tag
short_description: Create and remove tag(s) to DigitalOcean resource.
description:
- Create and remove tag(s) to DigitalOcean resource.
version_added: "2.2"
options:
name:
description:
- The name of the tag. The supported characters for names include
alphanumeric characters, dashes, and underscores.
required: true
resource_id:
description:
- The ID of the resource to operate on.
resource_type:
description:
- The type of resource to operate on. Currently only tagging of
droplets is supported.
default: droplet
choices: ['droplet']
state:
description:
- Whether the tag should be present or absent on the resource.
default: present
choices: ['present', 'absent']
api_token:
description:
- DigitalOcean api token.
notes:
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN.
They both refer to the v2 token.
- As of Ansible 2.0, Version 2 of the DigitalOcean API is used.
requirements:
- "python >= 2.6"
'''
EXAMPLES = '''
- name: create a tag
digital_ocean_tag:
name: production
state: present
- name: tag a resource; creating the tag if it does not exists
digital_ocean_tag:
name: "{{ item }}"
resource_id: YYY
state: present
with_items:
- staging
- dbserver
- name: untag a resource
digital_ocean_tag:
name: staging
resource_id: YYY
state: absent
# Deleting a tag also untags all the resources that have previously been
# tagged with it
- name: remove a tag
digital_ocean_tag:
name: dbserver
state: absent
'''
RETURN = '''
data:
description: a DigitalOcean Tag resource
returned: success and no resource constraint
type: dict
sample: {
"tag": {
"name": "awesome",
"resources": {
"droplets": {
"count": 0,
"last_tagged": null
}
}
}
}
'''
import json
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
class Response(object):
def __init__(self, resp, info):
self.body = None
if resp:
self.body = resp.read()
self.info = info
@property
def json(self):
if not self.body:
if "body" in self.info:
return json.loads(self.info["body"])
return None
try:
return json.loads(self.body)
except ValueError:
return None
@property
def status_code(self):
return self.info["status"]
class Rest(object):
def __init__(self, module, headers):
self.module = module
self.headers = headers
self.baseurl = 'https://api.digitalocean.com/v2'
def _url_builder(self, path):
if path[0] == '/':
path = path[1:]
return '%s/%s' % (self.baseurl, path)
def send(self, method, path, data=None, headers=None):
url = self._url_builder(path)
data = self.module.jsonify(data)
resp, info = fetch_url(self.module, url, data=data, headers=self.headers, method=method)
return Response(resp, info)
def get(self, path, data=None, headers=None):
return self.send('GET', path, data, headers)
def put(self, path, data=None, headers=None):
return self.send('PUT', path, data, headers)
def post(self, path, data=None, headers=None):
return self.send('POST', path, data, headers)
def delete(self, path, data=None, headers=None):
return self.send('DELETE', path, data, headers)
def core(module):
try:
api_token = module.params['api_token'] or \
os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY']
except KeyError as e:
module.fail_json(msg='Unable to load %s' % e.message)
state = module.params['state']
name = module.params['name']
resource_id = module.params['resource_id']
resource_type = module.params['resource_type']
rest = Rest(module, {'Authorization': 'Bearer {}'.format(api_token),
'Content-type': 'application/json'})
    if state == 'present':
if name is None:
module.fail_json(msg='parameter `name` is missing')
# Ensure Tag exists
response = rest.post("tags", data={'name': name})
status_code = response.status_code
json = response.json
if status_code == 201:
changed = True
elif status_code == 422:
changed = False
else:
module.exit_json(changed=False, data=json)
if resource_id is None:
# No resource defined, we're done.
if json is None:
module.exit_json(changed=changed, data=json)
else:
module.exit_json(changed=changed, data=json)
else:
# Tag a resource
url = "tags/{}/resources".format(name)
payload = {
'resources': [{
'resource_id': resource_id,
'resource_type': resource_type}]}
response = rest.post(url, data=payload)
if response.status_code == 204:
module.exit_json(changed=True)
else:
module.fail_json(msg="error tagging resource '{}': {}".format(
resource_id, response.json["message"]))
    elif state == 'absent':
if name is None:
module.fail_json(msg='parameter `name` is missing')
if resource_id:
url = "tags/{}/resources".format(name)
payload = {
'resources': [{
'resource_id': resource_id,
'resource_type': resource_type}]}
response = rest.delete(url, data=payload)
else:
url = "tags/{}".format(name)
response = rest.delete(url)
if response.status_code == 204:
module.exit_json(changed=True)
else:
module.exit_json(changed=False, data=response.json)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
resource_id=dict(aliases=['droplet_id'], type='int'),
resource_type=dict(choices=['droplet'], default='droplet'),
state=dict(choices=['present', 'absent'], default='present'),
api_token=dict(aliases=['API_TOKEN'], no_log=True),
)
)
try:
core(module)
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
gpl-3.0
|
faucamp/python-gsmmodem
|
examples/dial_callback_demo.py
|
12
|
2583
|
#!/usr/bin/env python
"""\
Demo: dial a number (using callbacks to track call status)
Simple demo app that makes a voice call and plays some DTMF tones (if supported by the modem)
when the call is answered, and then hangs up the call.
It uses the dial() method's callback mechanism to be informed when the call is answered and ended.
Note: you need to modify the NUMBER variable for this to work
"""
from __future__ import print_function
import sys, time, logging
PORT = '/dev/ttyUSB2'
BAUDRATE = 115200
NUMBER = '00000' # Number to dial - CHANGE THIS TO A REAL NUMBER
PIN = None # SIM card PIN (if any)
from gsmmodem.modem import GsmModem
from gsmmodem.exceptions import InterruptedException, CommandError
waitForCallback = True
def callStatusCallback(call):
global waitForCallback
print('Call status update callback function called')
if call.answered:
print('Call has been answered; waiting a while...')
# Wait for a bit - some older modems struggle to send DTMF tone immediately after answering a call
time.sleep(3.0)
print('Playing DTMF tones...')
try:
if call.active: # Call could have been ended by remote party while we waited in the time.sleep() call
call.sendDtmfTone('9515999955951')
except InterruptedException as e:
# Call was ended during playback
print('DTMF playback interrupted: {0} ({1} Error {2})'.format(e, e.cause.type, e.cause.code))
except CommandError as e:
print('DTMF playback failed: {0}'.format(e))
finally:
if call.active: # Call is still active
print('Hanging up call...')
call.hangup()
waitForCallback = False
else:
# Call is no longer active (remote party ended it)
print('Call has been ended by remote party')
waitForCallback = False
def main():
    if NUMBER is None or NUMBER == '00000':
print('Error: Please change the NUMBER variable\'s value before running this example.')
sys.exit(1)
print('Initializing modem...')
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
modem = GsmModem(PORT, BAUDRATE)
modem.connect(PIN)
print('Waiting for network coverage...')
modem.waitForNetworkCoverage(30)
print('Dialing number: {0}'.format(NUMBER))
call = modem.dial(NUMBER, callStatusUpdateCallbackFunc=callStatusCallback)
global waitForCallback
while waitForCallback:
time.sleep(0.1)
print('Done')
if __name__ == '__main__':
main()
|
lgpl-3.0
|
Mega-DatA-Lab/mxnet
|
example/rcnn/rcnn/symbol/proposal_target.py
|
25
|
4760
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Proposal Target Operator selects foreground and background roi and assigns label, bbox_transform to them.
"""
import logging
import mxnet as mx
import numpy as np
from distutils.util import strtobool
from ..logger import logger
from rcnn.io.rcnn import sample_rois
class ProposalTargetOperator(mx.operator.CustomOp):
def __init__(self, num_classes, batch_images, batch_rois, fg_fraction):
super(ProposalTargetOperator, self).__init__()
self._num_classes = num_classes
self._batch_images = batch_images
self._batch_rois = batch_rois
self._fg_fraction = fg_fraction
if logger.level == logging.DEBUG:
self._count = 0
self._fg_num = 0
self._bg_num = 0
def forward(self, is_train, req, in_data, out_data, aux):
assert self._batch_rois % self._batch_images == 0, \
            'BATCH_IMAGES {} must divide BATCH_ROIS {}'.format(self._batch_images, self._batch_rois)
rois_per_image = self._batch_rois / self._batch_images
fg_rois_per_image = np.round(self._fg_fraction * rois_per_image).astype(int)
all_rois = in_data[0].asnumpy()
gt_boxes = in_data[1].asnumpy()
# Include ground-truth boxes in the set of candidate rois
zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
all_rois = np.vstack((all_rois, np.hstack((zeros, gt_boxes[:, :-1]))))
# Sanity check: single batch only
assert np.all(all_rois[:, 0] == 0), 'Only single item batches are supported'
rois, labels, bbox_targets, bbox_weights = \
sample_rois(all_rois, fg_rois_per_image, rois_per_image, self._num_classes, gt_boxes=gt_boxes)
if logger.level == logging.DEBUG:
logger.debug("labels: %s" % labels)
logger.debug('num fg: {}'.format((labels > 0).sum()))
logger.debug('num bg: {}'.format((labels == 0).sum()))
self._count += 1
self._fg_num += (labels > 0).sum()
self._bg_num += (labels == 0).sum()
logger.debug("self._count: %d" % self._count)
logger.debug('num fg avg: %d' % (self._fg_num / self._count))
logger.debug('num bg avg: %d' % (self._bg_num / self._count))
logger.debug('ratio: %.3f' % (float(self._fg_num) / float(self._bg_num)))
for ind, val in enumerate([rois, labels, bbox_targets, bbox_weights]):
self.assign(out_data[ind], req[ind], val)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 0)
self.assign(in_grad[1], req[1], 0)
@mx.operator.register('proposal_target')
class ProposalTargetProp(mx.operator.CustomOpProp):
def __init__(self, num_classes, batch_images, batch_rois, fg_fraction='0.25'):
super(ProposalTargetProp, self).__init__(need_top_grad=False)
self._num_classes = int(num_classes)
self._batch_images = int(batch_images)
self._batch_rois = int(batch_rois)
self._fg_fraction = float(fg_fraction)
def list_arguments(self):
return ['rois', 'gt_boxes']
def list_outputs(self):
return ['rois_output', 'label', 'bbox_target', 'bbox_weight']
def infer_shape(self, in_shape):
rpn_rois_shape = in_shape[0]
gt_boxes_shape = in_shape[1]
output_rois_shape = (self._batch_rois, 5)
label_shape = (self._batch_rois, )
bbox_target_shape = (self._batch_rois, self._num_classes * 4)
bbox_weight_shape = (self._batch_rois, self._num_classes * 4)
return [rpn_rois_shape, gt_boxes_shape], \
[output_rois_shape, label_shape, bbox_target_shape, bbox_weight_shape]
def create_operator(self, ctx, shapes, dtypes):
return ProposalTargetOperator(self._num_classes, self._batch_images, self._batch_rois, self._fg_fraction)
def declare_backward_dependency(self, out_grad, in_data, out_data):
return []
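# Illustrative wiring sketch (not part of this file; the surrounding symbol names are
# assumptions): the operator registered above is typically instantiated inside a symbol
# graph with mx.symbol.Custom, matching list_arguments()/list_outputs():
#
#   group = mx.symbol.Custom(rois=rois, gt_boxes=gt_boxes,
#                            op_type='proposal_target', num_classes=num_classes,
#                            batch_images=batch_images, batch_rois=batch_rois,
#                            fg_fraction='0.25')
#   rois_output, label, bbox_target, bbox_weight = group[0], group[1], group[2], group[3]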
|
apache-2.0
|
HyperBaton/ansible
|
lib/ansible/plugins/action/cli_config.py
|
10
|
1302
|
#
# Copyright 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
self._config_module = True
if self._play_context.connection.split('.')[-1] != 'network_cli':
return {'failed': True, 'msg': 'Connection type %s is not valid for cli_config module' % self._play_context.connection}
return super(ActionModule, self).run(task_vars=task_vars)
|
gpl-3.0
|
Workday/OpenFrame
|
tools/polymer/txt_to_polymer_grdp.py
|
21
|
2455
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import os
import string
import sys
FILE_TEMPLATE = \
"""<?xml version="1.0" encoding="utf-8"?>
<!--
This file is generated.
Please use 'src/tools/polymer/polymer_grdp_to_txt.py' and
'src/tools/polymer/txt_to_polymer_grdp.py' to modify it, if possible.
'polymer_grdp_to_txt.py' converts 'polymer_resources.grdp' to a plain list of
used Polymer components:
...
iron-iron-iconset/iron-iconset-extracted.js
iron-iron-iconset/iron-iconset.html
...
'txt_to_polymer_grdp.py' converts list back to GRDP file.
Usage:
$ polymer_grdp_to_txt.py polymer_resources.grdp > /tmp/list.txt
$ vim /tmp/list.txt
$ txt_to_polymer_grdp.py /tmp/list.txt > polymer_resources.grdp
-->
<grit-part>
<!-- Polymer 1.0 -->
%(v_1_0)s
<structure name="IDR_POLYMER_1_0_WEB_ANIMATIONS_JS_WEB_ANIMATIONS_NEXT_LITE_MIN_JS"
file="../../../third_party/web-animations-js/sources/web-animations-next-lite.min.js"
type="chrome_html" />
</grit-part>
"""
DEFINITION_TEMPLATE_1_0 = \
""" <structure name="%s"
file="../../../third_party/polymer/v1_0/components-chromium/%s"
type="chrome_html" />"""
def PathToGritId(path):
table = string.maketrans(string.lowercase + '/.-', string.uppercase + '___')
return 'IDR_POLYMER_1_0_' + path.translate(table)
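# Worked example: PathToGritId('iron-icon/iron-icon-extracted.js') returns
# 'IDR_POLYMER_1_0_IRON_ICON_IRON_ICON_EXTRACTED_JS' -- lowercase letters are
# uppercased and '/', '.' and '-' are all mapped to '_'.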
def SortKey(record):
return (record, PathToGritId(record))
def ParseRecord(record):
return record.strip()
class FileNotFoundException(Exception):
pass
_HERE = os.path.dirname(os.path.realpath(__file__))
_POLYMER_DIR = os.path.join(_HERE, os.pardir, os.pardir,
'third_party', 'polymer', 'v1_0', 'components-chromium')
def main(argv):
with open(argv[1]) as f:
records = [ParseRecord(r) for r in f if not r.isspace()]
lines = { 'v_1_0': [] }
for path in sorted(set(records), key=SortKey):
full_path = os.path.normpath(os.path.join(_POLYMER_DIR, path))
if not os.path.exists(full_path):
raise FileNotFoundException('%s not found' % full_path)
template = DEFINITION_TEMPLATE_1_0
lines['v_1_0'].append(
template % (PathToGritId(path), path))
print FILE_TEMPLATE % { 'v_1_0': '\n'.join(lines['v_1_0']) }
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
bsd-3-clause
|
vicky2135/lucious
|
oscar/lib/python2.7/site-packages/pip/_vendor/webencodings/mklabels.py
|
512
|
1305
|
"""
webencodings.mklabels
~~~~~~~~~~~~~~~~~~~~~
    Regenerate the webencodings.labels module.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
import json
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
def assert_lower(string):
assert string == string.lower()
return string
def generate(url):
parts = ['''\
"""
webencodings.labels
~~~~~~~~~~~~~~~~~~~
Map encoding labels to their name.
:copyright: Copyright 2012 by Simon Sapin
:license: BSD, see LICENSE for details.
"""
# XXX Do not edit!
# This file is automatically generated by mklabels.py
LABELS = {
''']
labels = [
(repr(assert_lower(label)).lstrip('u'),
repr(encoding['name']).lstrip('u'))
for category in json.loads(urlopen(url).read().decode('ascii'))
for encoding in category['encodings']
for label in encoding['labels']]
max_len = max(len(label) for label, name in labels)
parts.extend(
' %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name)
for label, name in labels)
parts.append('}')
return ''.join(parts)
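# For illustration (assuming the layout of the spec's encodings.json), each generated
# LABELS entry is a single line of the form
#     '<label>': '<encoding name>',
# with labels padded so the encoding names line up in one column.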
if __name__ == '__main__':
print(generate('http://encoding.spec.whatwg.org/encodings.json'))
|
bsd-3-clause
|
phuongxuanpham/SelfDrivingCar
|
CarND-Alexnet-Feature-Extraction-Lab/traffic_sign_inference_solution.py
|
7
|
1040
|
"""
The traffic signs are 32x32 so you
have to resize them to be 227x227 before
passing them to AlexNet.
"""
import time
import tensorflow as tf
import numpy as np
from scipy.misc import imread
from caffe_classes import class_names
from alexnet import AlexNet
# placeholders
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
resized = tf.image.resize_images(x, (227, 227))
probs = AlexNet(resized)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# Read Images
im1 = imread("construction.jpg").astype(np.float32)
im1 = im1 - np.mean(im1)
im2 = imread("stop.jpg").astype(np.float32)
im2 = im2 - np.mean(im2)
# Run Inference
t = time.time()
output = sess.run(probs, feed_dict={x: [im1, im2]})
# Print Output
for input_im_ind in range(output.shape[0]):
inds = np.argsort(output)[input_im_ind, :]
print("Image", input_im_ind)
for i in range(5):
print("%s: %.3f" % (class_names[inds[-1 - i]], output[input_im_ind, inds[-1 - i]]))
print()
print("Time: %.3f seconds" % (time.time() - t))
|
gpl-3.0
|
pomegranited/edx-platform
|
common/test/acceptance/pages/studio/users.py
|
15
|
9666
|
"""
Page classes to test either the Course Team page or the Library Team page.
"""
from bok_choy.promise import EmptyPromise
from bok_choy.page_object import PageObject
from ...tests.helpers import disable_animations
from .course_page import CoursePage
from . import BASE_URL
def wait_for_ajax_or_reload(browser):
"""
Wait for all ajax requests to finish, OR for the page to reload.
    Normal wait_for_ajax() chokes on occasion if the page reloads,
giving "WebDriverException: Message: u'jQuery is not defined'"
"""
def _is_ajax_finished():
""" Wait for jQuery to finish all AJAX calls, if it is present. """
return browser.execute_script("return typeof(jQuery) == 'undefined' || jQuery.active == 0")
EmptyPromise(_is_ajax_finished, "Finished waiting for ajax requests.").fulfill()
class UsersPageMixin(PageObject):
""" Common functionality for course/library team pages """
new_user_form_selector = '.form-create.create-user .user-email-input'
def url(self):
"""
URL to this page - override in subclass
"""
raise NotImplementedError
def is_browser_on_page(self):
"""
Returns True if the browser has loaded the page.
"""
return self.q(css='body.view-team').present and not self.q(css='.ui-loading').present
@property
def users(self):
"""
Return a list of users listed on this page.
"""
return self.q(css='.user-list .user-item').map(
lambda el: UserWrapper(self.browser, el.get_attribute('data-email'))
).results
@property
def usernames(self):
"""
Returns a list of user names for users listed on this page
"""
return [user.name for user in self.users]
@property
def has_add_button(self):
"""
Is the "New Team Member" button present?
"""
return self.q(css='.create-user-button').present
def click_add_button(self):
"""
Click on the "New Team Member" button
"""
self.q(css='.create-user-button').first.click()
self.wait_for(lambda: self.new_user_form_visible, "Add user form is visible")
@property
def new_user_form_visible(self):
""" Is the new user form visible? """
return self.q(css='.form-create.create-user .user-email-input').visible
def set_new_user_email(self, email):
""" Set the value of the "New User Email Address" field. """
self.q(css='.form-create.create-user .user-email-input').fill(email)
def click_submit_new_user_form(self):
""" Submit the "New User" form """
self.q(css='.form-create.create-user .action-primary').click()
wait_for_ajax_or_reload(self.browser)
def get_user(self, email):
""" Gets user wrapper by email """
target_users = [user for user in self.users if user.email == email]
assert len(target_users) == 1
return target_users[0]
def add_user_to_course(self, email):
""" Adds user to a course/library """
self.wait_for_element_visibility('.create-user-button', "Add team member button is available")
self.click_add_button()
self.set_new_user_email(email)
self.click_submit_new_user_form()
def delete_user_from_course(self, email):
""" Deletes user from course/library """
target_user = self.get_user(email)
target_user.click_delete()
def modal_dialog_visible(self, dialog_type):
""" Checks if modal dialog of specified class is displayed """
return self.q(css='.prompt.{dialog_type}'.format(dialog_type=dialog_type)).visible
def modal_dialog_text(self, dialog_type):
""" Gets modal dialog text """
return self.q(css='.prompt.{dialog_type} .message'.format(dialog_type=dialog_type)).text[0]
def wait_until_no_loading_indicator(self):
"""
When the page first loads, there is a loading indicator and most
functionality is not yet available. This waits for that loading to finish
and be removed from the DOM.
This method is different from wait_until_ready because the loading element
is removed from the DOM, rather than hidden.
It also disables animations for improved test reliability.
"""
self.wait_for(
lambda: not self.q(css='.ui-loading').present,
"Wait for page to complete its initial loading"
)
disable_animations(self)
def wait_until_ready(self):
"""
When the page first loads, there is a loading indicator and most
functionality is not yet available. This waits for that loading to
finish.
This method is different from wait_until_no_loading_indicator because this expects
the loading indicator to still exist on the page; it is just hidden.
It also disables animations for improved test reliability.
"""
self.wait_for_element_invisibility(
'.ui-loading',
'Wait for the page to complete its initial loading'
)
disable_animations(self)
class LibraryUsersPage(UsersPageMixin):
"""
Library Team page in Studio
"""
def __init__(self, browser, locator):
super(LibraryUsersPage, self).__init__(browser)
self.locator = locator
@property
def url(self):
"""
URL to the "User Access" page for the given library.
"""
return "{}/library/{}/team/".format(BASE_URL, unicode(self.locator))
class CourseTeamPage(CoursePage, UsersPageMixin):
"""
Course Team page in Studio.
"""
url_path = "course_team"
class UserWrapper(PageObject):
"""
A PageObject representing a wrapper around a user listed on the course/library team page.
"""
url = None
COMPONENT_BUTTONS = {
'basic_tab': '.editor-tabs li.inner_tab_wrap:nth-child(1) > a',
'advanced_tab': '.editor-tabs li.inner_tab_wrap:nth-child(2) > a',
'save_settings': '.action-save',
}
def __init__(self, browser, email):
super(UserWrapper, self).__init__(browser)
self.email = email
self.selector = '.user-list .user-item[data-email="{}"]'.format(self.email)
def is_browser_on_page(self):
"""
Sanity check that our wrapper element is on the page.
"""
return self.q(css=self.selector).present
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular user entry's context
"""
return '{} {}'.format(self.selector, selector)
@property
def name(self):
""" Get this user's username, as displayed. """
return self.q(css=self._bounded_selector('.user-username')).text[0]
@property
def role_label(self):
""" Get this user's role, as displayed. """
return self.q(css=self._bounded_selector('.flag-role .value')).text[0]
@property
def is_current_user(self):
""" Does the UI indicate that this is the current user? """
return self.q(css=self._bounded_selector('.flag-role .msg-you')).present
@property
def can_promote(self):
""" Can this user be promoted to a more powerful role? """
return self.q(css=self._bounded_selector('.add-admin-role')).present
@property
def promote_button_text(self):
""" What does the promote user button say? """
return self.q(css=self._bounded_selector('.add-admin-role')).text[0]
def click_promote(self):
""" Click on the button to promote this user to the more powerful role """
self.q(css=self._bounded_selector('.add-admin-role')).click()
wait_for_ajax_or_reload(self.browser)
@property
def can_demote(self):
""" Can this user be demoted to a less powerful role? """
return self.q(css=self._bounded_selector('.remove-admin-role')).present
@property
def demote_button_text(self):
""" What does the demote user button say? """
return self.q(css=self._bounded_selector('.remove-admin-role')).text[0]
def click_demote(self):
""" Click on the button to demote this user to the less powerful role """
self.q(css=self._bounded_selector('.remove-admin-role')).click()
wait_for_ajax_or_reload(self.browser)
@property
def can_delete(self):
""" Can this user be deleted? """
return self.q(css=self._bounded_selector('.action-delete:not(.is-disabled) .remove-user')).present
def click_delete(self):
""" Click the button to delete this user. """
disable_animations(self)
self.q(css=self._bounded_selector('.remove-user')).click()
# We can't use confirm_prompt because its wait_for_ajax is flaky when the page is expected to reload.
self.wait_for_element_visibility('.prompt', 'Prompt is visible')
self.wait_for_element_visibility('.prompt .action-primary', 'Confirmation button is visible')
self.q(css='.prompt .action-primary').click()
self.wait_for_element_absence('.page-prompt .is-shown', 'Confirmation prompt is hidden')
wait_for_ajax_or_reload(self.browser)
@property
def has_no_change_warning(self):
""" Does this have a warning in place of the promote/demote buttons? """
return self.q(css=self._bounded_selector('.notoggleforyou')).present
@property
def no_change_warning_text(self):
""" Text of the warning seen in place of the promote/demote buttons. """
return self.q(css=self._bounded_selector('.notoggleforyou')).text[0]
|
agpl-3.0
|
Serag8/Bachelor
|
google_appengine/lib/django-1.5/django/views/generic/list.py
|
99
|
6695
|
from __future__ import unicode_literals
from django.core.paginator import Paginator, InvalidPage
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateResponseMixin, ContextMixin, View
class MultipleObjectMixin(ContextMixin):
"""
A mixin for views manipulating multiple objects.
"""
allow_empty = True
queryset = None
model = None
paginate_by = None
context_object_name = None
paginator_class = Paginator
page_kwarg = 'page'
def get_queryset(self):
"""
Get the list of items for this view. This must be an iterable, and may
be a queryset (in which qs-specific behavior will be enabled).
"""
if self.queryset is not None:
queryset = self.queryset
if hasattr(queryset, '_clone'):
queryset = queryset._clone()
elif self.model is not None:
queryset = self.model._default_manager.all()
else:
raise ImproperlyConfigured("'%s' must define 'queryset' or 'model'"
% self.__class__.__name__)
return queryset
def paginate_queryset(self, queryset, page_size):
"""
Paginate the queryset, if needed.
"""
paginator = self.get_paginator(queryset, page_size, allow_empty_first_page=self.get_allow_empty())
page_kwarg = self.page_kwarg
page = self.kwargs.get(page_kwarg) or self.request.GET.get(page_kwarg) or 1
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404(_("Page is not 'last', nor can it be converted to an int."))
try:
page = paginator.page(page_number)
return (paginator, page, page.object_list, page.has_other_pages())
except InvalidPage as e:
raise Http404(_('Invalid page (%(page_number)s): %(message)s') % {
'page_number': page_number,
'message': str(e)
})
def get_paginate_by(self, queryset):
"""
Get the number of items to paginate by, or ``None`` for no pagination.
"""
return self.paginate_by
def get_paginator(self, queryset, per_page, orphans=0, allow_empty_first_page=True):
"""
Return an instance of the paginator for this view.
"""
return self.paginator_class(queryset, per_page, orphans=orphans, allow_empty_first_page=allow_empty_first_page)
def get_allow_empty(self):
"""
Returns ``True`` if the view should display empty lists, and ``False``
if a 404 should be raised instead.
"""
return self.allow_empty
def get_context_object_name(self, object_list):
"""
Get the name of the item to be used in the context.
"""
if self.context_object_name:
return self.context_object_name
elif hasattr(object_list, 'model'):
return '%s_list' % object_list.model._meta.object_name.lower()
else:
return None
def get_context_data(self, **kwargs):
"""
Get the context for this view.
"""
queryset = kwargs.pop('object_list')
page_size = self.get_paginate_by(queryset)
context_object_name = self.get_context_object_name(queryset)
if page_size:
paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
context = {
'paginator': paginator,
'page_obj': page,
'is_paginated': is_paginated,
'object_list': queryset
}
else:
context = {
'paginator': None,
'page_obj': None,
'is_paginated': False,
'object_list': queryset
}
if context_object_name is not None:
context[context_object_name] = queryset
context.update(kwargs)
return super(MultipleObjectMixin, self).get_context_data(**context)
class BaseListView(MultipleObjectMixin, View):
"""
A base view for displaying a list of objects.
"""
def get(self, request, *args, **kwargs):
self.object_list = self.get_queryset()
allow_empty = self.get_allow_empty()
if not allow_empty:
# When pagination is enabled and object_list is a queryset,
# it's better to do a cheap query than to load the unpaginated
# queryset in memory.
if (self.get_paginate_by(self.object_list) is not None
and hasattr(self.object_list, 'exists')):
is_empty = not self.object_list.exists()
else:
is_empty = len(self.object_list) == 0
if is_empty:
raise Http404(_("Empty list and '%(class_name)s.allow_empty' is False.")
% {'class_name': self.__class__.__name__})
context = self.get_context_data(object_list=self.object_list)
return self.render_to_response(context)
class MultipleObjectTemplateResponseMixin(TemplateResponseMixin):
"""
Mixin for responding with a template and list of objects.
"""
template_name_suffix = '_list'
def get_template_names(self):
"""
Return a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
try:
names = super(MultipleObjectTemplateResponseMixin, self).get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If the list is a queryset, we'll invent a template name based on the
# app and model name. This name gets put at the end of the template
# name list so that user-supplied names override the automatically-
# generated ones.
if hasattr(self.object_list, 'model'):
opts = self.object_list.model._meta
names.append("%s/%s%s.html" % (opts.app_label, opts.object_name.lower(), self.template_name_suffix))
return names
class ListView(MultipleObjectTemplateResponseMixin, BaseListView):
"""
Render some list of objects, set by `self.model` or `self.queryset`.
`self.queryset` can actually be any iterable of items, not just a queryset.
"""
|
mit
|
RYWU/slick
|
slick/blueprints/dns/manager.py
|
4
|
3123
|
from SoftLayer import DNSManager, SoftLayerAPIError
from slick.utils.core import get_client
from slick.utils.date import parse_date
def add_record(zone_id, host, rec_type, data, ttl=60):
""" Creates a new record based upon the passed information.
:param int zone_id: The ID of the zone in which this record should be
created.
:param string host: Host entry for the new record
:param string rec_type: The DNS type of the new record
:param string data: The data value of the new record
:param int ttl: The TTL for the new record. Defaults to 60.
"""
record = {
'zone_id': zone_id,
'record': host,
'type': rec_type,
'data': data,
'ttl': ttl,
}
try:
get_dns_manager().create_record(**record)
success = True
message = 'Record created successfully.'
except SoftLayerAPIError as exception:
success = False
message = str(exception)
return (success, message)
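# Illustrative usage sketch (the values are assumptions): create an A record in an
# existing zone and inspect the outcome.
#
#   success, message = add_record(zone_id=123456, host='www', rec_type='A',
#                                 data='10.0.0.1', ttl=300)
#   # success is False and message holds the API error text if the call failed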
def all_zones():
""" This method returns a dictionary of all available DNS zones.
Note that this method is memoized for performance reasons!
:param string username: The username of the current user. This is used
to ensure memoization uniqueness.
"""
zones = []
for zone in get_dns_manager().list_zones():
zones.append({'id': zone.get('id'),
'name': zone.get('name'),
'updated': parse_date(zone.get('updateDate'))})
return zones
def delete_record(record_id):
""" Deletes the specified record.
:param int record_id: The ID of the record being deleted.
"""
try:
get_dns_manager().delete_record(record_id)
success = True
message = 'Record has been removed.'
except SoftLayerAPIError as exception:
success = False
message = str(exception)
return (success, message)
def get_dns_manager():
return DNSManager(get_client())
def get_record(record_id):
api = get_client()['Dns_Domain_ResourceRecord']
try:
record = api.getObject(id=record_id, mask='domain')
except SoftLayerAPIError:
record = None
return record
def get_zone(zone_id):
mgr = get_dns_manager()
try:
zone = mgr.get_zone(zone_id, records=True)
except SoftLayerAPIError:
zone = None
return zone
def get_zone_id_by_name(zone):
return get_dns_manager()._get_zone_id_from_name(zone)[0]
def search_record(zone, record):
mgr = get_dns_manager()
zone_id = mgr._get_zone_id_from_name(zone)[0]
return mgr.get_records(zone_id, host=record)
def update_record(record):
""" Updates the specified record.
:param dict record: The full set of information about the record being
        updated. Must contain an 'id' key identifying the record to update.
"""
try:
get_dns_manager().edit_record(record)
success = True
message = 'Record updated successfully.'
except SoftLayerAPIError as exception:
success = False
message = str(exception)
return (success, message)
|
mit
|
x2nie/odoo
|
addons/product_extended/wizard/wizard_price.py
|
104
|
2687
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2011 OpenERP S.A. (<http://www.openerp.com>).
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class wizard_price(osv.osv):
_name = "wizard.price"
_description = "Compute price wizard"
_columns = {
'info_field': fields.text('Info', readonly=True),
'real_time_accounting': fields.boolean("Generate accounting entries when real-time"),
'recursive': fields.boolean("Change prices of child BoMs too"),
}
    def default_get(self, cr, uid, fields, context=None):
        if context is None:
            context = {}
        res = super(wizard_price, self).default_get(cr, uid, fields, context=context)
        product_pool = self.pool.get('product.product')
        rec_id = context.get('active_id', False)
        assert rec_id, _('Active ID is not set in Context.')
        product_obj = product_pool.browse(cr, uid, rec_id, context=context)
        res['info_field'] = str(product_pool.compute_price(cr, uid, [product_obj.id], test=True, context=context))
return res
def compute_from_bom(self, cr, uid, ids, context=None):
assert len(ids) == 1
if context is None:
context = {}
rec_id = context and context.get('active_id', False)
assert rec_id, _('Active ID is not set in Context.')
prod_obj = self.pool.get('product.product')
res = self.browse(cr, uid, ids, context=context)
prod = prod_obj.browse(cr, uid, rec_id, context=context)
prod_obj.compute_price(cr, uid, [prod.id], real_time_accounting=res[0].real_time_accounting, recursive=res[0].recursive, test=False, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
bdero/edx-platform
|
common/lib/capa/capa/safe_exec/lazymod.py
|
68
|
1207
|
"""A module proxy for delayed importing of modules.
From http://barnesc.blogspot.com/2006/06/automatic-python-imports-with-autoimp.html,
in the public domain.
"""
import sys
class LazyModule(object):
"""A lazy module proxy."""
def __init__(self, modname):
self.__dict__['__name__'] = modname
self._set_mod(None)
def _set_mod(self, mod):
if mod is not None:
self.__dict__ = mod.__dict__
self.__dict__['_lazymod_mod'] = mod
def _load_mod(self):
__import__(self.__name__)
self._set_mod(sys.modules[self.__name__])
def __getattr__(self, name):
if self.__dict__['_lazymod_mod'] is None:
self._load_mod()
mod = self.__dict__['_lazymod_mod']
if hasattr(mod, name):
return getattr(mod, name)
else:
try:
subname = '%s.%s' % (self.__name__, name)
__import__(subname)
submod = getattr(mod, name)
except ImportError:
raise AttributeError("'module' object has no attribute %r" % name)
            self.__dict__[name] = LazyModule(subname)
return self.__dict__[name]
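# Minimal usage sketch (illustrative, not part of the original module): the real
# import is deferred until an attribute of the proxy is first touched.
if __name__ == '__main__':
    json = LazyModule('json')          # nothing has been imported yet
    print(json.dumps({'lazy': True}))  # 'json' is imported here, then dumps() is resolved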
|
agpl-3.0
|
hydroffice/hyo_soundspeed
|
examples/soundspeed/profile/ex_profile_stats.py
|
1
|
1775
|
from datetime import datetime
import random
import logging
import numpy as np
from hyo2.soundspeed.profile.profile import Profile
from hyo2.abc.lib.logging import set_logging
ns_list = ["hyo2.soundspeed", "hyo2.soundspeedmanager", "hyo2.soundspeedsettings"]
set_logging(ns_list=ns_list)
logger = logging.getLogger(__name__)
def fresh_profile():
ssp = Profile()
d = [random.gauss(val, .2) for val in np.arange(1.0, 51.0, 0.5)]
vs = [random.gauss(val, 5.0) for val in np.arange(1450.0, 1550.0, 1.0)]
t = [random.gauss(val, .2) for val in np.arange(15.0, 25.0, 0.1)]
s = [random.gauss(val, .1) for val in np.arange(10.0, 60.0, 0.5)]
ssp.init_proc(len(d))
ssp.proc.depth = np.array(d)
ssp.proc.speed = np.array(vs)
ssp.proc.temp = np.array(t)
ssp.proc.sal = np.array(s)
# ssp.proc.flag[40:50] = 1
# ssp.proc.flag[50:70] = 2
ssp.meta.latitude = 43.13555
ssp.meta.longitude = -70.9395
ssp.meta.utc_time = datetime.utcnow()
return ssp
ssp = fresh_profile()
logger.debug("min: %s %s %s %s" %
(ssp.proc_depth_min, ssp.proc_speed_min,
ssp.proc_temp_min, ssp.proc_sal_min))
logger.debug("max: %s %s %s %s" %
(ssp.proc_depth_max, ssp.proc_speed_max,
ssp.proc_temp_max, ssp.proc_sal_max))
logger.debug("median: %s %s %s %s" %
(ssp.proc_depth_median, ssp.proc_speed_median,
ssp.proc_temp_median, ssp.proc_sal_median))
logger.debug("avg: %s %s %s %s" %
(ssp.proc_depth_mean, ssp.proc_speed_mean,
ssp.proc_temp_mean, ssp.proc_sal_mean))
logger.debug("std: %s %s %s %s" %
(ssp.proc_depth_std, ssp.proc_speed_std,
ssp.proc_temp_std, ssp.proc_sal_std))
# ssp.proc_debug_plot()
# plt.show()
|
lgpl-2.1
|
go-bears/rally
|
tests/unit/plugins/openstack/scenarios/ec2/test_utils.py
|
1
|
2950
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from rally.plugins.openstack.scenarios.ec2 import utils
from tests.unit import test
CONF = cfg.CONF
class EC2ScenarioTestCase(test.ScenarioTestCase):
def setUp(self):
super(EC2ScenarioTestCase, self).setUp()
self.server1 = mock.MagicMock()
self.server2 = mock.MagicMock()
self.reservations = mock.MagicMock(instances=[self.server1,
self.server2])
def test__list_servers(self):
servers_list = []
self.clients("ec2").get_only_instances.return_value = servers_list
ec2_scenario = utils.EC2Scenario()
return_servers_list = ec2_scenario._list_servers()
self.assertEqual(servers_list, return_servers_list)
self._test_atomic_action_timer(ec2_scenario.atomic_actions(),
"ec2.list_servers")
def test__update_resource(self):
resource = mock.MagicMock()
scenario = utils.EC2Scenario()
self.assertEqual(scenario._update_resource(resource), resource)
resource.update.assert_called_once_with()
def test__boot_servers(self):
self.clients("ec2").run_instances.return_value = self.reservations
ec2_scenario = utils.EC2Scenario(context={})
ec2_scenario._update_resource = mock.Mock()
ec2_scenario._boot_servers("image", "flavor", 2)
expected = [
mock.call(
self.server1,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=ec2_scenario._update_resource,
check_interval=CONF.benchmark.ec2_server_boot_poll_interval,
timeout=CONF.benchmark.ec2_server_boot_timeout
),
mock.call(
self.server2,
is_ready=self.mock_resource_is.mock.return_value,
update_resource=ec2_scenario._update_resource,
check_interval=CONF.benchmark.ec2_server_boot_poll_interval,
timeout=CONF.benchmark.ec2_server_boot_timeout
)
]
self.mock_wait_for.mock.assert_has_calls(expected)
self.mock_resource_is.mock.assert_has_calls([mock.call("RUNNING")])
self._test_atomic_action_timer(ec2_scenario.atomic_actions(),
"ec2.boot_servers")
|
apache-2.0
|
laurivosandi/compiler-construction
|
toolchain/coder.py
|
1
|
8241
|
# coding: utf-8
"""
µ-Opal to UEBB machine instruction set compiler
"""
import sys
import lexer
from parser import Parser
from ctxcheck import context_check
import absy
import ir
import builtin
BUILTIN_MAPPING = {
builtin.DefinitionEq: ir.Eq,
builtin.DefinitionLessThan: ir.Lt,
builtin.DefinitionAdd: ir.Add,
builtin.DefinitionSub: ir.Sub,
builtin.DefinitionMul: ir.Mul,
builtin.DefinitionDiv: ir.Div
}
def elaborate(func):
def wrapped(old_env, node):
env = func(old_env, node)
print "Compiled node", node, "to", env.instructions[-len(env.instructions)+len(old_env.instructions):]
print "=> Stack:", env.stack
return env
return wrapped
class Environment(object):
"""
Environment holds compiler state: what functions are available,
what's in the stack and what instructions have been issued so far.
"""
def __init__(self, gctx, stack=(), instructions=()):
self.gctx = gctx
self.stack = stack
self.instructions = instructions
def push_constant(env, constant):
constant = int(constant) # Coarse True/False to 1/0
return Environment(env.gctx, env.stack + (constant,), env.instructions + (ir.PushInt(constant),))
def push_var(env, name):
if name not in env.stack: raise Exception("Variable not present in stack!")
# Find the first instance of the variable on the stack
offset = len(env.stack) - env.stack.index(name) - 1
return Environment(env.gctx, env.stack + (name,), env.instructions + (ir.Push(offset),))
def push(self, instruction):
stack = self.stack
if isinstance(instruction, ir.BinaryOperator): # Do this for builtins such as add, mul, div, sub
stack = stack[:-2] + ("%s%s" % (instruction.mnemonic, id(self)),)
elif isinstance(instruction, ir.PushAddr):
stack = stack + ("CallAddr%s%dx" % (instruction.mnemonic, id(self)),)
elif isinstance(instruction, ir.Call):
# For the caller it looks like the call address gets popped and substituted with return value
stack = stack[:-1] + ("CallReturnValue%d" % id(self),)
elif isinstance(instruction, ir.Return):
# Returning from function pops the return address from stack which is exactly before return value
stack = stack[:-2] + stack[-1:]
elif isinstance(instruction, ir.Slide):
assert len(stack) > instruction.offset, "Stack is %s, attempted to slide by %d" % (stack, instruction.offset)
stack = stack[:-instruction.offset-1] + stack[-1:]
print "Issued instruction", instruction #, "stack was:", self.stack,
print "=> Stack:", stack
return Environment(self.gctx, stack, self.instructions + (instruction,))
def __iter__(self):
return iter(self.instructions)
def compile(env, node):
if isinstance(node, absy.Variable):
# is is further in stack push i-th node
return env.push_var(node.name)
elif isinstance(node, absy.Nat) or isinstance(node, absy.Boolean):
return env.push_constant(node.value)
elif isinstance(node, absy.Apply):
# Resolve function definition
d = env.gctx[node.func_name]
assert len(node.parameters) == len(d.args), "Invalid invocation of %s" % node.func_name
# Push function application arguments to stack
for param in node.parameters:
env = env.compile(param)
# print "Pushed function call parameter:", param, "stack is now:", env.stack
if isinstance(d, absy.DefinitionBuiltin):
for dc, ic in BUILTIN_MAPPING.iteritems():
if isinstance(d, dc):
instruction = ic()
                        if isinstance(instruction, ir.Sub) or isinstance(instruction, ir.Div): # TODO Builtins are swapped in the Machine JAR
env = env.push(ir.Swap())
return env.push(instruction)
raise Exception("Don't know how to compile built-in %s" % d.name)
else:
print "Compiling function call of", node.func_name
return env.push(ir.PushAddr(node.func_name)).push(ir.Call()).push(ir.Slide(len(node.parameters)))
elif isinstance(node, absy.Conditional):
print "Compiling conditional if-expression"
label_else = ir.Label("else%d" % id(env))
label_fi = ir.Label("fi%d" % id(env))
# If the expr_if is eq(_, 0) then we can skip some stuff
optimize = isinstance(node.expr_if, absy.Apply) and node.expr_if.func_name == "eq" and isinstance(node.expr_if.parameters[1], absy.Nat) and node.expr_if.parameters[1].value == 0
if not optimize:
env = env.push(ir.PushInt(0))
env = env.compile(node.expr_if)
if not optimize:
env = env.push(ir.Eq()) # Compare 0 to number previously on top of stack
# Stack persists for both branches, start from scratch for instructions
env_then = Environment(env.gctx, env.stack[:-1], ()).push(ir.ConditionalJump(label_else)).compile(node.expr_then).push(ir.Jump(label_fi))
env_else = Environment(env.gctx, env.stack[:-1], ()).push(label_else).compile(node.expr_else).push(label_fi)
assert len(env_then.stack) == len(env_else.stack), "This should not happen, %s in then stack and %s in else stack" % (env_then.stack, env_else.stack)
# Pop if-expression value and push then-else expression value placeholder
stack = env.stack[:-1] + ("cond%d" % id(env),)
# Merge instruction branches and add placeholder for conditional return value
instructions = env.instructions + env_then.instructions + env_else.instructions
print "Merging conditional branches"
# return Environment(env.gctx, stack, )
return Environment(env.gctx, env.stack, env.instructions + env_then.instructions + env_else.instructions)
else:
raise Exception("Don't know how to compile: %s of class %s" % (node, node.__class__.__name__))
def compile_program(gctx):
def compile_with_labels():
print "Compiling function MAIN"
for i in Environment(gctx).compile(gctx["MAIN"].body):
print i
yield i
yield ir.Stop()
print
for label, d in gctx.iteritems():
if not isinstance(d, absy.DefinitionBuiltin) and label != "MAIN":
print "Compiling function", label
yield ir.Label(label)
arg_names = tuple([arg_name for arg_name, arg_type in d.args])
for i in Environment(gctx, arg_names + ("ReturnAddress",)).compile(d.body).push(ir.Return()):
print i
yield i
print
def scrub():
offsets = {}
instructions = ()
j = 0
for i in compile_with_labels():
if isinstance(i, ir.Label):
offsets[i.name] = j
else:
instructions = instructions + (i,)
j += 1
for i in instructions:
if isinstance(i, ir.PushAddr) or isinstance(i, ir.Jump):
i.target = offsets[i.target]
return instructions
return scrub()
def uebb_compile(filename):
tokens = tuple(lexer.tokenize(filename))
defs, state = Parser(*tokens).parse()
gctx, errors = context_check(defs)
if errors:
for node, msg in errors:
print msg, "on line", node.line, "column", node.column
else:
fh = open(filename[:-3] + ".ma", "w")
for i in compile_program(gctx):
fh.write(repr(i) + "\n")
fh.close()
if __name__ == "__main__":
filenames = sys.argv[1:]
for filename in filenames:
uebb_compile(filename)
|
mit
|
savinash47/openstack-doc-tools
|
os_doc_tools/openstack/common/local.py
|
378
|
1677
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""
import threading
import weakref
class WeakLocal(threading.local):
def __getattribute__(self, attr):
rval = super(WeakLocal, self).__getattribute__(attr)
if rval:
# NOTE(mikal): this bit is confusing. What is stored is a weak
# reference, not the value itself. We therefore need to lookup
# the weak reference and return the inner value here.
rval = rval()
return rval
def __setattr__(self, attr, value):
value = weakref.ref(value)
return super(WeakLocal, self).__setattr__(attr, value)
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()
# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
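# Minimal usage sketch (illustrative, assumes CPython reference counting): a value
# kept in weak_store disappears once the caller drops its own reference.
if __name__ == '__main__':
    class _Context(object):
        pass
    ctx = _Context()
    weak_store.context = ctx            # only a weakref.ref(ctx) is stored
    print(weak_store.context is ctx)    # True while the caller still holds ctx
    del ctx                             # last strong reference is gone
    print(weak_store.context)           # None: the weak reference is now dead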
|
apache-2.0
|
ActiveState/code
|
recipes/Python/578354_Dynamically_create_dictionary_ascending/recipe-578354.py
|
1
|
1171
|
#!/usr/bin/env python
"""
Create a dictionary of ascending (sequential) directories.
"""
def make_pathdict(keys, fspath = None):
"""Quickly create a dictionary of ascending path components.
:param keys: list of dictionary keys (base -> root order)
:returns: dictionary of keyed paths
NOTICE: This does not check path-length::key-count, etc.!
Also, not as robust as os.path in x-platform use.
>>> fspath = "/and/the/player/asks/anyone/for_tennis.py"
>>> keys = "base midl root".split()
>>> ret_dict = make_pathdict(keys, fspath)
>>> for k in keys: print "{0:<6}{1}".format(k, ret_dict[k])
base /and/the/player/asks/anyone
midl /and/the/player/asks
root /and/the/player
"""
from os import path as os_path
_cache = {}
fspath = os_path.abspath(fspath or __file__)
# divide the path into len(keys) + 1 parts, the root, directories and file
tokenz = fspath.rsplit(os_path.sep, len(keys))
    # iterate the keys assigning the decreasing-length path-portions
for idx, key in enumerate(keys):
_cache[key] = os_path.join(*tokenz[:-(idx + 1)])
return _cache
|
mit
|
BrunoDefrance/PLUi_versionning_manager
|
actions_versionning.py
|
1
|
47156
|
# coding: utf8
from PyQt4.QtGui import *
from qgis.gui import QgsMessageBar
from qgis.core import QgsMessageLog
from qgis.core import QgsProject
from qgis.core import QgsFeatureRequest
from qgis.core import QgsMapLayerRegistry
from qgis.core import QgsExpressionContextUtils
from qgis.core import QgsExpressionContextScope
import re
import psycopg2
# contents of the openProject macro
#import qgis.utils
#import sys
#from qgis.core import QgsExpressionContextUtils
# test or write environment
#global enviro_versionning
#QgsExpressionContextUtils.setProjectVariable('enviro_versionning','ecriture')
#QgsExpressionContextUtils.setProjectVariable('enviro_versionning','test')
# Enable if the scripts are not in the same location as the project
#sys.path.append('S:\Metiers\Urbanisme\confidentiel\PLU\partage_aurg2\QGIS\PLUi\scripts\\')
#sys.path.append('S:\Metiers\Urbanisme\confidentiel\PLU\partage_aurg2\QGIS\PLUi\scripts_test\\')
#sys.path.insert(0, "/scripts_test")
global enviro_versionning
registry = QgsMapLayerRegistry.instance()
project = QgsProject.instance()
messageLog = QgsMessageLog.instance()
#enviro_versionning = QgsExpressionContextUtils.projectScope().variable('enviro_versionning')
enviro_versionning = 'ecriture'
QgsExpressionContextUtils.setProjectVariable('enviro_versionning',enviro_versionning)
######################################################################
# In two parts: reusable python code
# The actions themselves
######################################################################
# ideas
# a variable for test or write, used to run the queries in the appropriate schema OK
###########################REUSABLE CODE#############################
# Retrieves the selected version
def get_feature(layer_id, feature_id):
    # retrieve the data from the clicked row
layer = registry.mapLayer(layer_id)
request = QgsFeatureRequest().setFilterFid(feature_id)
feature = next(layer.getFeatures(request))
return feature
# Executes the SQL query passed as an argument
def execute_request(qgis, sql, msg):
try:
with psycopg2.connect("service=bd_prod") as cnx:
with cnx.cursor() as cursor:
#QgsMessageLog.logMessage('sql : ' + sql, 'Python', 0)
cursor.execute(sql)
except Exception, e:
qgis.utils.iface.messageBar().pushMessage("Erreur", u'Impossible de traiter la demande en base : ' + str(e).decode("utf-8"), level=QgsMessageBar.CRITICAL)
# zooms to the version and renames the layers with the version number
def zoom_rename(qgis, iddoc, com, num_proj, num_version, echelle):
current_layer = registry.mapLayer('v_limites_communales20170621142702660')
if current_layer is not None:
filter = current_layer.subsetString()
if echelle == 'com':
newFilter= re.sub('&.*&', '&*/trigram=\''+com+'\'/*&' , filter, flags=re.DOTALL)
current_layer.setSubsetString(newFilter)
qgis.utils.iface.mapCanvas().setExtent(current_layer.extent())
newFilter= re.sub('&.*&', '&*/trigram is not null/*&' , filter, flags=re.DOTALL)
current_layer.setSubsetString(newFilter)
    # filter for the zoning layers
newFilterZ= re.sub('&.*&', '&*/iddocumenturba=\''+iddoc+'\'/*&' , filter, flags=re.DOTALL)
    # filter for the point layers
newFilterP= re.sub('&.*&', '&*/iddocumenturba=\''+iddoc+'\' AND geom_pct <> \'\'/*&' , filter, flags=re.DOTALL)
    # filter for the line layers
newFilterL= re.sub('&.*&', '&*/iddocumenturba=\''+iddoc+'\' AND geom_lin <> \'\'/*&' , filter, flags=re.DOTALL)
    # filter for the polygon layers
newFilterS= re.sub('&.*&', '&*/iddocumenturba=\''+iddoc+'\' AND geom_surf <> \'\'/*&' , filter, flags=re.DOTALL)
    # filter for the text layers
newFilterT= re.sub('&.*&', '&*/iddocumenturba=\''+iddoc+'\' AND geom_txt <> \'\'/*&' , filter, flags=re.DOTALL)
#Zonage
current_layer = registry.mapLayer('v_zoneurba_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterZ)
current_layer.setName('ZONAGE:'+com+'_P:'+num_proj+'_V:'+num_version)
#Zonage *suppr*
current_layer = registry.mapLayer('v_zoneurba_suppr_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterZ)
current_layer.setName('ZONAGE *suppr*:'+com+'_P:'+num_proj+'_V:'+num_version)
#Prescriptions PCT
current_layer = registry.mapLayer('v_prescription_pct_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterP)
current_layer.setName('PRESCRIPTIONS PCT:'+com+'_P:'+num_proj+'_V:'+num_version)
#Prescriptions PCT *suppr*
current_layer = registry.mapLayer('v_prescription_pct_suppr_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterP)
current_layer.setName('PRESCRIPTIONS PCT *suppr*:'+com+'_P:'+num_proj+'_V:'+num_version)
#Prescriptions LIN
current_layer = registry.mapLayer('v_prescription_lin_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterL)
current_layer.setName('PRESCRIPTIONS LIN:'+com+'_P:'+num_proj+'_V:'+num_version)
#Prescriptions LIN *suppr*
current_layer = registry.mapLayer('v_prescription_lin_suppr_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterL)
current_layer.setName('PRESCRIPTIONS LIN *suppr*:'+com+'_P:'+num_proj+'_V:'+num_version)
#Prescriptions SURF
current_layer = registry.mapLayer('v_prescription_surf_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterS)
current_layer.setName('PRESCRIPTIONS SURF:'+com+'_P:'+num_proj+'_V:'+num_version)
#Prescriptions SURF *suppr*
current_layer = registry.mapLayer('v_prescription_surf_suppr_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterS)
current_layer.setName('PRESCRIPTIONS SURF *suppr*:'+com+'_P:'+num_proj+'_V:'+num_version)
#Infos PCT
current_layer = registry.mapLayer('v_information_pct_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterP)
current_layer.setName('INFOS PCT:'+com+'_P:'+num_proj+'_V:'+num_version)
#Infos PCT *suppr*
current_layer = registry.mapLayer('v_information_pct_suppr_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterP)
current_layer.setName('INFOS PCT *suppr*:'+com+'_P:'+num_proj+'_V:'+num_version)
#Infos LIN
current_layer = registry.mapLayer('v_information_lin_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterL)
current_layer.setName('INFOS LIN:'+com+'_P:'+num_proj+'_V:'+num_version)
#Infos LIN *suppr*
current_layer = registry.mapLayer('v_information_lin_suppr_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterL)
current_layer.setName('INFOS LIN *suppr*:'+com+'_P:'+num_proj+'_V:'+num_version)
#Infos SURF
current_layer = registry.mapLayer('v_information_surf_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterS)
current_layer.setName('INFOS SURF:'+com+'_P:'+num_proj+'_V:'+num_version)
#Infos SURF *suppr*
current_layer = registry.mapLayer('v_information_surf_suppr_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterS)
current_layer.setName('INFOS SURF *suppr*:'+com+'_P:'+num_proj+'_V:'+num_version)
#Habillage TXT
current_layer = registry.mapLayer('v_habillage_txt_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterT)
current_layer.setName('HABILLAGE TXT:'+com+'_P:'+num_proj+'_V:'+num_version)
#Habillage TXT *suppr*
current_layer = registry.mapLayer('v_habillage_txt_suppr_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterT)
current_layer.setName('HABILLAGE TXT *suppr*:'+com+'_P:'+num_proj+'_V:'+num_version)
#Habillage PCT
current_layer = registry.mapLayer('v_habillage_pct_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterP)
current_layer.setName('HABILLAGE PCT:'+com+'_P:'+num_proj+'_V:'+num_version)
#Habillage PCT *suppr*
current_layer = registry.mapLayer('v_habillage_pct_suppr_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterP)
current_layer.setName('HABILLAGE PCT *suppr*:'+com+'_P:'+num_proj+'_V:'+num_version)
#Habillage LIN
current_layer = registry.mapLayer('v_habillage_lin_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterL)
current_layer.setName('HABILLAGE LIN :'+com+'_P:'+num_proj+'_V:'+num_version)
#Habillage LIN *suppr*
current_layer = registry.mapLayer('v_habillage_lin_suppr_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterL)
current_layer.setName('HABILLAGE LIN *suppr*:'+com+'_P:'+num_proj+'_V:'+num_version)
#Habillage SURF
current_layer = registry.mapLayer('v_habillage_surf_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterS)
current_layer.setName('HABILLAGE SURF:'+com+'_P:'+num_proj+'_V:'+num_version)
#Habillage SURF *suppr*
current_layer = registry.mapLayer('v_habillage_surf_suppr_layer_' + echelle)
if current_layer is not None:
current_layer.setSubsetString(newFilterS)
current_layer.setName('HABILLAGE SURF *suppr*:'+com+'_P:'+num_proj+'_V:'+num_version)
# refreshes the style of the prescription / information layers according to the displayed version
def load_layer_style(qgis, cnig, echelle, env):
current_layer = registry.mapLayer('v_prescription_surf_layer_' + echelle)
style_manager = current_layer.styleManager()
style_manager.removeStyle('plui') # Supprime l'éventuel style PLUI déjà chargé auparavant
current_layer = registry.mapLayer('v_prescription_lin_layer_' + echelle)
style_manager = current_layer.styleManager()
style_manager.removeStyle('plui') # Supprime l'éventuel style PLUI déjà chargé auparavant
current_layer = registry.mapLayer('v_prescription_pct_layer_' + echelle)
style_manager = current_layer.styleManager()
style_manager.removeStyle('plui') # Supprime l'éventuel style PLUI déjà chargé auparavant
current_layer = registry.mapLayer('v_information_surf_layer_' + echelle)
style_manager = current_layer.styleManager()
style_manager.removeStyle('plui') # Supprime l'éventuel style PLUI déjà chargé auparavant
current_layer = registry.mapLayer('v_information_lin_layer_' + echelle)
style_manager = current_layer.styleManager()
style_manager.removeStyle('plui') # Supprime l'éventuel style PLUI déjà chargé auparavant
current_layer = registry.mapLayer('v_information_pct_layer_' + echelle)
style_manager = current_layer.styleManager()
style_manager.removeStyle('plui') # Supprime l'éventuel style PLUI déjà chargé auparavant
with psycopg2.connect("service=bd_prod") as cnx:
with cnx.cursor() as cursor:
sql = 'SELECT layerid, stylename FROM urba_plu_' + enviro_versionning + '.corresp_layer_styles WHERE cnig = \'' + cnig + '\''
#QgsMessageLog.logMessage('sql : ' + sql, 'Python', 0)
cursor.execute(sql)
result = cursor.fetchall()
#QgsMessageLog.logMessage('type(result) : ' + str(type(result)), 'Python', 0)
if len(result) > 0:
for current_style in result:
layerid, stylename = current_style
#QgsMessageLog.logMessage('layerid : ' + layerid + ', stylename : ' + stylename, 'Python', 0)
sql = 'SELECT styleqml FROM public.layer_styles WHERE f_table_schema = \'urba_plu_' + enviro_versionning + '\' AND f_table_name = \'' + layerid + '\' AND stylename = \'' + stylename + '\''
#QgsMessageLog.logMessage('sql : ' + sql, 'Python', 0)
cursor.execute(sql)
# Modification, a été remplacé fetchall() par fetchone() le 24/04/18
result2 = cursor.fetchone()
#if len(result2) > 0:
try:
styleqml = result2[0]
#QgsMessageLog.logMessage('styleqml : ' + str(styleqml).decode("utf-8"), 'Python', 0)
current_layer = registry.mapLayer(layerid + '_layer_' + echelle)
style_manager = current_layer.styleManager()
#QgsMessageLog.logMessage('style courant : ' + style_manager.currentStyle(), 'Python', 0)
#style_manager.removeStyle('plui') # Supprime l'éventuel style PLUI déjà chargé auparavant
style_manager.addStyle('plui', qgis.core.QgsMapLayerStyle(str(styleqml).decode("utf-8"))) # Ajoute le nouveau style
style_manager.setCurrentStyle('plui') # Applique ce style sur la couche
qgis.utils.iface.messageBar().pushMessage("Info", u'Style mis à jour sur la couche ' + layerid, level=QgsMessageBar.INFO, duration=3)
except:
#qgis.utils.iface.messageBar().pushMessage("Info", u'Une correspondance de style a été trouvé mais le style est absent de la table public.layer_styles', level=QgsMessageBar.INFO, duration=4)
QMessageBox.warning(qgis.utils.iface.mainWindow(), u"Erreur(s)", u'Une correspondance de style a été trouvée dans la table urba_plu_' + enviro_versionning + '.corresp_layer_styles <br> mais le style est absent de la table public.layer_styles <br> layerid : <b>' + layerid + '</b>, stylename : <b>' + stylename + '</b>')
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Pas de style spécifique trouvé pour cette version', level=QgsMessageBar.INFO, duration=2)
# Chargement du contexte utilisateur
def load_user_context(qgis, iddoc):
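    # Records the id of the version being displayed in urba_plu_<env>.v_user_context
    # (presumably read on the database side to expose the user's current version).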
sql = 'INSERT INTO urba_plu_' + enviro_versionning + '.v_user_context (current_version) VALUES (\'' + iddoc + '\')'
execute_request(qgis, sql, 'Afficher la version')
########################### LES ACTIONS #############################
# Affiche la version sélectionnée
def action_afficher(qgis, feature):
#récupération des données
iddoc = feature['version']
com = feature['com']
num_proj = feature['num_proj']
num_version = feature['num_version']
cnig = feature['code_cnig']
QgsExpressionContextUtils.setProjectVariable('version_courante_plui',iddoc)
QgsExpressionContextUtils.setProjectVariable('code_cnig_courant_plui',cnig)
# Chargement du contexte utilisateur
load_user_context(qgis, iddoc)
# ZOOM et rafraichissement du style des couches du projet depuis la table layer_styles (seulement pour prescriptions et informations)
if com != 'GAM':
zoom_rename(qgis, iddoc, com, num_proj, num_version, 'com')
if int(num_proj) >= 400 and cnig is not None:
load_layer_style(qgis, cnig, 'com', enviro_versionning)
else:
zoom_rename(qgis, iddoc, com, num_proj, num_version, 'epci')
if int(num_proj) >= 400 and cnig is not None:
load_layer_style(qgis, cnig, 'epci', enviro_versionning)
###############################################################################################################################
# Affiche la version mère
def action_afficher_la_version_mere(qgis, feature):
version_mere = feature['version_mere']
com = version_mere[:3]
num_proj = version_mere[4:7]
num_version = version_mere[-2:]
QgsExpressionContextUtils.setProjectVariable('version_courante_plui',version_mere)
# ZOOM
if com != 'GAM':
zoom_rename(qgis, version_mere, com, num_proj, num_version, 'com')
else:
zoom_rename(qgis, version_mere, com, num_proj, num_version, 'epci')
###############################################################################################################################
# Duplique ou met à jour le statut de version en fonction de l'opération renseignée
#
def action_operation_version(qgis, operation, feature):
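    # 'operation' is interpolated into the name of a PostgreSQL function, so it must match
    # an existing urba_plu_<env>.<operation>_version(...) function (e.g. figer, valider).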
version = feature['version']
reply = QMessageBox.question(qgis.utils.iface.mainWindow(), 'ATTENTION', u'Etes-vous sûr de vouloir <b>' + operation + '</b> la version \'' + version + '\' ? Dans l\'environnement de ' + enviro_versionning, QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
#requete SQL
sql = 'SELECT urba_plu_' + enviro_versionning + '.' + operation + '_version(\'' + version + '\');'
execute_request(qgis, sql, operation + ' la version')
qgis.utils.iface.messageBar().pushMessage("Info", u'Action sur la version ' + version + u' terminée avec succès', level=QgsMessageBar.INFO)
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Action annulée', level=QgsMessageBar.INFO, duration=2)
###############################################################################################################################
def figer_version(qgis, feature):
try :
version = feature['version']
#QgsMessageLog.logMessage('version :' + version, 'Python')
#requete SQL
sql = 'SELECT urba_plu_' + enviro_versionning + '.figer_version(\'' + version + '\');'
reply = QMessageBox.question(qgis.utils.iface.mainWindow(), 'ATTENTION', u'Etes-vous sûr de vouloir <b>FIGER</b> la version \'' + version + '\' ? dans l\'environnement ' + enviro_versionning , QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
# connexion à la base
conn = psycopg2.connect("service=bd_prod")
cur = conn.cursor()
#commit + fermeture de connexion
cur.execute(sql)
conn.commit()
cur.close()
qgis.utils.iface.messageBar().pushMessage("Info", u'Action sur la version ' + version + u' terminée avec succès', level=QgsMessageBar.INFO)
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Action annulée', level=QgsMessageBar.INFO, duration=2)
except Exception, e:
qgis.utils.iface.messageBar().pushMessage("Erreur", u'Impossible de changer le statut de cette version : ' + str(e).decode("utf-8"), level=QgsMessageBar.CRITICAL)
###############################################################################################################################
def valider_version(qgis, feature):
try :
version = feature['version']
#QgsMessageLog.logMessage('version :' + version, 'Python')
#requete SQL
sql = 'SELECT urba_plu_' + enviro_versionning + '.valider_version(\'' + version + '\');'
reply = QMessageBox.question(qgis.utils.iface.mainWindow(), 'Attention ... ATTENTION !', u'CECI EST IRREVERSIBLE !!! Etes-vous sûr de vouloir <b>VALIDER</b> la version \'' + version + '\' ? dans l\'environnement ' + enviro_versionning , QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
# connexion à la base
conn = psycopg2.connect("service=bd_prod")
cur = conn.cursor()
#commit + fermeture de connexion
cur.execute(sql)
conn.commit()
cur.close()
qgis.utils.iface.messageBar().pushMessage("Info", u'Action sur la version ' + version + u' terminée avec succès', level=QgsMessageBar.INFO)
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Action annulée', level=QgsMessageBar.INFO, duration=2)
except Exception, e:
qgis.utils.iface.messageBar().pushMessage("Erreur", u'Impossible de changer le statut de cette version : ' + str(e).decode("utf-8"), level=QgsMessageBar.CRITICAL)
###############################################################################################################################
def dupliquer_la_version(qgis, feature):
try :
version = feature['version']
#QgsMessageLog.logMessage('version : ' + version, 'Python', 0)
if version == 'GAM_000_00':
qgis.utils.iface.messageBar().pushMessage("Attention", u'Cette version ne peut pas être dupliquée', level=QgsMessageBar.WARNING)
else:
reply = QMessageBox.question(qgis.utils.iface.mainWindow(), 'ATTENTION', u'Etes-vous sûr de vouloir <b>DUPLIQUER</b> la version \'' + version + '\' ?', QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
insee = feature['insee']
com = feature['com']
num_proj = feature['num_proj']
if num_proj == '000':
new_num_proj = '001'
else:
new_num_proj = feature['num_proj']
                #else:
                # TO BE MODIFIED to let the user pick a project? With a combobox?
                # NO: a version is duplicated within its own project, except for project 000
                # WITHOUT a combobox: only duplicate versions listed in the frozen-versions table?
                # NOT THAT EITHER, because the reference version must also be duplicable!
num_version = feature['num_version']
conn = psycopg2.connect("service=bd_prod")
cur = conn.cursor()
# récupération du num de version max
sql = 'SELECT max(num_version::integer) FROM urba_plu_' + enviro_versionning + '.versionning_plui WHERE com=\''+com+'\' AND num_proj=\''+new_num_proj+'\';'
QgsMessageLog.logMessage('sql : ' + sql, 'Python', 0)
cur.execute(sql)
new_num_version = str(int(cur.fetchall()[0][0])+1).zfill(2)
new_version = com + '_' + new_num_proj + '_' + new_num_version
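                # e.g. com='ABC', new_num_proj='001' and a current max version of 2 give
                # new_num_version='03' and new_version='ABC_001_03' (illustrative values)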
QgsMessageLog.logMessage('new_version : ' + new_version, 'Python', 0)
# appel de la fonction initialisation_version
sql = 'SELECT urba_plu_' + enviro_versionning + '.dupliquer_version(\'' + version + '\' , \'' + new_version + '\');'
QgsMessageLog.logMessage('sql : ' + sql, 'Python', 0)
execute_request(qgis, sql, 'Dupliquer la version')
# Chargement du contexte utilisateur
load_user_context(qgis, new_version)
# Affichage de la nouvelle version
QgsExpressionContextUtils.setProjectVariable('version_courante_plui',new_version)
# ZOOM
if com != 'GAM':
zoom_rename(qgis, new_version, com, num_proj, new_num_version, 'com')
else:
zoom_rename(qgis, new_version, com, num_proj, new_num_version, 'epci')
#qgis.utils.iface.showAttributeTable('v_versionning_plui_0220170209182519905')
qgis.utils.iface.messageBar().pushMessage("Info", u'Nouvelle version générée : commune : ' + com + u' / projet : ' + new_num_proj + u' / version : ' + new_num_version, level=QgsMessageBar.INFO)
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Action annulée', level=QgsMessageBar.INFO, duration=2)
except Exception, e:
qgis.utils.iface.messageBar().pushMessage("Erreur", u'Impossible de créer une nouvelle version : ' + str(e).decode("utf-8"), level=QgsMessageBar.CRITICAL)
###############################################################################################################################
def dupliquer_le_projet(qgis, feature):
try :
version = feature['version']
#QgsMessageLog.logMessage('version : ' + version, 'Python', 0)
if version == 'GAM_000_00':
qgis.utils.iface.messageBar().pushMessage("Attention", u'Cette version ne peut pas être dupliquée', level=QgsMessageBar.WARNING)
else:
reply = QMessageBox.question(qgis.utils.iface.mainWindow(), 'ATTENTION', u'Etes-vous sûr de vouloir <b>DUPLIQUER LE PROJET</b> \'' + version + '\' ?', QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
insee = feature['insee']
com = feature['com']
num_proj = feature['num_proj']
num_version = feature['num_version']
conn = psycopg2.connect("service=bd_prod")
cur = conn.cursor()
# récupération du num de projet max
if int(num_proj) < 400:
sql = 'SELECT max(num_proj::integer) FROM urba_plu_' + enviro_versionning + '.versionning_plui WHERE num_proj::integer < 400;'
elif int(num_proj) >= 400 and int(num_proj) < 800:
sql = 'SELECT max(num_proj::integer) FROM urba_plu_' + enviro_versionning + '.versionning_plui WHERE num_proj::integer >= 400 AND num_proj::integer < 800;'
else:
# Pour les informations
sql = 'SELECT max(num_proj::integer) FROM urba_plu_' + enviro_versionning + '.versionning_plui WHERE num_proj::integer >= 800;'
QgsMessageLog.logMessage('requete : ' + sql, 'Python', 0)
cur.execute(sql)
new_num_proj = str(int(cur.fetchall()[0][0])+1).zfill(3)
#QgsMessageLog.logMessage('new projet : ' + new_num_proj, 'Python', 0)
new_num_version = '01'
new_version = com + '_' + new_num_proj + '_' + new_num_version
QgsMessageLog.logMessage('new_version : ' + new_version, 'Python', 0)
# appel de la fonction initialisation_version
sql = 'SELECT urba_plu_' + enviro_versionning + '.dupliquer_version(\'' + version + '\' , \'' + new_version + '\');'
execute_request(qgis, sql, 'Dupliquer le projet')
#Affichage de la nouvelle version
QgsExpressionContextUtils.setProjectVariable('version_courante_plui',new_version)
# Chargement du contexte utilisateur
load_user_context(qgis, new_version)
# ZOOM
if com != 'GAM':
zoom_rename_com(qgis, new_version, com, new_num_proj, new_num_version)
else:
zoom_rename_epci(qgis, new_version, com, new_num_proj, new_num_version)
#qgis.utils.iface.showAttributeTable('v_versionning_plui_0220170209182519905')
qgis.utils.iface.messageBar().pushMessage("Info", u'Nouvelle version générée : commune : ' + com + u' / projet : ' + new_num_proj + u' / version : ' + new_num_version, level=QgsMessageBar.INFO)
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Action annulée', level=QgsMessageBar.INFO, duration=2)
except Exception, e:
qgis.utils.iface.messageBar().pushMessage("Erreur", u'Impossible de créer un nouveau projet : ' + str(e).decode("utf-8"), level=QgsMessageBar.CRITICAL)
################################################################# SUPPRIMER UNE VERSION ##############################################################
def supprimer_la_version(qgis, feature):
try :
version = feature['version']
#QgsMessageLog.logMessage('version :' + version, 'Python')
reply = QMessageBox.question(qgis.utils.iface.mainWindow(), 'ATTENTION', u'Etes-vous sûr de vouloir <b>SUPPRIMER DEFINITIVEMENT</b> la version \'' + version + '\' ?', QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
# connexion à la base
conn = psycopg2.connect("service=bd_prod")
cur = conn.cursor()
#requete SQL
sql = 'SELECT urba_plu_' + enviro_versionning + '.delete_ensemble_doc_version(\'' + version + '\');'
#commit + fermeture de connexion
cur.execute(sql)
conn.commit()
cur.close()
qgis.utils.iface.messageBar().pushMessage("Info", u'Action sur la version ' + version + u' terminée avec succès', level=QgsMessageBar.INFO)
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Action annulée', level=QgsMessageBar.INFO, duration=2)
except Exception, e:
qgis.utils.iface.messageBar().pushMessage("Erreur", u'Impossible de changer le statut de cette version : ' + str(e).decode("utf-8"), level=QgsMessageBar.CRITICAL)
################################################################## SOUMETTRE UNE REUNION #############################################################
def soumettre_reunion(qgis, feature):
try :
# récupération des attributs de la version cliquée
version = feature['version']
#TESTS COMBO BOX
#d = QDialog(qgis.utils.iface.mainWindow())
#c = QComboBox(d)
#c.addItems(['version1','version2','version3'])
#d.show()
#result = d.exec_()
#QgsMessageLog.logMessage('version :' + version, 'Python')
reply = QMessageBox.question(qgis.utils.iface.mainWindow(), 'ATTENTION', u'Etes-vous sûr de vouloir <b>SOUMETTRE</b> la version \'' + version + '\' ?', QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
# connexion à la base
conn = psycopg2.connect("service=bd_prod")
cur = conn.cursor()
#requete SQL
sql = 'SELECT urba_plu_' + enviro_versionning + '.soumettre_version(\'' + version + '\');'
#commit + fermeture de connexion
cur.execute(sql)
conn.commit()
cur.close()
qgis.utils.iface.messageBar().pushMessage("Info", u'Action sur la version ' + version + u' terminée avec succès', level=QgsMessageBar.INFO)
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Action annulée', level=QgsMessageBar.INFO, duration=2)
except Exception, e:
qgis.utils.iface.messageBar().pushMessage("Erreur", u'Impossible de changer le statut de cette version : '+ str(e).decode("utf-8"), level=QgsMessageBar.CRITICAL)
################################################################## ENLEVER UNE VERSION D UNE REUNION #############################################################
def enlever_reunion(qgis, feature):
try :
# récupération des attributs de la version cliquée
version = feature['version']
#QgsMessageLog.logMessage('version :' + version, 'Python')
reply = QMessageBox.question(qgis.utils.iface.mainWindow(), 'ATTENTION', u'Etes-vous sûr de <b>NE PLUS VOULOIR SOUMETTRE</b> la version \'' + version + '\' ?', QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
# connexion à la base
conn = psycopg2.connect("service=bd_prod")
cur = conn.cursor()
#requete SQL
sql = 'SELECT urba_plu_' + enviro_versionning + '.enlever_version(\'' + version + '\');'
#commit + fermeture de connexion
cur.execute(sql)
conn.commit()
cur.close()
qgis.utils.iface.messageBar().pushMessage("Info", u'Action sur la version ' + version + u' terminée avec succès', level=QgsMessageBar.INFO)
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Action annulée', level=QgsMessageBar.INFO, duration=2)
except Exception, e:
qgis.utils.iface.messageBar().pushMessage("Erreur", u'Impossible de changer le statut de cette version : ' + str(e).decode("utf-8"), level=QgsMessageBar.CRITICAL)
################################################################## AFFICHER LA LISTE DES VERSIONS POUR UNE REUNION #############################################################
def afficher_version_reunion(qgis, feature):
commune = feature['com']
v_presentees = feature['v_presentees']
v_retenues = feature['v_retenues']
v_non_retenues = feature['v_non_retenues']
v_validees = feature['v_validees']
date_reu = feature['date_reu']
if commune == 'GAM':
trigram = 'epci'
else:
trigram = 'com'
# requete pour la couche versions PRESENTEES
versionning_layer = registry.mapLayer('v_planning_plui_pres_layer_' + trigram )
filter = versionning_layer.subsetString()
    if v_presentees is None:
#qgis.utils.iface.messageBar().pushMessage("Info", u'Pas de version présentée pour cette réunion', level=QgsMessageBar.WARNING)
versionStringP = 'IS NULL'
else:
versionStringP = 'IN ' + v_presentees.replace('{','(\'').replace('}','\')').replace(',','\',\'')
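        # e.g. v_presentees = '{ABC_001_01,ABC_001_02}' becomes "IN ('ABC_001_01','ABC_001_02')" (illustrative values)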
newFilter= re.sub('&.*&', '&*/"version" '+ versionStringP +'/*&' , filter, flags=re.DOTALL)
versionning_layer.setSubsetString(newFilter)
versionning_layer.setName(u'Versions PRESENTEES à la réunion '+commune+' du '+date_reu.toString("dd/MM/yyyy"))
#qgis.utils.iface.showAttributeTable(versionning_layer)
# requete pour la couche versions RETENUES
versionning_layer = registry.mapLayer('v_planning_plui_ret_layer_' + trigram)
filter = versionning_layer.subsetString()
    if v_retenues is None:
#qgis.utils.iface.messageBar().pushMessage("Info", u'Pas de version retenue pour cette réunion', level=QgsMessageBar.WARNING)
versionStringR = 'IS NULL'
else:
versionStringR = 'IN ' + v_retenues.replace('{','(\'').replace('}','\')').replace(',','\',\'')
newFilter= re.sub('&.*&', '&*/"version" '+ versionStringR +'/*&' , filter, flags=re.DOTALL)
versionning_layer.setSubsetString(newFilter)
versionning_layer.setName(u'Versions RETENUES à la réunion '+commune+' du '+date_reu.toString("dd/MM/yyyy"))
#qgis.utils.iface.showAttributeTable(versionning_layer)
# requete pour la couche versions REJETEES
versionning_layer = registry.mapLayer('v_planning_plui_rej_layer_' + trigram)
filter = versionning_layer.subsetString()
    if v_non_retenues is None:
#qgis.utils.iface.messageBar().pushMessage("Info", u'Pas de version non retenue pour cette réunion', level=QgsMessageBar.WARNING)
versionStringNR = 'IS NULL'
else:
versionStringNR = 'IN ' + v_non_retenues.replace('{','(\'').replace('}','\')').replace(',','\',\'')
newFilter= re.sub('&.*&', '&*/"version" '+ versionStringNR +'/*&' , filter, flags=re.DOTALL)
versionning_layer.setSubsetString(newFilter)
versionning_layer.setName(u'Versions REJETEES à la réunion '+commune+' du '+date_reu.toString("dd/MM/yyyy"))
#qgis.utils.iface.showAttributeTable(versionning_layer)
# requete pour la couche versions VALIDEES
versionning_layer = registry.mapLayer('v_planning_plui_val_layer_' + trigram)
filter = versionning_layer.subsetString()
    if v_validees is None:
        #qgis.utils.iface.messageBar().pushMessage("Info", u'Pas de version validees pour cette réunion', level=QgsMessageBar.WARNING)
        versionStringV = 'IS NULL'
    else:
        versionStringV = 'IN ' + v_validees.replace('{','(\'').replace('}','\')').replace(',','\',\'')
    newFilter= re.sub('&.*&', '&*/"version" '+ versionStringV +'/*&' , filter, flags=re.DOTALL)
versionning_layer.setSubsetString(newFilter)
versionning_layer.setName(u'Version VALIDEE à la réunion '+commune+' du '+date_reu.toString("dd/MM/yyyy"))
#qgis.utils.iface.showAttributeTable(versionning_layer)
################################################################## POST REUNION : REJETER LA VERSION #############################################################
def rejeter_reunion(qgis, feature):
try :
# récupération des attributs de la version cliquée
version = feature['version']
#QgsMessageLog.logMessage('version :' + version, 'Python')
reply = QMessageBox.question(qgis.utils.iface.mainWindow(), 'ATTENTION', u'Etes-vous sûr de vouloir <b>REJETER</b> la version \'' + version + '\' ?', QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
# connexion à la base
conn = psycopg2.connect("service=bd_prod")
cur = conn.cursor()
#requete SQL
sql = 'SELECT urba_plu_' + enviro_versionning + '.rejeter_version(\'' + version + '\');'
#commit + fermeture de connexion
cur.execute(sql)
conn.commit()
cur.close()
qgis.utils.iface.messageBar().pushMessage("Info", u'Action sur la version ' + version + u' terminée avec succès', level=QgsMessageBar.INFO)
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Action annulée', level=QgsMessageBar.INFO, duration=2)
except Exception, e:
qgis.utils.iface.messageBar().pushMessage("Erreur", u'Impossible de changer le statut de cette version : ' + str(e).decode("utf-8"), level=QgsMessageBar.CRITICAL)
################################################################## POST REUNION : RETENIR LA VERSION #############################################################
def retenir_reunion(qgis, feature):
try :
# récupération des attributs de la version cliquée
version = feature['version']
#QgsMessageLog.logMessage('version :' + version, 'Python')
reply = QMessageBox.question(qgis.utils.iface.mainWindow(), 'ATTENTION', u'Etes-vous sûr de vouloir <b>RETENIR</b> la version \'' + version + '\' ?', QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
# connexion à la base
conn = psycopg2.connect("service=bd_prod")
cur = conn.cursor()
#requete SQL
sql = 'SELECT urba_plu_' + enviro_versionning + '.retenir_version(\'' + version + '\');'
#commit + fermeture de connexion
cur.execute(sql)
conn.commit()
cur.close()
qgis.utils.iface.messageBar().pushMessage("Info", u'Action sur la version ' + version + u' terminée avec succès', level=QgsMessageBar.INFO)
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Action annulée', level=QgsMessageBar.INFO, duration=2)
except Exception, e:
qgis.utils.iface.messageBar().pushMessage("Erreur", u'Impossible de changer le statut de cette version : ' + str(e).decode("utf-8"), level=QgsMessageBar.CRITICAL)
################################################################## POST REUNION : VALIDER LA VERSION #############################################################
def valider_reunion(qgis, feature):
try :
# récupération des attributs de la version cliquée
version = feature['version']
#QgsMessageLog.logMessage('version :' + version, 'Python')
reply = QMessageBox.question(qgis.utils.iface.mainWindow(), 'ATTENTION', u'Etes-vous sûr de vouloir <b>VALIDER</b> la version \'' + version + '\' ?', QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
# connexion à la base
conn = psycopg2.connect("service=bd_prod")
cur = conn.cursor()
#requete SQL
sql = 'SELECT urba_plu_' + enviro_versionning + '.valider_version(\'' + version + '\');'
#commit + fermeture de connexion
cur.execute(sql)
conn.commit()
cur.close()
qgis.utils.iface.messageBar().pushMessage("Info", u'Action sur la version ' + version + u' terminée avec succès', level=QgsMessageBar.INFO)
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Action annulée', level=QgsMessageBar.INFO, duration=2)
except Exception, e:
qgis.utils.iface.messageBar().pushMessage("Erreur", u'Impossible de changer le statut de cette version : ' + str(e).decode("utf-8"), level=QgsMessageBar.CRITICAL)
################################################################## PRE REUNION : AFFECTER LES VERSIONS CANDIDATES #############################################################
def affecter_candidates(qgis, feature):
try :
# récupération des attributs de la version cliquée
com = feature['com']
reunion = feature['ogc_fid']
date_reu = feature['date_reu']
reply = QMessageBox.question(qgis.utils.iface.mainWindow(), 'ATTENTION', u'Etes-vous sûr de vouloir AFFECTER LES VERSIONS CANDIDATES à cette réunion ?', QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
conn = psycopg2.connect("service=bd_prod")
cur = conn.cursor()
sql = 'SELECT urba_plu_' + enviro_versionning + '.affecter_versions_avant_reu(\'' + com + '\', \'' + str(reunion) + '\', \'' + date_reu.toString("dd/MM/yyyy") +'\');'
QgsMessageLog.logMessage('sql :' + sql, 'Python')
cur.execute(sql)
conn.commit()
cur.close()
qgis.utils.iface.messageBar().pushMessage("Info", u'Action sur la reunion ' + str(reunion) + u' terminée avec succès', level=QgsMessageBar.INFO)
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Action annulée', level=QgsMessageBar.INFO, duration=2)
except :
qgis.utils.iface.messageBar().pushMessage("Erreur", u"Impossible d'effectuer l'action sur cette réunion", level=QgsMessageBar.CRITICAL)
raise
################################################################## POST REUNION : AFFECTER LES VERSIONS RETENUES REJETEES VALIDEES #############################################################
def affecter_retenues_oupas(qgis, feature):
try :
# récupération des attributs de la version cliquée
com = feature['com']
reunion = feature['ogc_fid']
reply = QMessageBox.question(qgis.utils.iface.mainWindow(), 'ATTENTION', u'Etes-vous sûr de vouloir AFFECTER LES VERSIONS RETENUES ET REJETEES à cette réunion ?', QMessageBox.Yes, QMessageBox.No)
if reply == QMessageBox.Yes:
conn = psycopg2.connect("service=bd_prod")
cur = conn.cursor()
sql = 'SELECT urba_plu_' + enviro_versionning + '.affecter_versions_apres_reu(\'' + com + '\', ' + str(reunion) + ');'
cur.execute(sql)
#conn.commit()
cur.close()
qgis.utils.iface.messageBar().pushMessage("Info", u'Action sur la reunion ' + str(reunion) + u' terminée avec succès', level=QgsMessageBar.INFO)
else:
qgis.utils.iface.messageBar().pushMessage("Info", u'Action annulée', level=QgsMessageBar.INFO, duration=2)
except :
qgis.utils.iface.messageBar().pushMessage("Erreur", u"Impossible d'effectuer l'action sur cette réunion", level=QgsMessageBar.CRITICAL)
raise
################################################################## VERIFIER SI DES ENTITES N'ONT PAS DE GEOMETRIE #############################################################
def verifier_geometries(qgis, feature):
try:
# récupération des attributs de la version cliquée
version = feature['version']
with psycopg2.connect("service=bd_prod") as cnx:
with cnx.cursor() as cursor:
#requete SQL
sql = 'SELECT urba_plu_' + enviro_versionning + '.check_geoms(\'' + version + '\');'
cursor.execute(sql)
result = cursor.fetchall()[0][0]
if result == 'OK':
QMessageBox.information(qgis.utils.iface.mainWindow(), u"Info", 'Tout est OK !')
else:
QMessageBox.warning(qgis.utils.iface.mainWindow(), u"Erreur(s)", result)
except Exception, e:
qgis.utils.iface.messageBar().pushMessage("Erreur", u'Impossible de vérifier les géométries : ' + str(e).decode("utf-8"), level=QgsMessageBar.CRITICAL)
|
gpl-3.0
|
phektus/Django-Google-AppEngine-OpenId-Auth
|
django/contrib/gis/db/backends/postgis/operations.py
|
292
|
25840
|
import re
from decimal import Decimal
from django.conf import settings
from django.contrib.gis.db.backends.base import BaseSpatialOperations
from django.contrib.gis.db.backends.util import SpatialOperation, SpatialFunction
from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql_psycopg2.base import DatabaseOperations
from django.db.utils import DatabaseError
#### Classes used in constructing PostGIS spatial SQL ####
class PostGISOperator(SpatialOperation):
"For PostGIS operators (e.g. `&&`, `~`)."
def __init__(self, operator):
super(PostGISOperator, self).__init__(operator=operator)
class PostGISFunction(SpatialFunction):
"For PostGIS function calls (e.g., `ST_Contains(table, geom)`)."
def __init__(self, prefix, function, **kwargs):
super(PostGISFunction, self).__init__(prefix + function, **kwargs)
class PostGISFunctionParam(PostGISFunction):
"For PostGIS functions that take another parameter (e.g. DWithin, Relate)."
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)'
class PostGISDistance(PostGISFunction):
"For PostGIS distance operations."
dist_func = 'Distance'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s'
def __init__(self, prefix, operator):
super(PostGISDistance, self).__init__(prefix, self.dist_func,
operator=operator)
class PostGISSpheroidDistance(PostGISFunction):
"For PostGIS spherical distance operations (using the spheroid)."
dist_func = 'distance_spheroid'
sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s) %(operator)s %%s'
def __init__(self, prefix, operator):
# An extra parameter in `end_subst` is needed for the spheroid string.
super(PostGISSpheroidDistance, self).__init__(prefix, self.dist_func,
operator=operator)
class PostGISSphereDistance(PostGISDistance):
"For PostGIS spherical distance operations."
dist_func = 'distance_sphere'
class PostGISRelate(PostGISFunctionParam):
"For PostGIS Relate(<geom>, <pattern>) calls."
pattern_regex = re.compile(r'^[012TF\*]{9}$')
def __init__(self, prefix, pattern):
if not self.pattern_regex.match(pattern):
raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
super(PostGISRelate, self).__init__(prefix, 'Relate')
class PostGISOperations(DatabaseOperations, BaseSpatialOperations):
compiler_module = 'django.contrib.gis.db.models.sql.compiler'
name = 'postgis'
postgis = True
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
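    # e.g. matches '1.5.2' and captures major=1, minor1=5, minor2=2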
valid_aggregates = dict([(k, None) for k in
('Collect', 'Extent', 'Extent3D', 'MakeLine', 'Union')])
Adapter = PostGISAdapter
Adaptor = Adapter # Backwards-compatibility alias.
def __init__(self, connection):
super(PostGISOperations, self).__init__(connection)
# Trying to get the PostGIS version because the function
# signatures will depend on the version used. The cost
# here is a database query to determine the version, which
# can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple
# comprising user-supplied values for the major, minor, and
# subminor revision of PostGIS.
try:
if hasattr(settings, 'POSTGIS_VERSION'):
vtup = settings.POSTGIS_VERSION
if len(vtup) == 3:
# The user-supplied PostGIS version.
version = vtup
else:
# This was the old documented way, but it's stupid to
# include the string.
version = vtup[1:4]
else:
vtup = self.postgis_version_tuple()
version = vtup[1:]
# Getting the prefix -- even though we don't officially support
# PostGIS 1.2 anymore, keeping it anyway in case a prefix change
# for something else is necessary.
if version >= (1, 2, 2):
prefix = 'ST_'
else:
prefix = ''
self.geom_func_prefix = prefix
self.spatial_version = version
except DatabaseError:
raise ImproperlyConfigured('Cannot determine PostGIS version for database "%s". '
'GeoDjango requires at least PostGIS version 1.3. '
'Was the database created from a spatial database '
'template?' % self.connection.settings_dict['NAME']
)
except Exception, e:
# TODO: Raise helpful exceptions as they become known.
raise
# PostGIS-specific operators. The commented descriptions of these
# operators come from Section 7.6 of the PostGIS 1.4 documentation.
self.geometry_operators = {
# The "&<" operator returns true if A's bounding box overlaps or
# is to the left of B's bounding box.
'overlaps_left' : PostGISOperator('&<'),
# The "&>" operator returns true if A's bounding box overlaps or
# is to the right of B's bounding box.
'overlaps_right' : PostGISOperator('&>'),
# The "<<" operator returns true if A's bounding box is strictly
# to the left of B's bounding box.
'left' : PostGISOperator('<<'),
# The ">>" operator returns true if A's bounding box is strictly
# to the right of B's bounding box.
'right' : PostGISOperator('>>'),
# The "&<|" operator returns true if A's bounding box overlaps or
# is below B's bounding box.
'overlaps_below' : PostGISOperator('&<|'),
# The "|&>" operator returns true if A's bounding box overlaps or
# is above B's bounding box.
'overlaps_above' : PostGISOperator('|&>'),
# The "<<|" operator returns true if A's bounding box is strictly
# below B's bounding box.
'strictly_below' : PostGISOperator('<<|'),
# The "|>>" operator returns true if A's bounding box is strictly
# above B's bounding box.
'strictly_above' : PostGISOperator('|>>'),
# The "~=" operator is the "same as" operator. It tests actual
# geometric equality of two features. So if A and B are the same feature,
# vertex-by-vertex, the operator returns true.
'same_as' : PostGISOperator('~='),
'exact' : PostGISOperator('~='),
# The "@" operator returns true if A's bounding box is completely contained
# by B's bounding box.
'contained' : PostGISOperator('@'),
# The "~" operator returns true if A's bounding box completely contains
            # B's bounding box.
'bbcontains' : PostGISOperator('~'),
# The "&&" operator returns true if A's bounding box overlaps
# B's bounding box.
'bboverlaps' : PostGISOperator('&&'),
}
self.geometry_functions = {
'equals' : PostGISFunction(prefix, 'Equals'),
'disjoint' : PostGISFunction(prefix, 'Disjoint'),
'touches' : PostGISFunction(prefix, 'Touches'),
'crosses' : PostGISFunction(prefix, 'Crosses'),
'within' : PostGISFunction(prefix, 'Within'),
'overlaps' : PostGISFunction(prefix, 'Overlaps'),
'contains' : PostGISFunction(prefix, 'Contains'),
'intersects' : PostGISFunction(prefix, 'Intersects'),
'relate' : (PostGISRelate, basestring),
}
# Valid distance types and substitutions
dtypes = (Decimal, Distance, float, int, long)
def get_dist_ops(operator):
"Returns operations for both regular and spherical distances."
return {'cartesian' : PostGISDistance(prefix, operator),
'sphere' : PostGISSphereDistance(prefix, operator),
'spheroid' : PostGISSpheroidDistance(prefix, operator),
}
self.distance_functions = {
'distance_gt' : (get_dist_ops('>'), dtypes),
'distance_gte' : (get_dist_ops('>='), dtypes),
'distance_lt' : (get_dist_ops('<'), dtypes),
'distance_lte' : (get_dist_ops('<='), dtypes),
}
# Versions 1.2.2+ have KML serialization support.
if version < (1, 2, 2):
ASKML = False
else:
ASKML = 'ST_AsKML'
self.geometry_functions.update(
{'coveredby' : PostGISFunction(prefix, 'CoveredBy'),
'covers' : PostGISFunction(prefix, 'Covers'),
})
self.distance_functions['dwithin'] = (PostGISFunctionParam(prefix, 'DWithin'), dtypes)
# Adding the distance functions to the geometries lookup.
self.geometry_functions.update(self.distance_functions)
# The union aggregate and topology operation use the same signature
# in versions 1.3+.
if version < (1, 3, 0):
UNIONAGG = 'GeomUnion'
UNION = 'Union'
MAKELINE = False
else:
UNIONAGG = 'ST_Union'
UNION = 'ST_Union'
MAKELINE = 'ST_MakeLine'
# Only PostGIS versions 1.3.4+ have GeoJSON serialization support.
if version < (1, 3, 4):
GEOJSON = False
else:
GEOJSON = prefix + 'AsGeoJson'
        # ST_ContainsProperly, ST_MakeLine, and ST_GeoHash added in 1.4.
if version >= (1, 4, 0):
GEOHASH = 'ST_GeoHash'
BOUNDINGCIRCLE = 'ST_MinimumBoundingCircle'
self.geometry_functions['contains_properly'] = PostGISFunction(prefix, 'ContainsProperly')
else:
GEOHASH, BOUNDINGCIRCLE = False, False
# Geography type support added in 1.5.
if version >= (1, 5, 0):
self.geography = True
# Only a subset of the operators and functions are available
# for the geography type.
self.geography_functions = self.distance_functions.copy()
self.geography_functions.update({
'coveredby' : self.geometry_functions['coveredby'],
'covers' : self.geometry_functions['covers'],
'intersects' : self.geometry_functions['intersects'],
})
self.geography_operators = {
'bboverlaps' : PostGISOperator('&&'),
}
# Creating a dictionary lookup of all GIS terms for PostGIS.
gis_terms = ['isnull']
gis_terms += self.geometry_operators.keys()
gis_terms += self.geometry_functions.keys()
self.gis_terms = dict([(term, None) for term in gis_terms])
self.area = prefix + 'Area'
self.bounding_circle = BOUNDINGCIRCLE
self.centroid = prefix + 'Centroid'
self.collect = prefix + 'Collect'
self.difference = prefix + 'Difference'
self.distance = prefix + 'Distance'
self.distance_sphere = prefix + 'distance_sphere'
self.distance_spheroid = prefix + 'distance_spheroid'
self.envelope = prefix + 'Envelope'
self.extent = prefix + 'Extent'
self.extent3d = prefix + 'Extent3D'
self.force_rhr = prefix + 'ForceRHR'
self.geohash = GEOHASH
self.geojson = GEOJSON
self.gml = prefix + 'AsGML'
self.intersection = prefix + 'Intersection'
self.kml = ASKML
self.length = prefix + 'Length'
self.length3d = prefix + 'Length3D'
self.length_spheroid = prefix + 'length_spheroid'
self.makeline = MAKELINE
self.mem_size = prefix + 'mem_size'
self.num_geom = prefix + 'NumGeometries'
        self.num_points = prefix + 'npoints'
self.perimeter = prefix + 'Perimeter'
self.perimeter3d = prefix + 'Perimeter3D'
self.point_on_surface = prefix + 'PointOnSurface'
self.polygonize = prefix + 'Polygonize'
self.reverse = prefix + 'Reverse'
self.scale = prefix + 'Scale'
self.snap_to_grid = prefix + 'SnapToGrid'
self.svg = prefix + 'AsSVG'
self.sym_difference = prefix + 'SymDifference'
self.transform = prefix + 'Transform'
self.translate = prefix + 'Translate'
self.union = UNION
self.unionagg = UNIONAGG
def check_aggregate_support(self, aggregate):
"""
Checks if the given aggregate name is supported (that is, if it's
in `self.valid_aggregates`).
"""
agg_name = aggregate.__class__.__name__
return agg_name in self.valid_aggregates
def convert_extent(self, box):
"""
Returns a 4-tuple extent for the `Extent` aggregate by converting
the bounding box text returned by PostGIS (`box` argument), for
example: "BOX(-90.0 30.0, -85.0 40.0)".
"""
ll, ur = box[4:-1].split(',')
xmin, ymin = map(float, ll.split())
xmax, ymax = map(float, ur.split())
return (xmin, ymin, xmax, ymax)
def convert_extent3d(self, box3d):
"""
Returns a 6-tuple extent for the `Extent3D` aggregate by converting
        the 3d bounding-box text returned by PostGIS (`box3d` argument), for
example: "BOX3D(-90.0 30.0 1, -85.0 40.0 2)".
"""
ll, ur = box3d[6:-1].split(',')
xmin, ymin, zmin = map(float, ll.split())
xmax, ymax, zmax = map(float, ur.split())
return (xmin, ymin, zmin, xmax, ymax, zmax)
def convert_geom(self, hex, geo_field):
"""
        Converts the geometry returned from PostGIS aggregates.
"""
if hex:
return Geometry(hex)
else:
return None
def geo_db_type(self, f):
"""
Return the database field type for the given geometry field.
Typically this is `None` because geometry columns are added via
the `AddGeometryColumn` stored procedure, unless the field
has been specified to be of geography type instead.
"""
if f.geography:
if not self.geography:
raise NotImplementedError('PostGIS 1.5 required for geography column support.')
if f.srid != 4326:
raise NotImplementedError('PostGIS 1.5 supports geography columns '
'only with an SRID of 4326.')
            return 'geography(%s,%d)' % (f.geom_type, f.srid)
else:
return None
def get_distance(self, f, dist_val, lookup_type):
"""
Retrieve the distance parameters for the given geometry field,
distance lookup value, and the distance lookup type.
This is the most complex implementation of the spatial backends due to
what is supported on geodetic geometry columns vs. what's available on
projected geometry columns. In addition, it has to take into account
        the geography column type newly introduced in PostGIS 1.5.
"""
# Getting the distance parameter and any options.
if len(dist_val) == 1:
value, option = dist_val[0], None
else:
value, option = dist_val
# Shorthand boolean flags.
geodetic = f.geodetic(self.connection)
geography = f.geography and self.geography
if isinstance(value, Distance):
if geography:
dist_param = value.m
elif geodetic:
if lookup_type == 'dwithin':
raise ValueError('Only numeric values of degree units are '
'allowed on geographic DWithin queries.')
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
# Assuming the distance is in the units of the field.
dist_param = value
if (not geography and geodetic and lookup_type != 'dwithin'
and option == 'spheroid'):
# using distance_spheroid requires the spheroid of the field as
# a parameter.
return [f._spheroid, dist_param]
else:
return [dist_param]
def get_geom_placeholder(self, f, value):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
ST_Transform() function call.
"""
if value is None or value.srid == f.srid:
placeholder = '%s'
else:
# Adding Transform() to the SQL placeholder.
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
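            # e.g. 'ST_Transform(%s, 4326)' when the value must be transformed to a field with SRID 4326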
if hasattr(value, 'expression'):
# If this is an F expression, then we don't really want
# a placeholder and instead substitute in the column
# of the expression.
placeholder = placeholder % '%s.%s' % tuple(map(self.quote_name, value.cols[value.expression]))
return placeholder
def _get_postgis_func(self, func):
"""
Helper routine for calling PostGIS functions and returning their result.
"""
cursor = self.connection._cursor()
try:
try:
cursor.execute('SELECT %s()' % func)
row = cursor.fetchone()
except:
# Responsibility of callers to perform error handling.
raise
finally:
# Close out the connection. See #9437.
self.connection.close()
return row[0]
def postgis_geos_version(self):
"Returns the version of the GEOS library used with PostGIS."
return self._get_postgis_func('postgis_geos_version')
def postgis_lib_version(self):
"Returns the version number of the PostGIS library used with PostgreSQL."
return self._get_postgis_func('postgis_lib_version')
def postgis_proj_version(self):
"Returns the version of the PROJ.4 library used with PostGIS."
return self._get_postgis_func('postgis_proj_version')
def postgis_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_version')
def postgis_full_version(self):
"Returns PostGIS version number and compile-time options."
return self._get_postgis_func('postgis_full_version')
def postgis_version_tuple(self):
"""
Returns the PostGIS version as a tuple (version string, major,
minor, subminor).
"""
# Getting the PostGIS version
version = self.postgis_lib_version()
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse PostGIS version string: %s' % version)
return (version, major, minor1, minor2)
def proj_version_tuple(self):
"""
Return the version of PROJ.4 used by PostGIS as a tuple of the
major, minor, and subminor release numbers.
"""
proj_regex = re.compile(r'(\d+)\.(\d+)\.(\d+)')
proj_ver_str = self.postgis_proj_version()
m = proj_regex.search(proj_ver_str)
if m:
return tuple(map(int, [m.group(1), m.group(2), m.group(3)]))
else:
raise Exception('Could not determine PROJ.4 version from PostGIS.')
def num_params(self, lookup_type, num_param):
"""
Helper routine that returns a boolean indicating whether the number of
parameters is correct for the lookup type.
"""
def exactly_two(np): return np == 2
def two_to_three(np): return np >= 2 and np <=3
if (lookup_type in self.distance_functions and
lookup_type != 'dwithin'):
return two_to_three(num_param)
else:
return exactly_two(num_param)
def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn):
"""
        Constructs spatial SQL from the given lookup value tuple
(alias, col, db_type), the lookup type string, lookup value, and
the geometry field.
"""
alias, col, db_type = lvalue
# Getting the quoted geometry column.
geo_col = '%s.%s' % (qn(alias), qn(col))
if lookup_type in self.geometry_operators:
if field.geography and not lookup_type in self.geography_operators:
raise ValueError('PostGIS geography does not support the '
'"%s" lookup.' % lookup_type)
# Handling a PostGIS operator.
op = self.geometry_operators[lookup_type]
return op.as_sql(geo_col, self.get_geom_placeholder(field, value))
elif lookup_type in self.geometry_functions:
if field.geography and not lookup_type in self.geography_functions:
raise ValueError('PostGIS geography type does not support the '
'"%s" lookup.' % lookup_type)
# See if a PostGIS geometry function matches the lookup type.
tmp = self.geometry_functions[lookup_type]
# Lookup types that are tuples take tuple arguments, e.g., 'relate' and
# distance lookups.
if isinstance(tmp, tuple):
# First element of tuple is the PostGISOperation instance, and the
# second element is either the type or a tuple of acceptable types
                # that may be passed in as further parameters for the lookup type.
op, arg_type = tmp
# Ensuring that a tuple _value_ was passed in from the user
if not isinstance(value, (tuple, list)):
raise ValueError('Tuple required for `%s` lookup type.' % lookup_type)
# Geometry is first element of lookup tuple.
geom = value[0]
# Number of valid tuple parameters depends on the lookup type.
nparams = len(value)
if not self.num_params(lookup_type, nparams):
raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type)
# Ensuring the argument type matches what we expect.
if not isinstance(value[1], arg_type):
raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1])))
# For lookup type `relate`, the op instance is not yet created (has
# to be instantiated here to check the pattern parameter).
if lookup_type == 'relate':
op = op(self.geom_func_prefix, value[1])
elif lookup_type in self.distance_functions and lookup_type != 'dwithin':
if not field.geography and field.geodetic(self.connection):
                        # Geodetic distances are only available from Points to
# PointFields on PostGIS 1.4 and below.
if not self.connection.ops.geography:
if field.geom_type != 'POINT':
raise ValueError('PostGIS spherical operations are only valid on PointFields.')
if str(geom.geom_type) != 'Point':
raise ValueError('PostGIS geometry distance parameter is required to be of type Point.')
# Setting up the geodetic operation appropriately.
if nparams == 3 and value[2] == 'spheroid':
op = op['spheroid']
else:
op = op['sphere']
else:
op = op['cartesian']
else:
op = tmp
geom = value
# Calling the `as_sql` function on the operation instance.
return op.as_sql(geo_col, self.get_geom_placeholder(field, geom))
elif lookup_type == 'isnull':
# Handling 'isnull' lookup type
return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or ''))
raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type))
def spatial_aggregate_sql(self, agg):
"""
Returns the spatial aggregate SQL template and function for the
given Aggregate instance.
"""
agg_name = agg.__class__.__name__
if not self.check_aggregate_support(agg):
            raise NotImplementedError('%s spatial aggregate is not implemented for this backend.' % agg_name)
agg_name = agg_name.lower()
if agg_name == 'union': agg_name += 'agg'
sql_template = '%(function)s(%(field)s)'
sql_function = getattr(self, agg_name)
return sql_template, sql_function
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.postgis.models import GeometryColumns
return GeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.postgis.models import SpatialRefSys
return SpatialRefSys
|
bsd-3-clause
|
petewarden/tensorflow_makefile
|
tensorflow/contrib/layers/python/layers/__init__.py
|
5
|
1484
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""layers module with higher level NN primitives."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.layers.python.layers.feature_column import *
from tensorflow.contrib.layers.python.layers.feature_column_ops import *
from tensorflow.contrib.layers.python.layers.initializers import *
from tensorflow.contrib.layers.python.layers.layers import *
from tensorflow.contrib.layers.python.layers.optimizers import *
from tensorflow.contrib.layers.python.layers.regularizers import *
from tensorflow.contrib.layers.python.layers.summaries import *
from tensorflow.contrib.layers.python.ops.bucketization_op import *
from tensorflow.contrib.layers.python.ops.sparse_feature_cross_op import *
|
apache-2.0
|
yghannam/teuthology
|
docs/_static/create_nodes.py
|
9
|
1891
|
#!/usr/bin/env python
# A sample script that can be used while setting up a new teuthology lab
# This script will connect to the machines in your lab, and populate a
# paddles instance with their information.
#
# You WILL need to modify it.
import logging
import sys
from teuthology.orchestra.remote import Remote
from teuthology.lock import update_inventory
paddles_url = 'http://paddles.example.com/nodes/'
machine_type = 'typica'
lab_domain = 'example.com'
# Don't change the user. It won't work at this time.
user = 'ubuntu'
# We are populating 'typica003' -> 'typica191' (the range end below is exclusive)
machine_index_range = range(3, 192)
log = logging.getLogger(sys.argv[0])
logging.getLogger("requests.packages.urllib3.connectionpool").setLevel(
logging.WARNING)
def get_shortname(machine_type, index):
"""
Given a number, return a hostname. Example:
get_shortname('magna', 3) = 'magna003'
Modify to suit your needs.
"""
return machine_type + str(index).rjust(3, '0')
def get_info(user, fqdn):
remote = Remote('@'.join((user, fqdn)))
return remote.inventory_info
def main():
shortnames = [get_shortname(machine_type, i) for i in machine_index_range]
fqdns = ['.'.join((name, lab_domain)) for name in shortnames]
for fqdn in fqdns:
log.info("Creating %s", fqdn)
base_info = dict(
name=fqdn,
locked=True,
locked_by='initial@setup',
machine_type=machine_type,
description="Initial node creation",
)
try:
info = get_info(user, fqdn)
base_info.update(info)
base_info['up'] = True
except Exception as exc:
log.error("{fqdn} is down".format(fqdn=fqdn))
base_info['up'] = False
base_info['description'] = repr(exc)
update_inventory(base_info)
if __name__ == '__main__':
main()
|
mit
|
siddharthv/sm
|
drivers/RawHBASR.py
|
10
|
15273
|
#!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# FIXME
# RawHBASR: Hardware HBA LUN driver, e.g. Fibre Channel or SAS or
# hardware based iSCSI
# FIXME
import B_util
import SR, VDI, SRCommand, HBASR, LUNperVDI
import util, scsiutil, devscan
import xs_errors
import os
CAPABILITIES = ["SR_PROBE", "VDI_ATTACH", "VDI_DETACH", "VDI_DELETE"]
CONFIGURATION = [ [ 'type', 'FIXME (optional)' ] ]
DRIVER_INFO = {
'name': 'RawHBA LUN-per-VDI driver',
'description': 'SR plugin which represents LUNs as VDIs sourced by hardware HBA adapters, FC support',
'vendor': 'Citrix Systems Inc',
'copyright': '(C) 2012 Citrix Systems Inc',
'driver_version': '1.0',
'required_api_version': '1.0',
'capabilities': CAPABILITIES,
'configuration': CONFIGURATION
}
NEEDS_LOADVDIS = ["sr_scan"]
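# Only the xapi commands listed here run the LUN/VDI enumeration in _loadvdis();
# every other command skips it.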
TYPE = 'rawhba'
class RawHBASR(HBASR.HBASR):
""" Raw LUN-per-VDI HBA storage repository"""
def handles(type):
return type == TYPE
handles = staticmethod(handles)
def load(self, sr_uuid):
"""Extend super class load by initializing the hba dict.
FIXME: verify this is needed
"""
super(RawHBASR, self).load(sr_uuid)
# There are some calls (e.g. probe) where it is not needed at all
#self._init_hbadict()
self._get_stats()
        # For this SR the internal self.attached flag has no meaning
# at all. Xapi knows about its status and if it is
# not attached all the commands relying on that are not
# executed. This variable is used in various places inside the
# base class so we cannot ignore it. Setting it always to true
# will prevent sr.create and sr.scan to throw exceptions but
# they need to be fixed: attached is not the flag they should
# look for.
self.attached = True
def _loadvdis(self):
if self.cmd not in NEEDS_LOADVDIS:
return 0
if self.vdis:
return
self._init_hbadict()
count = 0
self.physical_size = 0
root_dev_id = util.getrootdevID()
xapi_session = self.session.xenapi
known_scsid = {} # dict of ids processed within the following loop
for key in self.hbadict.iterkeys():
            # We need a fresh sm_config every time because it is modified
# inside this loop
sm_config = xapi_session.SR.get_sm_config(self.sr_ref)
# The way we create vdi_path and the following check are
# not clear at all
vdi_path = os.path.join("/dev",key)
if not self.devs.has_key(vdi_path):
continue
scsi_id = scsiutil.getSCSIid(vdi_path)
if scsi_id == root_dev_id:
util.SMlog("Skipping root device %s" %scsi_id)
continue
# Avoid false positives: this SR can already contain this
# SCSIid during scan.
scsi_key = "scsi-" + scsi_id
if sm_config.has_key(scsi_key):
# if we know about this scsid we can skip this specific dev
if known_scsid.has_key(scsi_key):
util.SMlog("This SCSI id (%s) is already added" %scsi_id)
continue
else:
# marked as known to avoid adding it again to sm_config
known_scsid[scsi_key] = ""
elif util.test_SCSIid(self.session, None, scsi_id):
util.SMlog("This SCSI id (%s) is used by another SR" %scsi_id)
continue
# getuniqueserial invokes again getSCSIid -> Fix!
uuid = scsiutil.gen_uuid_from_string(
scsiutil.getuniqueserial(vdi_path)
)
# We could have checked the SCSIid but the dictionary has
# uuid as key.
# We have already checked known_scsid, though. This block is
# supposed to be always False
if self.vdis.has_key(uuid):
util.SMlog("Warning: unexpected code block reached with"
" uuid = %s" %scsi_id)
continue
obj = self.vdi(uuid)
path = self.mpathmodule.path(scsi_id)
ids = self.devs[vdi_path]
obj._query(vdi_path, ids[4], uuid, scsi_id)
self.vdis[uuid] = obj
self.physical_size += obj.size
count += 1
# If we know about it no need to add to sm_config
if known_scsid.has_key(scsi_key):
continue
# Prepare multipathing and make the other SRs know this SCSIid
# is reserved.
# Its counterpart is vdi_delete
try:
xapi_session.SR.add_to_sm_config(self.sr_ref, scsi_key, uuid)
known_scsid[scsi_key] = ""
except:
util.SMlog("Warning: add_to_sm_config failed unexpectedly")
return count
def scan(self, sr_uuid):
"""
This function is almost a copy of its base class equivalent.
Main differences are:
- Fixed erroneous size calculation
- Set VDI names automatically
- Avoid ScanRecord sync for missing VDIS (stale LUNs)
The last one is called in the sub-sub class so we cannot simply
        extend the base class function but we need to override it
"""
self._init_hbadict()
if not self.passthrough:
if not self.attached:
raise xs_errors.XenError('SRUnavailable')
self._loadvdis()
# This block is almost SR.scan but without missing sync
self._db_update()
scanrecord = SR.ScanRecord(self)
scanrecord.synchronise_new()
scanrecord.synchronise_existing()
# Fixing sizes calculation
phys_util = 0
for key in self.vdis:
vdi_ref = self.session.xenapi.VDI.get_by_uuid(key)
if B_util.is_vdi_attached(self.session, vdi_ref):
phys_util += self.vdis[key].size
self._set_stats(phys_util=phys_util)
self._set_vdis_name()
def _set_vdis_name(self):
if not self.vdis:
return
for vdi_ref in self.session.xenapi.SR.get_VDIs(self.sr_ref):
vdi_uuid = self.session.xenapi.VDI.get_uuid(vdi_ref)
try:
vdi = self.vdis[vdi_uuid]
except:
util.SMlog("Cannot set name for for %s" %vdi_uuid)
continue
self.session.xenapi.VDI.set_name_label(vdi_ref, vdi.SCSIid)
def vdi(self, uuid):
return RawHBAVDI(self, uuid)
def _get_stats(self):
stats = self.get_stats()
self.physical_size = stats['physical_size']
self.physical_utilisation = stats['physical_utilisation']
self.virtual_allocation = stats['virtual_allocation']
def get_stats(self):
stats = {}
xapi_session = self.session.xenapi
sr_ref = xapi_session.SR.get_by_uuid(self.uuid)
stats['physical_size'] = int(xapi_session.SR.get_physical_size(sr_ref))
stats['physical_utilisation'] = int(xapi_session.SR.
get_physical_utilisation(sr_ref))
stats['virtual_allocation'] = int(xapi_session.SR.
get_virtual_allocation(sr_ref))
return stats
def _set_stats(self, phys_size=None, phys_util=None):
        if phys_size is not None:
            self.physical_size = phys_size
        if phys_util is not None:
            self.physical_utilisation = phys_util
            self.virtual_allocation = phys_util
self._db_update()
def update_stats(self, phys_util):
new_util = self.physical_utilisation + phys_util
self._set_stats(phys_util=new_util)
def _add_pbd_other_config(self, key, value):
try:
pbd_ref = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
except:
util.SMlog("No pbd for sr_ref %s on host_ref %s"
%(self.sr_ref, self.host_ref))
return
try:
self.session.xenapi.PBD.add_to_other_config(pbd_ref, key, value)
except:
util.SMlog("add_to_other_config failed")
def attach(self, sr_uuid):
super(RawHBASR, self).attach(sr_uuid)
if self.mpath == 'true':
self._add_pbd_other_config('multipathed', 'true')
def _reset_pbd_other_config(self):
try:
pbd_ref = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
except:
util.SMlog("No pbd for sr_ref %s on host_ref %s"
%(self.sr_ref, self.host_ref))
for key in ["multipathed"]:
try:
self.session.xenapi.PBD.remove_from_other_config(pbd_ref, key)
except:
util.SMlog("remove_from_other_config failed")
def detach(self, sr_uuid):
""" Override base class function because we rely on xapi
to check if VDIs are in use.
Take care of removing multipath flags
"""
self._reset_pbd_other_config()
class RawHBAVDI(LUNperVDI.RAWVDI):
"""Customized LUN-per-VDI class fro RawHBA storage repository"""
def load(self, vdi_uuid):
super(RawHBAVDI, self).load(vdi_uuid)
self.managed = True
# _query can work in two ways: if scsi_id is provided when called, the
# fn gets all devices corresponding to the scsi_id and does a device rescan,
# else it just rescans the device being passed.
def _query(self, path, id, uuid=None, scsi_id=None):
"""Overloaded function with mostly duplicated code"""
if uuid:
self.uuid = uuid
else:
util.SMlog("RawHBA: uuid should not be generated..")
self.uuid = scsiutil.gen_uuid_from_string(
scsiutil.getuniqueserial(path)
)
if scsi_id:
self.SCSIid = scsi_id
else:
# It is usually unnecessary to calculate it again but scsi_id
# is used as a flag in this function and we cannot guarantee
# this info is already available at call time
self.SCSIid = scsiutil.getSCSIid(path)
self.location = self.uuid
self.vendor = scsiutil.getmanufacturer(path)
self.serial = scsiutil.getserial(path)
self.LUNid = id
# Handle a resize done at the array side. The resize gets reflected
# only when the vdi is not in detached state. Do this only if the vdi
# is known to xapi
try:
vdi_ref = self.sr.session.xenapi.VDI.get_by_uuid(self.uuid)
# If the vdi is not in attached state, do a LUN rescan
# to reflect the array LUN
dev = [path]
if scsi_id:
# We want all the devices with this scsi_id
dev = scsiutil._genReverseSCSIidmap(scsi_id)
if self.sr.srcmd.cmd == "vdi_attach":
scsiutil.refreshdev(dev)
elif not B_util.is_vdi_attached(self.sr.session, vdi_ref):
scsiutil.refreshdev(dev)
except:
pass
self.size = scsiutil.getsize(path)
self.path = path
sm_config = util.default(self, "sm_config", lambda: {})
sm_config['LUNid'] = str(self.LUNid)
sm_config['SCSIid'] = self.SCSIid
# Make sure to use kernel blkback (not blktap3) for raw LUNs
sm_config['backend-kind'] = 'vbd'
self.sm_config = sm_config
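# A minimal sketch of the two call patterns described above; the device path
# and LUN id below are placeholders, not values taken from this driver:
#
#     self._query('/dev/sdX', lun_id)                      # rescan only this device
#     self._query('/dev/sdX', lun_id, vdi_uuid, scsi_id)   # rescan every path behind scsi_id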
def attach(self, sr_uuid, vdi_uuid):
# Perform a device scan for all paths, to check for any LUN resize
scsi_id = self.sm_config['SCSIid']
devices = scsiutil._genReverseSCSIidmap(scsi_id)
xapi_session = self.session.xenapi
# At the vdi attach stage, if no devices are found for the scsi_id,
# there are two possible reasons: 1. The machine is a slave on which a bus
# scan was not performed; perform a bus scan to rectify this. 2. A genuine
# HBA bus error; throw an error back.
if len(devices) == 0:
devscan.adapters()
devices = scsiutil._genReverseSCSIidmap(scsi_id)
# If no devices are found after a bus rescan, flag an error
if len(devices) == 0:
raise xs_errors.XenError('InvalidDev', \
opterr=('No HBA Device detected with SCSI Id[%s]') % scsi_id)
# Run a query on devices against the scsi id to refresh the size
dev_lun_info = scsiutil.cacheSCSIidentifiers()
for dev in devices:
self._query(dev, dev_lun_info[dev][4], vdi_uuid)
#Update xapi with the new size
vdi_ref = self.sr.session.xenapi.VDI.get_by_uuid(vdi_uuid)
self.sr.session.xenapi.VDI.set_virtual_size(vdi_ref, str(self.size))
# Multipath enable
if self.sr.mpath == "true":
self.sr.mpathmodule.refresh(scsi_id, len(devices))
self.path = self.sr.mpathmodule.path(scsi_id)
# The SCSIid is already stored inside SR sm_config.
# We need only to trigger mpathcount
try:
cmd = ['/opt/xensource/sm/mpathcount.py', scsi_id]
util.pread2(cmd)
except:
util.SMlog("RawHBA: something wrong with mpathcount")
ret = VDI.VDI.attach(self, sr_uuid, vdi_uuid)
self.sr.update_stats(self.size)
return ret
def delete(self, sr_uuid, vdi_uuid):
util.SMlog("Raw LUN VDI delete")
scsi_id = self.sm_config['SCSIid']
xapi_session = self.session.xenapi
# Cleaning up SR sm_config
scsi_key = "scsi-" + scsi_id
xapi_session.SR.remove_from_sm_config(self.sr.sr_ref, scsi_key)
def detach(self, sr_uuid, vdi_uuid):
scsi_id = self.sm_config['SCSIid']
xapi_session = self.session.xenapi
# Multipath disable
if self.sr.mpath == "true":
#devices = scsiutil._genReverseSCSIidmap(scsi_id)
self.sr.mpathmodule.reset(scsi_id, True)
util.remove_mpathcount_field(self.sr.session, self.sr.host_ref,
self.sr.sr_ref, scsi_id)
# Get size from xapi db
vdi_ref = xapi_session.VDI.get_by_uuid(vdi_uuid)
size = int(xapi_session.VDI.get_virtual_size(vdi_ref))
self.sr.update_stats(-size)
if __name__ == '__main__':
SRCommand.run(RawHBASR, DRIVER_INFO)
else:
SR.registerSR(RawHBASR)
|
lgpl-2.1
|
jerli/sympy
|
sympy/ntheory/modular.py
|
108
|
7676
|
from __future__ import print_function, division
from sympy.core.numbers import igcdex, igcd
from sympy.core.mul import prod
from sympy.core.compatibility import as_int, reduce
from sympy.ntheory.primetest import isprime
from sympy.polys.domains import ZZ
from sympy.polys.galoistools import gf_crt, gf_crt1, gf_crt2
def symmetric_residue(a, m):
"""Return the residual mod m such that it is within half of the modulus.
>>> from sympy.ntheory.modular import symmetric_residue
>>> symmetric_residue(1, 6)
1
>>> symmetric_residue(4, 6)
-2
"""
if a <= m // 2:
return a
return a - m
def crt(m, v, symmetric=False, check=True):
r"""Chinese Remainder Theorem.
The moduli in m are assumed to be pairwise coprime. The output
is then an integer f, such that f = v_i mod m_i for each pair out
of v and m. If ``symmetric`` is False a positive integer will be
returned, else \|f\| will be less than or equal to the LCM of the
moduli, and thus f may be negative.
If the moduli are not co-prime, a correct result will still be returned
when checking is enabled: if the quick result fails the test, the slower
solve_congruence path is used. The result will be None if there is no solution.
The keyword ``check`` can be set to False if it is known that the moduli
are coprime.
As an example consider a set of residues ``U = [49, 76, 65]``
and a set of moduli ``M = [99, 97, 95]``. Then we have::
>>> from sympy.ntheory.modular import crt, solve_congruence
>>> crt([99, 97, 95], [49, 76, 65])
(639985, 912285)
This is the correct result because::
>>> [639985 % m for m in [99, 97, 95]]
[49, 76, 65]
If the moduli are not co-prime, you may receive an incorrect result
if you use ``check=False``:
>>> crt([12, 6, 17], [3, 4, 2], check=False)
(954, 1224)
>>> [954 % m for m in [12, 6, 17]]
[6, 0, 2]
>>> crt([12, 6, 17], [3, 4, 2]) is None
True
>>> crt([3, 6], [2, 5])
(5, 6)
Note that the order of gf_crt's arguments is reversed relative to crt,
and that solve_congruence takes residue, modulus pairs.
Programmer's note: rather than checking that all pairs of moduli are
coprime (an O(n**2) test) and rather than factoring all moduli and seeing
that there is no factor in common, a check that the result gives the
indicated residuals is performed -- an O(n) operation.
See Also
========
solve_congruence
sympy.polys.galoistools.gf_crt : low level crt routine used by this routine
"""
if check:
m = list(map(as_int, m))
v = list(map(as_int, v))
result = gf_crt(v, m, ZZ)
mm = prod(m)
if check:
if not all(v % m == result % m for v, m in zip(v, m)):
result = solve_congruence(*list(zip(v, m)),
check=False, symmetric=symmetric)
if result is None:
return result
result, mm = result
if symmetric:
return symmetric_residue(result, mm), mm
return result, mm
def crt1(m):
"""First part of Chinese Remainder Theorem, for multiple application.
Examples
========
>>> from sympy.ntheory.modular import crt1
>>> crt1([18, 42, 6])
(4536, [252, 108, 756], [0, 2, 0])
"""
return gf_crt1(m, ZZ)
def crt2(m, v, mm, e, s, symmetric=False):
"""Second part of Chinese Remainder Theorem, for multiple application.
Examples
========
>>> from sympy.ntheory.modular import crt1, crt2
>>> mm, e, s = crt1([18, 42, 6])
>>> crt2([18, 42, 6], [0, 0, 0], mm, e, s)
(0, 4536)
"""
result = gf_crt2(v, m, mm, e, s, ZZ)
if symmetric:
return symmetric_residue(result, mm), mm
return result, mm
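# A small usage sketch for the split form above: when many residue vectors share
# the same moduli, the invariants from crt1 can be computed once and reused with
# crt2 (residue_lists below is a placeholder iterable, not defined in this module):
#
#     mm, e, s = crt1([18, 42, 6])
#     for residues in residue_lists:
#         result, modulus = crt2([18, 42, 6], residues, mm, e, s)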
def solve_congruence(*remainder_modulus_pairs, **hint):
"""Compute the integer ``n`` that has the residual ``ai`` when it is
divided by ``mi`` where the ``ai`` and ``mi`` are given as pairs to
this function: ((a1, m1), (a2, m2), ...). If there is no solution,
return None. Otherwise return ``n`` and its modulus.
The ``mi`` values need not be co-prime. If it is known that the moduli are
not co-prime then the hint ``check`` can be set to False (default=True) and
the check for a quicker solution via crt() (valid when the moduli are
co-prime) will be skipped.
If the hint ``symmetric`` is True (default is False), the value of ``n``
will be within 1/2 of the modulus, possibly negative.
Examples
========
>>> from sympy.ntheory.modular import solve_congruence
What number is 2 mod 3, 3 mod 5 and 2 mod 7?
>>> solve_congruence((2, 3), (3, 5), (2, 7))
(23, 105)
>>> [23 % m for m in [3, 5, 7]]
[2, 3, 2]
If you prefer to work with all remainders in one list and
all moduli in another, send the arguments like this:
>>> solve_congruence(*zip((2, 3, 2), (3, 5, 7)))
(23, 105)
The moduli need not be co-prime; in this case there may or
may not be a solution:
>>> solve_congruence((2, 3), (4, 6)) is None
True
>>> solve_congruence((2, 3), (5, 6))
(5, 6)
The symmetric flag will make the result be within 1/2 of the modulus:
>>> solve_congruence((2, 3), (5, 6), symmetric=True)
(-1, 6)
See Also
========
crt : high level routine implementing the Chinese Remainder Theorem
"""
def combine(c1, c2):
"""Return the tuple (a, m) which satisfies the requirement
that n = a + i*m satisfy n = a1 + j*m1 and n = a2 = k*m2.
References
==========
- http://en.wikipedia.org/wiki/Method_of_successive_substitution
"""
a1, m1 = c1
a2, m2 = c2
a, b, c = m1, a2 - a1, m2
g = reduce(igcd, [a, b, c])
a, b, c = [i//g for i in [a, b, c]]
if a != 1:
inv_a, _, g = igcdex(a, c)
if g != 1:
return None
b *= inv_a
a, m = a1 + m1*b, m1*c
return a, m
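# Worked example of one combine step (method of successive substitution),
# added here for illustration: combine((2, 3), (3, 5)) gives (8, 15), and
# indeed 8 % 3 == 2 and 8 % 5 == 3.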
rm = remainder_modulus_pairs
symmetric = hint.get('symmetric', False)
if hint.get('check', True):
rm = [(as_int(r), as_int(m)) for r, m in rm]
# ignore redundant pairs but return None if two pairs conflict; also
# make sure that a unique set of bases is sent to gf_crt if
# they are all prime.
#
# The routine will work out less-trivial violations and
# return None, e.g. for the pairs (1,3) and (14,42) there
# is no answer because 14 mod 42 (having a gcd of 14) implies
# (14/2) mod (42/2), (14/7) mod (42/7) and (14/14) mod (42/14)
# which, being 0 mod 3, is inconsistent with 1 mod 3. But to
# preprocess the input beyond checking of another pair with 42
# or 3 as the modulus (for this example) is not necessary.
uniq = {}
for r, m in rm:
r %= m
if m in uniq:
if r != uniq[m]:
return None
continue
uniq[m] = r
rm = [(r, m) for m, r in uniq.items()]
del uniq
# if the moduli are co-prime, the crt will be significantly faster;
# checking all pairs for being co-prime gets to be slow but a prime
# test is a good trade-off
if all(isprime(m) for r, m in rm):
r, m = list(zip(*rm))
return crt(m, r, symmetric=symmetric, check=False)
rv = (0, 1)
for rmi in rm:
rv = combine(rv, rmi)
if rv is None:
break
n, m = rv
n = n % m
else:
if symmetric:
return symmetric_residue(n, m), m
return n, m
|
bsd-3-clause
|
CSF-JH/crossbarexamples
|
launchpad/test4.py
|
12
|
1629
|
import launchpad
import time, random
class Test:
FENCE = [(-1,-1), (0,-1), (1,-1), (1,0), (1,1), (0,1), (-1,1), (-1,0)]
def __init__(self, lp, mode = 0):
self.lp = lp
self.mode = mode
self.matrix = []
for y in xrange(9):
self.matrix.append([])
for x in xrange(9):
self.matrix[y].append([])
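# Each cell of `matrix` holds a stack of (red, green) colour tuples: pushlight()
# appends a colour and lights the pad, poplight() removes the most recent one and
# restores whatever colour (or darkness) lies underneath.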
def pushlight(self, x, y, r, g):
if x >= 0 and x < 9 and y >= 0 and y < 9:
self.matrix[y][x].append((r, g))
self.lp.light(x, y, r, g)
def poplight(self, x, y):
if x >= 0 and x < 9 and y >= 0 and y < 9:
if len(self.matrix[y][x]) > 0:
self.matrix[y][x].pop()
if len(self.matrix[y][x]) > 0:
c = self.matrix[y][x][-1]
else:
c = (0,0)
self.lp.light(x, y, c[0], c[1])
def pushrect(self, x0, y0, w, h, r, g):
for x in xrange(w):
for y in xrange(h):
self.pushlight(x0 + x, y0 + y, r, g)
def poprect(self, x0, y0, w, h):
for x in xrange(w):
for y in xrange(h):
self.poplight(x0 + x, y0 + y)
def run(self):
while True:
e = self.lp.poll()
if e:
if e[2]:
self.pushrect(e[0] - 1, e[1] - 1, 3, 3, 3, 3)
self.pushlight(e[0], e[1], 3, 0)
else:
self.poprect(e[0] - 1, e[1] - 1, 3, 3)
self.poplight(e[0], e[1])
time.sleep(0.01)
if __name__=="__main__":
launchPads = launchpad.findLaunchpads()
l = launchpad.launchpad(*launchPads[-1])
l.reset()
t = Test(l, 1)
t.run()
|
apache-2.0
|
haeusser/tensorflow
|
tensorflow/python/ops/accumulate_n_benchmark.py
|
114
|
5470
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for accumulate_n() in math_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import time
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
class AccumulateNBenchmark(test.Benchmark):
def _AccumulateNTemplate(self, inputs, init, shape, validate_shape):
var = gen_state_ops._temporary_variable(
shape=shape, dtype=inputs[0].dtype.base_dtype)
ref = state_ops.assign(var, init, validate_shape=validate_shape)
update_ops = [
state_ops.assign_add(
ref, tensor, use_locking=True).op for tensor in inputs
]
with ops.control_dependencies(update_ops):
return gen_state_ops._destroy_temporary_variable(
ref, var_name=var.op.name)
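# The template above captures the accumulate-in-place pattern being benchmarked:
# create a temporary variable, assign it an initial value, add every input into
# it with assign_add, then destroy the temporary and return its final value. The
# three wrappers below differ only in how that initial value and shape are chosen.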
def _AccumulateNInitializedWithFirst(self, inputs):
return self._AccumulateNTemplate(
inputs,
init=array_ops.zeros_like(inputs[0]),
shape=inputs[0].get_shape(),
validate_shape=True)
def _AccumulateNInitializedWithMerge(self, inputs):
return self._AccumulateNTemplate(
inputs,
init=array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0]),
shape=tensor_shape.vector(0),
validate_shape=False)
def _AccumulateNInitializedWithShape(self, inputs):
return self._AccumulateNTemplate(
inputs,
init=array_ops.zeros(
shape=inputs[0].get_shape(), dtype=inputs[0].dtype.base_dtype),
shape=inputs[0].get_shape(),
validate_shape=True)
def _GenerateUnorderedInputs(self, size, n):
inputs = [random_ops.random_uniform(shape=[size]) for _ in xrange(n)]
random.shuffle(inputs)
return inputs
def _GenerateReplicatedInputs(self, size, n):
return n * self._GenerateUnorderedInputs(size, 1)
def _GenerateOrderedInputs(self, size, n):
inputs = self._GenerateUnorderedInputs(size, 1)
queue = data_flow_ops.FIFOQueue(
capacity=1, dtypes=[inputs[0].dtype], shapes=[inputs[0].get_shape()])
for _ in xrange(n - 1):
op = queue.enqueue(inputs[-1])
with ops.control_dependencies([op]):
inputs.append(math_ops.tanh(1.0 + queue.dequeue()))
return inputs
def _GenerateReversedInputs(self, size, n):
inputs = self._GenerateOrderedInputs(size, n)
inputs.reverse()
return inputs
def _SetupAndRunBenchmark(self, graph, inputs, repeats, format_args):
with graph.as_default():
add_n = math_ops.add_n(inputs)
acc_n_first = self._AccumulateNInitializedWithFirst(inputs)
acc_n_merge = self._AccumulateNInitializedWithMerge(inputs)
acc_n_shape = self._AccumulateNInitializedWithShape(inputs)
test_ops = (("AddN", add_n.op),
("AccNFirst", acc_n_first.op),
("AccNMerge", acc_n_merge.op),
("AccNShape", acc_n_shape.op))
with session.Session(graph=graph):
for tag, op in test_ops:
for _ in xrange(100):
op.run() # Run for warm up.
start = time.time()
for _ in xrange(repeats):
op.run()
duration = time.time() - start
args = format_args + (tag, duration)
print(self._template.format(*args))
def _RunBenchmark(self, tag, input_fn, sizes, ninputs, repeats):
for size in sizes:
for ninput in ninputs:
graph = ops.Graph()
with graph.as_default():
inputs = input_fn(size, ninput)
format_args = (tag, size, ninput, repeats)
self._SetupAndRunBenchmark(graph, inputs, repeats, format_args)
def benchmarkAccumulateN(self):
self._template = "{:<15}" * 6
args = {
"sizes": (128, 128**2),
"ninputs": (1, 10, 100, 300),
"repeats": 100
}
benchmarks = (("Replicated", self._GenerateReplicatedInputs),
("Unordered", self._GenerateUnorderedInputs),
("Ordered", self._GenerateOrderedInputs),
("Reversed", self._GenerateReversedInputs))
print(self._template.format("", "Size", "#Inputs", "#Repeat", "Method",
"Duration"))
print("-" * 90)
for benchmark in benchmarks:
self._RunBenchmark(*benchmark, **args)
if __name__ == "__main__":
test.main()
|
apache-2.0
|
nicememory/pie
|
pyglet/pyglet/libs/x11/xinerama.py
|
46
|
2240
|
'''Wrapper for Xinerama
Generated with:
tools/genwrappers.py xinerama
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
import pyglet.lib
_lib = pyglet.lib.load_library('Xinerama')
_int_types = (c_int16, c_int32)
if hasattr(ctypes, 'c_int64'):
# Some builds of ctypes apparently do not have c_int64
# defined; it's a pretty good bet that these builds do not
# have 64-bit pointers.
_int_types += (ctypes.c_int64,)
for t in _int_types:
if sizeof(t) == sizeof(c_size_t):
c_ptrdiff_t = t
class c_void(Structure):
# c_void_p is a buggy return type, converting to int, so
# POINTER(None) == c_void_p is actually written as
# POINTER(c_void), so it can be treated as a real pointer.
_fields_ = [('dummy', c_int)]
import pyglet.libs.x11.xlib
class struct_anon_93(Structure):
__slots__ = [
'screen_number',
'x_org',
'y_org',
'width',
'height',
]
struct_anon_93._fields_ = [
('screen_number', c_int),
('x_org', c_short),
('y_org', c_short),
('width', c_short),
('height', c_short),
]
XineramaScreenInfo = struct_anon_93 # /usr/include/X11/extensions/Xinerama.h:40
Display = pyglet.libs.x11.xlib.Display
# /usr/include/X11/extensions/Xinerama.h:44
XineramaQueryExtension = _lib.XineramaQueryExtension
XineramaQueryExtension.restype = c_int
XineramaQueryExtension.argtypes = [POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/Xinerama.h:50
XineramaQueryVersion = _lib.XineramaQueryVersion
XineramaQueryVersion.restype = c_int
XineramaQueryVersion.argtypes = [POINTER(Display), POINTER(c_int), POINTER(c_int)]
# /usr/include/X11/extensions/Xinerama.h:56
XineramaIsActive = _lib.XineramaIsActive
XineramaIsActive.restype = c_int
XineramaIsActive.argtypes = [POINTER(Display)]
# /usr/include/X11/extensions/Xinerama.h:67
XineramaQueryScreens = _lib.XineramaQueryScreens
XineramaQueryScreens.restype = POINTER(XineramaScreenInfo)
XineramaQueryScreens.argtypes = [POINTER(Display), POINTER(c_int)]
__all__ = ['XineramaScreenInfo', 'XineramaQueryExtension',
'XineramaQueryVersion', 'XineramaIsActive', 'XineramaQueryScreens']
|
apache-2.0
|
chlorisdroid/linux-rt-odroid-c1
|
tools/perf/scripts/python/sched-migration.py
|
11215
|
11670
|
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
|
gpl-2.0
|
andrew-aladev/samba-talloc-debug
|
lib/dnspython/dns/rdtypes/IN/A.py
|
100
|
2054
|
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.ipv4
import dns.rdata
import dns.tokenizer
class A(dns.rdata.Rdata):
"""A record.
@ivar address: an IPv4 address
@type address: string (in the standard "dotted quad" format)"""
__slots__ = ['address']
def __init__(self, rdclass, rdtype, address):
super(A, self).__init__(rdclass, rdtype)
# check that it's OK
junk = dns.ipv4.inet_aton(address)
self.address = address
def to_text(self, origin=None, relativize=True, **kw):
return self.address
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
address = tok.get_identifier()
tok.get_eol()
return cls(rdclass, rdtype, address)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
file.write(dns.ipv4.inet_aton(self.address))
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
address = dns.ipv4.inet_ntoa(wire[current : current + rdlen])
return cls(rdclass, rdtype, address)
from_wire = classmethod(from_wire)
def _cmp(self, other):
sa = dns.ipv4.inet_aton(self.address)
oa = dns.ipv4.inet_aton(other.address)
return cmp(sa, oa)
|
gpl-3.0
|
soarpenguin/ansible
|
lib/ansible/modules/web_infrastructure/rundeck_acl_policy.py
|
9
|
8354
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Loic Blot <[email protected]>
# Sponsored by Infopro Digital. http://www.infopro-digital.com/
# Sponsored by E.T.A.I. http://www.etai.fr/
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rundeck_acl_policy
short_description: Manage Rundeck ACL policies.
description:
- Create, update and remove Rundeck ACL policies through HTTP API.
version_added: "2.4"
author: "Loic Blot (@nerzhul)"
options:
state:
description:
- Create or remove Rundeck ACL policy.
choices: ['present', 'absent']
default: 'present'
name:
description:
- Sets the ACL policy name.
required: True
url:
description:
- Sets the rundeck instance URL.
required: True
api_version:
description:
- Sets the API version used by module.
- API version must be at least 14.
default: 14
token:
description:
- Sets the token to authenticate against Rundeck API.
required: True
project:
description:
- Sets the project which receive the ACL policy.
- If unset, it's a system ACL policy.
policy:
description:
- Sets the ACL policy content.
- ACL policy content is a YAML object as described in http://rundeck.org/docs/man5/aclpolicy.html.
- It can be a YAML string or a pure Ansible inventory YAML object.
'''
EXAMPLES = '''
- name: Create or update a rundeck ACL policy in project Ansible
rundeck_acl_policy:
name: "Project_01"
api_version: 18
url: "https://rundeck.example.org"
token: "mytoken"
state: present
project: "Ansible"
policy:
description: "my policy"
context:
application: rundeck
for:
project:
- allow: read
by:
group: "build"
- name: Remove a rundeck system policy
rundeck_acl_policy:
name: "Project_02"
url: "https://rundeck.example.org"
token: "mytoken"
state: absent
'''
RETURN = '''
rundeck_response:
description: Rundeck response when a failure occurs.
returned: failed
type: string
before:
description: dictionary containing ACL policy information before modification.
returned: success
type: dict
after:
description: dictionary containing ACL policy information after modification.
returned: success
type: dict
'''
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
import json
class RundeckACLManager:
def __init__(self, module):
self.module = module
def handle_http_code_if_needed(self, infos):
if infos["status"] == 403:
self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
"permissions.", rundeck_response=infos["body"])
elif infos["status"] >= 500:
self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
def request_rundeck_api(self, query, data=None, method="GET"):
resp, info = fetch_url(self.module,
"%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
data=json.dumps(data),
method=method,
headers={
"Content-Type": "application/json",
"Accept": "application/json",
"X-Rundeck-Auth-Token": self.module.params["token"]
})
self.handle_http_code_if_needed(info)
if resp is not None:
resp = resp.read()
if resp != "":
try:
json_resp = json.loads(resp)
return json_resp, info
except ValueError as e:
self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
"Object was: %s" % (str(e), resp))
return resp, info
def get_acl(self):
resp, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"])
return resp
def create_or_update_acl(self):
facts = self.get_acl()
if facts is None:
# If in check mode don't create the ACL, just simulate its creation
if self.module.check_mode:
self.module.exit_json(changed=True, before={}, after=self.module.params["policy"])
_, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
method="POST",
data={"contents": self.module.params["policy"]})
if info["status"] == 201:
self.module.exit_json(changed=True, before={}, after=self.get_acl())
elif info["status"] == 400:
self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
self.module.params["name"])
elif info["status"] == 409:
self.module.fail_json(msg="ACL %s already exists" % self.module.params["name"])
else:
self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
before={}, after=self.get_acl())
else:
if facts["contents"] == self.module.params["policy"]:
self.module.exit_json(changed=False, before=facts, after=facts)
if self.module.check_mode:
self.module.exit_json(changed=True, before=facts, after=facts)
_, info = self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"],
method="PUT",
data={"contents": self.module.params["policy"]})
if info["status"] == 200:
self.module.exit_json(changed=True, before=facts, after=self.get_acl())
elif info["status"] == 400:
self.module.fail_json(msg="Unable to validate acl %s. Please ensure it's a valid ACL" %
self.module.params["name"])
elif info["status"] == 404:
self.module.fail_json(msg="ACL %s doesn't exists. Cannot update." % self.module.params["name"])
def remove_acl(self):
facts = self.get_acl()
if facts is None:
self.module.exit_json(changed=False, before={}, after={})
else:
# If not in check mode, remove the ACL
if not self.module.check_mode:
self.request_rundeck_api("system/acl/%s.aclpolicy" % self.module.params["name"], method="DELETE")
self.module.exit_json(changed=True, before=facts, after={})
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', choices=['present', 'absent'], default='present'),
name=dict(required=True, type='str'),
url=dict(required=True, type='str'),
api_version=dict(type='int', default=14),
token=dict(required=True, type='str', no_log=True),
policy=dict(type='str'),
project=dict(type='str'),
),
required_if=[
['state', 'present', ['policy']],
],
supports_check_mode=True
)
if module.params["api_version"] < 14:
module.fail_json(msg="API version should be at least 14")
rundeck = RundeckACLManager(module)
if module.params['state'] == 'present':
rundeck.create_or_update_acl()
elif module.params['state'] == 'absent':
rundeck.remove_acl()
if __name__ == '__main__':
main()
|
gpl-3.0
|
soarpenguin/ansible
|
lib/ansible/modules/cloud/cloudstack/cs_region.py
|
18
|
5397
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_region
short_description: Manages regions on Apache CloudStack based clouds.
description:
- Add, update and remove regions.
version_added: "2.3"
author: "René Moser (@resmo)"
options:
id:
description:
- ID of the region.
- Must be a number (int).
required: true
name:
description:
- Name of the region.
- Required if C(state=present)
required: false
default: null
endpoint:
description:
- Endpoint URL of the region.
- Required if C(state=present)
required: false
default: null
state:
description:
- State of the region.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# create a region
local_action:
module: cs_region
id: 2
name: geneva
endpoint: https://cloud.gva.example.com
# remove a region with ID 2
local_action:
module: cs_region
id: 2
state: absent
'''
RETURN = '''
---
id:
description: ID of the region.
returned: success
type: int
sample: 1
name:
description: Name of the region.
returned: success
type: string
sample: local
endpoint:
description: Endpoint of the region.
returned: success
type: string
sample: http://cloud.example.com
gslb_service_enabled:
description: Whether the GSLB service is enabled or not.
returned: success
type: bool
sample: true
portable_ip_service_enabled:
description: Whether the portable IP service is enabled or not.
returned: success
type: bool
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackRegion(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackRegion, self).__init__(module)
self.returns = {
'endpoint': 'endpoint',
'gslbserviceenabled': 'gslb_service_enabled',
'portableipserviceenabled': 'portable_ip_service_enabled',
}
def get_region(self):
id = self.module.params.get('id')
regions = self.query_api('listRegions', id=id)
if regions:
return regions['region'][0]
return None
def present_region(self):
region = self.get_region()
if not region:
region = self._create_region(region=region)
else:
region = self._update_region(region=region)
return region
def _create_region(self, region):
self.result['changed'] = True
args = {
'id': self.module.params.get('id'),
'name': self.module.params.get('name'),
'endpoint': self.module.params.get('endpoint')
}
if not self.module.check_mode:
res = self.query_api('addRegion', **args)
region = res['region']
return region
def _update_region(self, region):
args = {
'id': self.module.params.get('id'),
'name': self.module.params.get('name'),
'endpoint': self.module.params.get('endpoint')
}
if self.has_changed(args, region):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('updateRegion', **args)
region = res['region']
return region
def absent_region(self):
region = self.get_region()
if region:
self.result['changed'] = True
if not self.module.check_mode:
self.query_api('removeRegion', id=region['id'])
return region
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
id=dict(required=True, type='int'),
name=dict(),
endpoint=dict(),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
required_if=[
('state', 'present', ['name', 'endpoint']),
],
supports_check_mode=True
)
acs_region = AnsibleCloudStackRegion(module)
state = module.params.get('state')
if state == 'absent':
region = acs_region.absent_region()
else:
region = acs_region.present_region()
result = acs_region.get_result(region)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
mezz64/home-assistant
|
homeassistant/components/demo/stt.py
|
21
|
1969
|
"""Support for the demo for speech to text service."""
from typing import List
from aiohttp import StreamReader
from homeassistant.components.stt import Provider, SpeechMetadata, SpeechResult
from homeassistant.components.stt.const import (
AudioBitRates,
AudioChannels,
AudioCodecs,
AudioFormats,
AudioSampleRates,
SpeechResultState,
)
SUPPORT_LANGUAGES = ["en", "de"]
async def async_get_engine(hass, config, discovery_info=None):
"""Set up Demo speech component."""
return DemoProvider()
class DemoProvider(Provider):
"""Demo speech API provider."""
@property
def supported_languages(self) -> List[str]:
"""Return a list of supported languages."""
return SUPPORT_LANGUAGES
@property
def supported_formats(self) -> List[AudioFormats]:
"""Return a list of supported formats."""
return [AudioFormats.WAV]
@property
def supported_codecs(self) -> List[AudioCodecs]:
"""Return a list of supported codecs."""
return [AudioCodecs.PCM]
@property
def supported_bit_rates(self) -> List[AudioBitRates]:
"""Return a list of supported bit rates."""
return [AudioBitRates.BITRATE_16]
@property
def supported_sample_rates(self) -> List[AudioSampleRates]:
"""Return a list of supported sample rates."""
return [AudioSampleRates.SAMPLERATE_16000, AudioSampleRates.SAMPLERATE_44100]
@property
def supported_channels(self) -> List[AudioChannels]:
"""Return a list of supported channels."""
return [AudioChannels.CHANNEL_STEREO]
async def async_process_audio_stream(
self, metadata: SpeechMetadata, stream: StreamReader
) -> SpeechResult:
"""Process an audio stream to STT service."""
# Read available data
async for _ in stream.iter_chunked(4096):
pass
return SpeechResult("Turn the Kitchen Lights on", SpeechResultState.SUCCESS)
|
apache-2.0
|
ii0/bits
|
python/bits/cdata.py
|
2
|
7490
|
# Copyright (c) 2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""bits.cdata module."""
from __future__ import print_function
import binascii
import ctypes, _ctypes
import textwrap
import ttypager
import uuid
def print_fields(cls):
with ttypager.page():
print("{}".format(cls.__name__))
print("{:20s} {:6} {:6}".format('field', 'length', 'offset'))
for f in cls._fields_:
a = getattr(cls, f[0])
print("{:20s} {:6} {:6}".format(f[0], a.size, a.offset))
def to_bytes(var):
return (ctypes.c_char * ctypes.sizeof(var)).from_buffer(var).raw
_CTYPES_HEX_TYPES = (
ctypes.c_void_p,
ctypes.c_uint8, ctypes.c_uint16, ctypes.c_uint32, ctypes.c_uint64,
ctypes.c_ubyte, ctypes.c_ushort, ctypes.c_uint, ctypes.c_ulong, ctypes.c_ulonglong,
)
class c_base(object):
"""Base class for ctypes structures and unions."""
@staticmethod
def _formatval(t, val):
if val is not None and t in _CTYPES_HEX_TYPES:
return "{:#x}".format(val)
if issubclass(t, _ctypes.Array):
if issubclass(t._type_, (ctypes.c_char, ctypes.c_wchar)):
return "'{}'".format(val)
else:
return "[{}]".format(", ".join(Struct._formatval(t._type_, item) for item in val))
return "{}".format(val)
def _formatter(self, field):
name = field[0]
t = field[1]
val = getattr(self, name)
if hasattr(self, '_formats'):
f = self._formats.get(name, None)
if f:
return f(val)
if issubclass(t, (Struct, Union)):
val._indent = self._indent
return str(val)
if issubclass(t, _ctypes.Array):
if issubclass(t._type_, (Struct, Union)):
s = "["
for item in val:
item._indent = self._indent + " "
s += "\n" + str(item)
s += "]"
return s
return self._formatval(t, val)
_indent = ""
def _wrap(self, str, indent=True):
line_len = 77 - len(self._indent + ' ')
_wrapper = textwrap.TextWrapper(width=line_len, initial_indent=self._indent, subsequent_indent=self._indent + ' ')
_wrapper_indentall = textwrap.TextWrapper(width=line_len, initial_indent=self._indent + ' ', subsequent_indent=self._indent + ' ')
def __wrap():
wrapper = _wrapper
for line in str.split("\n"):
# Preserve blank lines, for which wrapper emits an empty list
if not line:
yield ""
for wrapped_line in wrapper.wrap(line):
yield wrapped_line
if indent:
wrapper = _wrapper_indentall
return '\n'.join(__wrap())
def preface_field(self, field):
a = getattr(self.__class__, field[0])
return "ofs={} ".format(a.offset)
def bitfield_info(self, field):
a = getattr(self.__class__, field[0])
bit_count = a.size >> 16
lo_bit = a.size & 0xFFFF
hi_bit = lo_bit + bit_count - 1
return bit_count, hi_bit, lo_bit
def preface_bitfield(self, field):
bit_count, hi_bit, lo_bit = self.bitfield_info(field)
if bit_count > 1:
return "bits[{}:{}]=".format(hi_bit, lo_bit)
if bit_count == 1:
return "bit[{}]=".format(lo_bit)
return ""
def __str__(self):
self._indent += " "
s = "{}({})".format(self.__class__.__name__, "".join("\n{}{}={}{}".format(self.preface_field(field), field[0], self.preface_bitfield(field), self._formatter(field)) for field in self._fields_))
self._indent = ""
return self._wrap(s)
class Struct(ctypes.Structure, c_base):
"""Base class for ctypes structures."""
def __hash__(self):
buf = (ctypes.c_uint8 * ctypes.sizeof(self)).from_buffer(self)
return binascii.crc32(buf)
def __cmp__(self, other):
return cmp(hash(self), hash(other))
class Union(ctypes.Union, c_base):
"""Base class for ctypes unions."""
def __hash__(self):
buf = (ctypes.c_uint8 * ctypes.sizeof(self)).from_buffer(self)
return binascii.crc32(buf)
def __cmp__(self, other):
return cmp(hash(self), hash(other))
class GUID(Struct):
_fields_ = [
('Data', ctypes.c_ubyte * 16),
]
def __init__(self, *args, **kwargs):
"""Create a GUID. Accepts any arguments the uuid.UUID constructor
would accept. Also accepts an instance of uuid.UUID, either as the
first argument or as a keyword argument "uuid". As with other
ctypes structures, passing no parameters yields a zero-initialized
structure."""
u = kwargs.get("uuid")
if u is not None:
self.uuid = u
elif not(args) and not(kwargs):
self.uuid = uuid.UUID(int=0)
elif args and isinstance(args[0], uuid.UUID):
self.uuid = args[0]
else:
self.uuid = uuid.UUID(*args, **kwargs)
def _get_uuid(self):
return uuid.UUID(bytes_le=to_bytes(self))
def _set_uuid(self, u):
ctypes.memmove(ctypes.addressof(self), ctypes.c_char_p(u.bytes_le), ctypes.sizeof(self))
uuid = property(_get_uuid, _set_uuid)
def __cmp__(self, other):
if isinstance(other, GUID):
return cmp(self.uuid, other.uuid)
if isinstance(other, uuid.UUID):
return cmp(self.uuid, other)
return NotImplemented
def __hash__(self):
return hash(self.uuid)
def __repr__(self):
return "GUID({})".format(self.uuid)
def __str__(self):
return "{}".format(self.uuid)
def _format_guid(val):
try:
import efi
guid_str = efi.known_uuids.get(val.uuid, None)
except:
guid_str = None
if guid_str:
return '{} ({})'.format(val, guid_str)
return '{}'.format(val)
|
bsd-3-clause
|
bernard357/shellbot
|
shellbot/stores/sqlite.py
|
1
|
5489
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import colorlog
import logging
import os
from multiprocessing import Lock, Manager
import sqlite3
from .base import Store
class SqliteStore(Store):
"""
Stores data for one space
This is a basic permanent key-value store.
Example::
store = SqliteStore(db='shellstore.db', id=space.id)
"""
def on_init(self,
prefix='sqlite',
id=None,
db=None,
**kwargs):
"""
Adds processing to initialization
:param prefix: the main keyword for configuration of this space
:type prefix: str
:param id: the unique identifier of the related space (optional)
:type id: str
:param db: name of the file that contains Sqlite data (optional)
:type db: str
Example::
store = SqliteStore(context=context, prefix='sqlite')
Here we create a new store powered by Sqlite, and use
settings under the key ``sqlite`` in the context of this bot.
"""
assert prefix
self.prefix = prefix
self.id = id if id else '*id'
if db:
self.context.set(self.prefix+'.db', db)
def check(self):
"""
Checks configuration
"""
self.context.check(self.prefix+'.db', 'store.db')
def get_db(self):
"""
Gets a handle on the database
"""
db = self.context.get(self.prefix+'.db', 'store.db')
return sqlite3.connect(db)
def bond(self, id=None):
"""
Creates or uses a file to store data
:param id: the unique identifier of the related space
:type id: str
"""
if id:
self.id = id
handle = self.get_db()
try:
handle.execute("CREATE TABLE store \
(id INTEGER PRIMARY KEY, \
context TEXT, \
key TEXT UNIQUE, \
value TEXT)")
except sqlite3.OperationalError as feedback:
logging.debug(feedback)
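# Note that bond() is safe to call repeatedly: if the table already exists,
# sqlite3 raises OperationalError, which is logged at debug level and ignored.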
def _set(self, key, value, handle=None):
"""
Sets a permanent value
:param key: name of the value
:type key: str
:param value: actual value
:type value: any serializable type is accepted
:param handle: an optional instance of a Sqlite database
:type handle: a connection
This function stores or updates a value in the back-end storage
system.
Example::
store._set('parameter_123', 'George')
"""
handle = handle if handle else self.get_db()
cursor = handle.cursor()
cursor.execute("DELETE FROM store WHERE context=? AND key=?",
(self.id, key))
cursor.execute("INSERT INTO store (context,key,value) VALUES (?,?,?)",
(self.id, key, value))
handle.commit()
cursor.close()
def _get(self, key, handle=None):
"""
Gets a permanent value
:param key: name of the value
:type key: str
:param handle: an optional instance of a Sqlite database
:type handle: a connection
:return: the actual value, or None
Example::
value = store._get('parameter_123')
"""
handle = handle if handle else self.get_db()
cursor = handle.cursor()
cursor.execute("SELECT value FROM store WHERE context=? AND key=?",
(self.id, key))
result = cursor.fetchone()
try:
return result[0]
except TypeError:
return None
def _clear(self, key=None, handle=None):
"""
Forgets a value or all values
:param key: name of the value to forget, or None
:type key: str
:param handle: an optional instance of a Sqlite database
:type handle: a connection
To clear only one value, provide the name of it.
For example::
store._clear('parameter_123')
To clear all values in the store, just call the function
without a value.
For example::
store._clear()
"""
handle = handle if handle else self.get_db()
if not key:
cursor = handle.cursor()
cursor.execute("DELETE FROM store WHERE context=?",
(self.id,))
handle.commit()
cursor.close()
else:
cursor = handle.cursor()
cursor.execute("DELETE FROM store WHERE context=? AND key=?",
(self.id, key))
handle.commit()
cursor.close()
|
apache-2.0
|
evilhero/mylar
|
mylar/scheduler.py
|
2
|
2776
|
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import time
import threading
import traceback
from mylar import logger
class Scheduler:
def __init__(self, action, cycleTime=datetime.timedelta(minutes=10), runImmediately=True,
threadName="ScheduledThread", silent=False, delay=None):
if runImmediately:
self.lastRun = datetime.datetime.fromordinal(1)
else:
self.lastRun = datetime.datetime.now()
self.action = action
self.cycleTime = cycleTime
self.thread = None
self.threadName = threadName
self.silent = silent
self.delay = delay
self.initThread()
self.abort = False
def initThread(self):
if self.thread == None or not self.thread.isAlive():
self.thread = threading.Thread(None, self.runAction, self.threadName)
def timeLeft(self):
return self.cycleTime - (datetime.datetime.now() - self.lastRun)
def forceRun(self):
if not self.action.amActive:
self.lastRun = datetime.datetime.fromordinal(1)
return True
return False
def runAction(self):
while True:
currentTime = datetime.datetime.now()
if currentTime - self.lastRun > self.cycleTime:
self.lastRun = currentTime
try:
if not self.silent:
logger.fdebug("Starting new thread: " + self.threadName)
if self.delay:
logger.info('delaying thread for ' + str(self.delay) + ' seconds to avoid locks.')
time.sleep(self.delay)
self.action.run()
except Exception, e:
logger.fdebug("Exception generated in thread " + self.threadName + ": %s" % e)
logger.fdebug(repr(traceback.format_exc()))
if self.abort:
self.abort = False
self.thread = None
return
time.sleep(1)
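# Illustrative usage sketch (editor's note, not part of the upstream module).
# Scheduler expects an action object that exposes run() and an amActive flag;
# the searcher below is hypothetical.
#
#     searcher = RssSearcher()                                 # hypothetical action
#     sched = Scheduler(searcher,
#                       cycleTime=datetime.timedelta(minutes=30),
#                       threadName="SEARCHQUEUE",
#                       runImmediately=False)
#     sched.thread.start()     # runAction() polls once per second until aborted
#     sched.abort = True       # the loop clears the flag, drops the thread and exits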
|
gpl-3.0
|
rdamas/e2openplugin-OpenWebif
|
plugin/controllers/models/info.py
|
3
|
26681
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##########################################################################
# OpenWebif: info
##########################################################################
# Copyright (C) 2011 - 2021 E2OpenPlugins
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
##########################################################################
from __future__ import print_function
import os
import six
import time
from twisted import version
from socket import has_ipv6, AF_INET6, AF_INET, inet_ntop, inet_pton, getaddrinfo
import NavigationInstance
from Components.About import about
from Components.config import config
from Components.NimManager import nimmanager
from Components.Harddisk import harddiskmanager
from Components.Network import iNetwork
from ServiceReference import ServiceReference
from RecordTimer import parseEvent, RecordTimerEntry
from timer import TimerEntry
from Screens.InfoBar import InfoBar
from Tools.Directories import fileExists
from enigma import eEPGCache, eDVBVolumecontrol, eServiceCenter, eServiceReference
from Plugins.Extensions.OpenWebif.controllers.i18n import _
from Plugins.Extensions.OpenWebif.controllers.defaults import OPENWEBIFVER, TRANSCODING
from Plugins.Extensions.OpenWebif.controllers.utilities import removeBad, removeBad2
try:
from boxbranding import getBoxType, getMachineBuild, getMachineBrand, getMachineName, getImageDistro, getImageVersion, getImageBuild, getOEVersion, getDriverDate
from enigma import getEnigmaVersionString
except: # nosec # noqa: E722
from Plugins.Extensions.OpenWebif.controllers.models.owibranding import getBoxType, getMachineBuild, getMachineBrand, getMachineName, getImageDistro, getImageVersion, getImageBuild, getOEVersion, getDriverDate, getLcd, getGrabPip
def getEnigmaVersionString():
return about.getEnigmaVersionString()
STATICBOXINFO = None
def getFriendlyImageDistro():
dist = getImageDistro().replace("openatv", "OpenATV").replace("openhdf", "OpenHDF").replace("openpli", "OpenPLi").replace("openvix", "OpenViX")
return dist
def getIPMethod(iface):
# iNetwork.getAdapterAttribute is crap and not portable
ipmethod = _("SLAAC")
if fileExists('/etc/network/interfaces'):
ifaces = '/etc/network/interfaces'
for line in open(ifaces).readlines():
if not line.startswith('#'):
if line.startswith('iface') and "inet6" in line and iface in line:
if "static" in line:
ipmethod = _("static")
if "dhcp" in line:
ipmethod = _("DHCP")
if "manual" in line:
ipmethod = _("manual/disabled")
if "6to4" in line:
ipmethod = "6to4"
return ipmethod
def getIPv4Method(iface):
# iNetwork.getAdapterAttribute is crap and not portable
ipv4method = _("static")
if fileExists('/etc/network/interfaces'):
ifaces = '/etc/network/interfaces'
for line in open(ifaces).readlines():
if not line.startswith('#'):
if line.startswith('iface') and "inet " in line and iface in line:
if "static" in line:
ipv4method = _("static")
if "dhcp" in line:
ipv4method = _("DHCP")
if "manual" in line:
ipv4method = _("manual/disabled")
return ipv4method
def getLinkSpeed(iface):
speed = _("unknown")
try:
with open('/sys/class/net/' + iface + '/speed', 'r') as f:
speed = f.read().strip()
except: # nosec # noqa: E722
if os.path.isdir('/sys/class/net/' + iface + '/wireless'):
try:
speed = os.popen('iwconfig ' + iface + ' | grep "Bit Rate"').read().split(':')[1].split(' ')[0]
except: # nosec # noqa: E722
pass
speed = str(speed) + " MBit/s"
speed = speed.replace("10000 MBit/s", "10 GBit/s")
speed = speed.replace("1000 MBit/s", "1 GBit/s")
return speed
def getNICChipSet(iface):
nic = _("unknown")
try:
nic = os.path.realpath('/sys/class/net/' + iface + '/device/driver').split('/')[-1]
nic = str(nic)
except: # nosec # noqa: E722
pass
return nic
def getFriendlyNICChipSet(iface):
friendlynic = getNICChipSet(iface)
friendlynic = friendlynic.replace("bcmgenet", "Broadcom Gigabit Ethernet")
friendlynic = friendlynic.replace("bcmemac", "Broadcom STB 10/100 EMAC")
return friendlynic
def normalize_ipv6(orig):
net = []
if '/' in orig:
net = orig.split('/')
if net[1] == "128":
del net[1]
else:
net.append(orig)
addr = net[0]
addr = inet_ntop(AF_INET6, inet_pton(AF_INET6, addr))
if len(net) == 2:
addr += "/" + net[1]
return (addr)
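# Worked examples (editor's note): normalize_ipv6() canonicalises the address
# part and drops a redundant /128 host prefix.
#     normalize_ipv6('2001:0db8:0000:0000:0000:0000:0000:0001/128') -> '2001:db8::1'
#     normalize_ipv6('2001:0db8::0001/64') -> '2001:db8::1/64'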
def getAdapterIPv6(ifname):
addr = _("IPv4-only kernel")
firstpublic = None
if fileExists('/proc/net/if_inet6'):
addr = _("IPv4-only Python/Twisted")
if has_ipv6 and version.major >= 12:
proc = '/proc/net/if_inet6'
tempaddrs = []
for line in open(proc).readlines():
if line.startswith('fe80'):
continue
tmpaddr = ""
tmp = line.split()
if ifname == tmp[5]:
tmpaddr = ":".join([tmp[0][i:i + 4] for i in list(range(0, len(tmp[0]), 4))])
if firstpublic is None and (tmpaddr.startswith('2') or tmpaddr.startswith('3')):
firstpublic = normalize_ipv6(tmpaddr)
if tmp[2].lower() != "ff":
tmpaddr = "%s/%s" % (tmpaddr, int(tmp[2].lower(), 16))
tmpaddr = normalize_ipv6(tmpaddr)
tempaddrs.append(tmpaddr)
if len(tempaddrs) > 1:
tempaddrs.sort()
addr = ', '.join(tempaddrs)
elif len(tempaddrs) == 1:
addr = tempaddrs[0]
elif len(tempaddrs) == 0:
addr = _("none/IPv4-only network")
return {'addr': addr, 'firstpublic': firstpublic}
def formatIp(ip):
if ip is None or len(ip) != 4:
return "0.0.0.0" # nosec
return "%d.%d.%d.%d" % (ip[0], ip[1], ip[2], ip[3])
def getInfo(session=None, need_fullinfo=False):
	# TODO: get webif version from somewhere!
info = {}
global STATICBOXINFO
if not (STATICBOXINFO is None or need_fullinfo):
return STATICBOXINFO
info['brand'] = getMachineBrand()
info['model'] = getMachineName()
info['boxtype'] = getBoxType()
info['machinebuild'] = getMachineBuild()
try: # temporary due OE-A
info['lcd'] = getLcd()
except: # nosec # noqa: E722
info['lcd'] = 0
try: # temporary due OE-A
info['grabpip'] = getGrabPip()
except: # nosec # noqa: E722
info['grabpip'] = 0
chipset = "unknown"
if fileExists("/etc/.box"):
f = open("/etc/.box", 'r')
model = f.readline().strip().lower()
f.close()
if model.startswith("ufs") or model.startswith("ufc"):
if model in ("ufs910", "ufs922", "ufc960"):
chipset = "SH4 @266MHz"
else:
chipset = "SH4 @450MHz"
elif model in ("topf", "tf7700hdpvr"):
chipset = "SH4 @266MHz"
elif model.startswith("azbox"):
f = open("/proc/stb/info/model", 'r')
model = f.readline().strip().lower()
f.close()
if model == "me":
chipset = "SIGMA 8655"
elif model == "minime":
chipset = "SIGMA 8653"
else:
chipset = "SIGMA 8634"
elif model.startswith("spark"):
if model == "spark7162":
chipset = "SH4 @540MHz"
else:
chipset = "SH4 @450MHz"
elif fileExists("/proc/stb/info/azmodel"):
f = open("/proc/stb/info/model", 'r')
model = f.readline().strip().lower()
f.close()
if model == "me":
chipset = "SIGMA 8655"
elif model == "minime":
chipset = "SIGMA 8653"
else:
chipset = "SIGMA 8634"
elif fileExists("/proc/stb/info/model"):
f = open("/proc/stb/info/model", 'r')
model = f.readline().strip().lower()
f.close()
if model == "tf7700hdpvr":
chipset = "SH4 @266MHz"
elif model == "nbox":
chipset = "STi7100 @266MHz"
elif model == "arivalink200":
chipset = "STi7109 @266MHz"
elif model in ("adb2850", "adb2849", "dsi87"):
chipset = "STi7111 @450MHz"
elif model in ("sagemcom88", "esi88"):
chipset = "STi7105 @450MHz"
elif model.startswith("spark"):
if model == "spark7162":
chipset = "STi7162 @540MHz"
else:
chipset = "STi7111 @450MHz"
elif model == "dm800":
chipset = "bcm7401"
elif model in ("dm800se", "dm500hd", "dm7020hd", "dm800sev2", "dm500hdv2", "dm7020hdv2"):
chipset = "bcm7405"
elif model == "dm8000":
chipset = "bcm7400"
elif model in ("dm820", "dm7080"):
chipset = "bcm7435"
elif model in ("dm520", "dm525"):
chipset = "bcm73625"
elif model in ("dm900", "dm920"):
chipset = "bcm7252S"
if fileExists("/proc/stb/info/chipset"):
f = open("/proc/stb/info/chipset", 'r')
chipset = f.readline().strip()
f.close()
info['chipset'] = chipset
memFree = 0
for line in open("/proc/meminfo", 'r'):
parts = line.split(':')
key = parts[0].strip()
if key == "MemTotal":
info['mem1'] = parts[1].strip().replace("kB", _("kB"))
elif key in ("MemFree", "Buffers", "Cached"):
memFree += int(parts[1].strip().split(' ', 1)[0])
info['mem2'] = "%s %s" % (memFree, _("kB"))
info['mem3'] = _("%s free / %s total") % (info['mem2'], info['mem1'])
try:
f = open("/proc/uptime", "r")
uptime = int(float(f.readline().split(' ', 2)[0].strip()))
f.close()
uptimetext = ''
if uptime > 86400:
d = uptime / 86400
uptime = uptime % 86400
uptimetext += '%dd ' % d
uptimetext += "%d:%.2d" % (uptime / 3600, (uptime % 3600) / 60)
except: # nosec # noqa: E722
uptimetext = "?"
info['uptime'] = uptimetext
info["webifver"] = OPENWEBIFVER
info['imagedistro'] = getImageDistro()
info['friendlyimagedistro'] = getFriendlyImageDistro()
info['oever'] = getOEVersion()
info['imagever'] = getImageVersion()
ib = getImageBuild()
if ib:
info['imagever'] = info['imagever'] + "." + ib
info['enigmaver'] = getEnigmaVersionString()
info['driverdate'] = getDriverDate()
info['kernelver'] = about.getKernelVersionString()
try:
from Tools.StbHardware import getFPVersion
except ImportError:
from Tools.DreamboxHardware import getFPVersion
try:
info['fp_version'] = getFPVersion()
except: # nosec # noqa: E722
info['fp_version'] = None
friendlychipsetdescription = _("Chipset")
friendlychipsettext = info['chipset'].replace("bcm", "Broadcom ")
if friendlychipsettext in ("7335", "7356", "7362", "73625", "7424", "7425", "7429"):
friendlychipsettext = "Broadcom " + friendlychipsettext
if not (info['fp_version'] is None or info['fp_version'] == 0):
friendlychipsetdescription = friendlychipsetdescription + " (" + _("Front processor version") + ")"
friendlychipsettext = friendlychipsettext + " (" + str(info['fp_version']) + ")"
info['friendlychipsetdescription'] = friendlychipsetdescription
info['friendlychipsettext'] = friendlychipsettext
info['tuners'] = []
for i in list(range(0, nimmanager.getSlotCount())):
print("[OpenWebif] -D- tuner '%d' '%s' '%s'" % (i, nimmanager.getNimName(i), nimmanager.getNim(i).getSlotName()))
info['tuners'].append({
"name": nimmanager.getNim(i).getSlotName(),
"type": nimmanager.getNimName(i) + " (" + nimmanager.getNim(i).getFriendlyType() + ")",
"rec": "",
"live": ""
})
info['ifaces'] = []
ifaces = iNetwork.getConfiguredAdapters()
for iface in ifaces:
info['ifaces'].append({
"name": iNetwork.getAdapterName(iface),
"friendlynic": getFriendlyNICChipSet(iface),
"linkspeed": getLinkSpeed(iface),
"mac": iNetwork.getAdapterAttribute(iface, "mac"),
"dhcp": iNetwork.getAdapterAttribute(iface, "dhcp"),
"ipv4method": getIPv4Method(iface),
"ip": formatIp(iNetwork.getAdapterAttribute(iface, "ip")),
"mask": formatIp(iNetwork.getAdapterAttribute(iface, "netmask")),
"v4prefix": sum([bin(int(x)).count('1') for x in formatIp(iNetwork.getAdapterAttribute(iface, "netmask")).split('.')]),
"gw": formatIp(iNetwork.getAdapterAttribute(iface, "gateway")),
"ipv6": getAdapterIPv6(iface)['addr'],
"ipmethod": getIPMethod(iface),
"firstpublic": getAdapterIPv6(iface)['firstpublic']
})
info['hdd'] = []
for hdd in harddiskmanager.hdd:
dev = hdd.findMount()
if dev:
stat = os.statvfs(dev)
free = stat.f_bavail * stat.f_frsize / 1048576.
else:
free = -1
if free <= 1024:
free = "%i %s" % (free, _("MB"))
else:
free = free / 1024.
free = "%.1f %s" % (free, _("GB"))
size = hdd.diskSize() * 1000000 / 1048576.
if size > 1048576:
size = "%.1f %s" % ((size / 1048576.), _("TB"))
elif size > 1024:
size = "%.1f %s" % ((size / 1024.), _("GB"))
else:
size = "%d %s" % (size, _("MB"))
iecsize = hdd.diskSize()
# Harddisks > 1000 decimal Gigabytes are labelled in TB
if iecsize > 1000000:
iecsize = (iecsize + 50000) // float(100000) / 10
# Omit decimal fraction if it is 0
if (iecsize % 1 > 0):
iecsize = "%.1f %s" % (iecsize, _("TB"))
else:
iecsize = "%d %s" % (iecsize, _("TB"))
# Round harddisk sizes beyond ~300GB to full tens: 320, 500, 640, 750GB
elif iecsize > 300000:
iecsize = "%d %s" % (((iecsize + 5000) // 10000 * 10), _("GB"))
# ... be more precise for media < ~300GB (Sticks, SSDs, CF, MMC, ...): 1, 2, 4, 8, 16 ... 256GB
elif iecsize > 1000:
iecsize = "%d %s" % (((iecsize + 500) // 1000), _("GB"))
else:
iecsize = "%d %s" % (iecsize, _("MB"))
info['hdd'].append({
"model": hdd.model(),
"capacity": size,
"labelled_capacity": iecsize,
"free": free,
"mount": dev,
"friendlycapacity": _("%s free / %s total") % (free, size + ' ("' + iecsize + '")')
})
info['shares'] = []
autofiles = ('/etc/auto.network', '/etc/auto.network_vti')
for autofs in autofiles:
if fileExists(autofs):
method = "autofs"
for line in open(autofs).readlines():
if not line.startswith('#'):
# Replace escaped spaces that can appear inside credentials with underscores
# Not elegant but we wouldn't want to expose credentials on the OWIF anyways
tmpline = line.replace("\ ", "_")
tmp = tmpline.split()
if not len(tmp) == 3:
continue
name = tmp[0].strip()
type = "unknown"
if "cifs" in tmp[1]:
# Linux still defaults to SMBv1
type = "SMBv1.0"
settings = tmp[1].split(",")
for setting in settings:
if setting.startswith("vers="):
type = setting.replace("vers=", "SMBv")
elif "nfs" in tmp[1]:
type = "NFS"
# Default is r/w
mode = _("r/w")
settings = tmp[1].split(",")
for setting in settings:
if setting == "ro":
mode = _("r/o")
uri = tmp[2]
parts = []
parts = tmp[2].split(':')
if parts[0] == "":
server = uri.split('/')[2]
uri = uri.strip()[1:]
else:
server = parts[0]
ipaddress = None
if server:
# Will fail on literal IPs
try:
# Try IPv6 first, as will Linux
if has_ipv6:
tmpaddress = None
tmpaddress = getaddrinfo(server, 0, AF_INET6)
if tmpaddress:
ipaddress = "[" + list(tmpaddress)[0][4][0] + "]"
# Use IPv4 if IPv6 fails or is not present
if ipaddress is None:
tmpaddress = None
tmpaddress = getaddrinfo(server, 0, AF_INET)
if tmpaddress:
ipaddress = list(tmpaddress)[0][4][0]
except: # nosec # noqa: E722
pass
friendlyaddress = server
if ipaddress is not None and not ipaddress == server:
friendlyaddress = server + " (" + ipaddress + ")"
info['shares'].append({
"name": name,
"method": method,
"type": type,
"mode": mode,
"path": uri,
"host": server,
"ipaddress": ipaddress,
"friendlyaddress": friendlyaddress
})
# TODO: fstab
info['transcoding'] = TRANSCODING
info['EX'] = ''
if session:
try:
# gets all current stream clients for images using eStreamServer
# TODO: get tuner info for streams
			# TODO: get recording/timer info if more than one
info['streams'] = GetStreamInfo()
recs = NavigationInstance.instance.getRecordings()
if recs:
# only one stream
s_name = ''
if len(info['streams']) == 1:
sinfo = info['streams'][0]
s_name = sinfo["name"] + ' (' + sinfo["ip"] + ')'
print("[OpenWebif] -D- s_name '%s'" % s_name)
sname = ''
timers = []
for timer in NavigationInstance.instance.RecordTimer.timer_list:
if timer.isRunning() and not timer.justplay:
timers.append(removeBad(timer.service_ref.getServiceName()))
print("[OpenWebif] -D- timer '%s'" % timer.service_ref.getServiceName())
# TODO: more than one recording
if len(timers) == 1:
sname = timers[0]
if sname == '' and s_name != '':
sname = s_name
print("[OpenWebif] -D- recs count '%d'" % len(recs))
for rec in recs:
feinfo = rec.frontendInfo()
frontendData = feinfo and feinfo.getAll(True)
if frontendData is not None:
cur_info = feinfo.getTransponderData(True)
if cur_info:
nr = frontendData['tuner_number']
info['tuners'][nr]['rec'] = getOrbitalText(cur_info) + ' / ' + sname
service = session.nav.getCurrentService()
if service is not None:
sname = service.info().getName()
feinfo = service.frontendInfo()
frontendData = feinfo and feinfo.getAll(True)
if frontendData is not None:
cur_info = feinfo.getTransponderData(True)
if cur_info:
nr = frontendData['tuner_number']
info['tuners'][nr]['live'] = getOrbitalText(cur_info) + ' / ' + sname
except Exception as error:
info['EX'] = error
info['timerpipzap'] = False
info['timerautoadjust'] = False
try:
timer = RecordTimerEntry(ServiceReference("1:0:1:0:0:0:0:0:0:0"), 0, 0, '', '', 0)
if hasattr(timer, "pipzap"):
info['timerpipzap'] = True
if hasattr(timer, "autoadjust"):
info['timerautoadjust'] = True
except Exception as error:
print("[OpenWebif] -D- RecordTimerEntry check %s" % error)
STATICBOXINFO = info
return info
def getStreamServiceAndEvent(ref):
sname = "(unknown service)"
eventname = ""
if not isinstance(ref, eServiceReference):
ref = eServiceReference(ref)
servicereference = ServiceReference(ref)
if servicereference:
sname = removeBad(servicereference.getServiceName())
epg = eEPGCache.getInstance()
event = epg and epg.lookupEventTime(ref, -1, 0)
if event:
eventname = event.getEventName()
return sname, eventname
def GetStreamInfo():
streams = []
nostreamServer = True
try:
from enigma import eStreamServer
streamServer = eStreamServer.getInstance()
if streamServer is not None:
nostreamServer = False
for x in streamServer.getConnectedClients():
servicename, eventname = getStreamServiceAndEvent(x[1])
if int(x[2]) == 0:
strtype = "S"
else:
strtype = "T"
streams.append({
"ref": x[1],
"name": servicename,
"eventname": eventname,
"ip": x[0], # TODO: ip Address format
"type": strtype
})
except Exception as error: # nosec # noqa: E722
# print("[OpenWebif] -D- no eStreamServer %s" % error)
pass
if nostreamServer:
from Plugins.Extensions.OpenWebif.controllers.stream import streamList
if len(streamList) > 0:
for stream in streamList:
servicename, eventname = getStreamServiceAndEvent(stream.ref)
streams.append({
"ref": stream.ref.toString(),
"name": servicename,
"eventname": eventname,
"ip": stream.clientIP,
"type": "S" # TODO : Transcoding
})
return streams
def getOrbitalText(cur_info):
if cur_info:
tunerType = cur_info.get('tuner_type')
if tunerType == "DVB-S":
pos = int(cur_info.get('orbital_position'))
return getOrb(pos)
if cur_info.get("system", -1) == 1:
tunerType += "2"
return tunerType
return ''
def getOrb(pos):
direction = _("E")
if pos > 1800:
pos = 3600 - pos
direction = _("W")
return "%d.%d° %s" % (pos / 10, pos % 10, direction)
def getFrontendStatus(session):
inf = {}
inf['tunertype'] = ""
inf['tunernumber'] = ""
inf['snr'] = ""
inf['snr_db'] = ""
inf['agc'] = ""
inf['ber'] = ""
from Screens.Standby import inStandby
if inStandby is None:
inf['inStandby'] = "false"
else:
inf['inStandby'] = "true"
service = session.nav.getCurrentService()
if service is None:
return inf
feinfo = service.frontendInfo()
frontendData = feinfo and feinfo.getAll(True)
if frontendData is not None:
inf['tunertype'] = frontendData.get("tuner_type", "UNKNOWN")
inf['tunernumber'] = frontendData.get("tuner_number")
frontendStatus = feinfo and feinfo.getFrontendStatus()
if frontendStatus is not None:
percent = frontendStatus.get("tuner_signal_quality")
if percent is not None:
inf['snr'] = int(percent * 100 / 65535)
inf['snr_db'] = inf['snr']
percent = frontendStatus.get("tuner_signal_quality_db")
if percent is not None:
inf['snr_db'] = "%3.02f" % (percent / 100.0)
percent = frontendStatus.get("tuner_signal_power")
if percent is not None:
inf['agc'] = int(percent * 100 / 65535)
percent = frontendStatus.get("tuner_bit_error_rate")
if percent is not None:
inf['ber'] = int(percent * 100 / 65535)
return inf
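# Editor's note: the tuner metrics above arrive as 16-bit driver values
# (0..65535); the code rescales them to a 0-100 percentage, for example
# int(32768 * 100 / 65535) == 50.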
def getCurrentTime():
t = time.localtime()
return {
"status": True,
"time": "%2d:%02d:%02d" % (t.tm_hour, t.tm_min, t.tm_sec)
}
def getStatusInfo(self):
# Get Current Volume and Mute Status
vcontrol = eDVBVolumecontrol.getInstance()
statusinfo = {
'volume': vcontrol.getVolume(),
'muted': vcontrol.isMuted(),
'transcoding': TRANSCODING,
'currservice_filename': "",
'currservice_id': -1,
}
# Get currently running Service
event = None
serviceref = NavigationInstance.instance.getCurrentlyPlayingServiceReference()
serviceref_string = None
currservice_station = None
if serviceref is not None:
serviceHandler = eServiceCenter.getInstance()
serviceHandlerInfo = serviceHandler.info(serviceref)
service = NavigationInstance.instance.getCurrentService()
serviceinfo = service and service.info()
event = serviceinfo and serviceinfo.getEvent(0)
serviceref_string = serviceref.toString()
currservice_station = removeBad(serviceHandlerInfo.getName(serviceref))
else:
event = None
serviceHandlerInfo = None
if event is not None:
# (begin, end, name, description, eit)
curEvent = parseEvent(event)
begin_timestamp = int(curEvent[0]) + (config.recording.margin_before.value * 60)
end_timestamp = int(curEvent[1]) - (config.recording.margin_after.value * 60)
statusinfo['currservice_name'] = removeBad(curEvent[2])
statusinfo['currservice_serviceref'] = serviceref_string
statusinfo['currservice_begin'] = time.strftime("%H:%M", (time.localtime(begin_timestamp)))
statusinfo['currservice_begin_timestamp'] = begin_timestamp
statusinfo['currservice_end'] = time.strftime("%H:%M", (time.localtime(end_timestamp)))
statusinfo['currservice_end_timestamp'] = end_timestamp
desc = curEvent[3]
if six.PY2:
desc = desc.decode('utf-8')
if len(desc) > 220:
			desc = desc[0:220] + u"..."
if six.PY2:
desc = desc.encode('utf-8')
statusinfo['currservice_description'] = desc
statusinfo['currservice_station'] = currservice_station
if statusinfo['currservice_serviceref'].startswith('1:0:0'):
statusinfo['currservice_filename'] = '/' + '/'.join(serviceref_string.split("/")[1:])
full_desc = statusinfo['currservice_name'] + '\n'
full_desc += statusinfo['currservice_begin'] + " - " + statusinfo['currservice_end'] + '\n\n'
full_desc += removeBad2(event.getExtendedDescription())
statusinfo['currservice_fulldescription'] = full_desc
statusinfo['currservice_id'] = curEvent[4]
else:
statusinfo['currservice_name'] = "N/A"
statusinfo['currservice_begin'] = ""
statusinfo['currservice_end'] = ""
statusinfo['currservice_description'] = ""
statusinfo['currservice_fulldescription'] = "N/A"
if serviceref:
statusinfo['currservice_serviceref'] = serviceref_string
if statusinfo['currservice_serviceref'].startswith('1:0:0') or statusinfo['currservice_serviceref'].startswith('4097:0:0'):
this_path = '/' + '/'.join(serviceref_string.split("/")[1:])
if os.path.exists(this_path):
statusinfo['currservice_filename'] = this_path
if serviceHandlerInfo:
statusinfo['currservice_station'] = currservice_station
elif serviceref_string.find("http") != -1:
statusinfo['currservice_station'] = serviceref_string.replace('%3a', ':')[serviceref_string.find("http"):]
else:
statusinfo['currservice_station'] = "N/A"
# Get Standby State
from Screens.Standby import inStandby
if inStandby is None:
statusinfo['inStandby'] = "false"
else:
statusinfo['inStandby'] = "true"
# Get recording state
recs = NavigationInstance.instance.getRecordings()
if recs:
statusinfo['isRecording'] = "true"
statusinfo['Recording_list'] = "\n"
for timer in NavigationInstance.instance.RecordTimer.timer_list:
if timer.state == TimerEntry.StateRunning:
if not timer.justplay:
statusinfo['Recording_list'] += removeBad(timer.service_ref.getServiceName()) + ": " + timer.name + "\n"
if statusinfo['Recording_list'] == "\n":
statusinfo['isRecording'] = "false"
else:
statusinfo['isRecording'] = "false"
# Get streaminfo
streams = GetStreamInfo()
Streaming_list = []
try:
# TODO move this code to javascript getStatusinfo
for stream in streams:
st = ''
s = stream["name"]
e = stream["eventname"]
i = stream["ip"]
del stream
if i is not None:
st += i + ": "
st += s + ' - ' + e
if st != '':
Streaming_list.append(st)
except Exception as error: # nosec # noqa: E722
# print("[OpenWebif] -D- build Streaming_list %s" % error)
pass
if len(streams) > 0:
statusinfo['Streaming_list'] = '\n'.join(Streaming_list)
statusinfo['isStreaming'] = 'true'
else:
statusinfo['Streaming_list'] = ''
statusinfo['isStreaming'] = 'false'
return statusinfo
def getAlternativeChannels(service):
alternativeServices = eServiceCenter.getInstance().list(eServiceReference(service))
return alternativeServices and alternativeServices.getContent("S", True)
def GetWithAlternative(service, onlyFirst=True):
if service.startswith('1:134:'):
channels = getAlternativeChannels(service)
if channels:
if onlyFirst:
return channels[0]
else:
return channels
if onlyFirst:
return service
else:
return None
def getPipStatus():
return int(getInfo()['grabpip'] and hasattr(InfoBar.instance, 'session') and InfoBar.instance.session.pipshown)
def testPipStatus(self):
pipinfo = {
'pip': getPipStatus(),
}
return pipinfo
|
gpl-3.0
|
citassa1985/youtube-dl
|
youtube_dl/extractor/goldenmoustache.py
|
159
|
1739
|
from __future__ import unicode_literals
from .common import InfoExtractor
class GoldenMoustacheIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?goldenmoustache\.com/(?P<display_id>[\w-]+)-(?P<id>\d+)'
_TESTS = [{
'url': 'http://www.goldenmoustache.com/suricate-le-poker-3700/',
'md5': '0f904432fa07da5054d6c8beb5efb51a',
'info_dict': {
'id': '3700',
'ext': 'mp4',
'title': 'Suricate - Le Poker',
'description': 'md5:3d1f242f44f8c8cb0a106f1fd08e5dc9',
'thumbnail': 're:^https?://.*\.jpg$',
}
}, {
'url': 'http://www.goldenmoustache.com/le-lab-tout-effacer-mc-fly-et-carlito-55249/',
'md5': '27f0c50fb4dd5f01dc9082fc67cd5700',
'info_dict': {
'id': '55249',
'ext': 'mp4',
'title': 'Le LAB - Tout Effacer (Mc Fly et Carlito)',
'description': 'md5:9b7fbf11023fb2250bd4b185e3de3b2a',
'thumbnail': 're:^https?://.*\.(?:png|jpg)$',
}
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_url = self._html_search_regex(
r'data-src-type="mp4" data-src="([^"]+)"', webpage, 'video URL')
title = self._html_search_regex(
r'<title>(.*?)(?: - Golden Moustache)?</title>', webpage, 'title')
thumbnail = self._og_search_thumbnail(webpage)
description = self._og_search_description(webpage)
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': title,
'description': description,
'thumbnail': thumbnail,
}
|
unlicense
|
cripplet/django_iin_lookup
|
tests.py
|
1
|
1075
|
import requests
from django.test import TestCase
from models import IINInfo
class TestIINInfo(TestCase):
def test_iin(self):
self.assertEqual(IINInfo.objects.count(), 0)
info = IINInfo.objects.fetch_iin(iin='546116')
self.assertEqual(IINInfo.objects.count(), 1)
self.assertEqual(info.bin, '546116')
self.assertEqual(info.iin, info.bin)
self.assertEqual(info.card_brand, 'MASTERCARD')
self.assertEqual(info.card_type, 'D')
info = IINInfo.objects.fetch_iin(iin='546116')
self.assertEqual(info.card_brand, 'MASTERCARD')
self.assertEqual(info.card_type, 'D')
info = IINInfo.objects.fetch_iin(iin='601100')
self.assertEqual(IINInfo.objects.count(), 2)
self.assertEqual(info.card_brand, 'DISCOVER')
self.assertEqual(info.card_type, 'C')
self.assertRaises(ValueError, IINInfo.objects.fetch_iin, 5)
self.assertRaises(ValueError, IINInfo.objects.fetch_iin, '5')
self.assertRaises(ValueError, IINInfo.objects.fetch_iin, '5461160')
self.assertRaises(ValueError, IINInfo.objects.fetch_iin, '546116a')
self.assertEqual(IINInfo.objects.count(), 2)
|
mit
|
gusai-francelabs/datafari
|
windows/python/Lib/site-packages/pip/_vendor/_markerlib/markers.py
|
1769
|
3979
|
# -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
import ast
import os
import platform
import sys
import weakref
_builtin_compile = compile
try:
from platform import python_implementation
except ImportError:
if os.name == "java":
# Jython 2.5 has ast module, but not platform.python_implementation() function.
def python_implementation():
return "Jython"
else:
raise
# restricted set of variables
_VARS = {'sys.platform': sys.platform,
'python_version': '%s.%s' % sys.version_info[:2],
# FIXME parsing sys.platform is not reliable, but there is no other
# way to get e.g. 2.7.2+, and the PEP is defined with sys.version
'python_full_version': sys.version.split(' ', 1)[0],
'os.name': os.name,
'platform.version': platform.version(),
'platform.machine': platform.machine(),
'platform.python_implementation': python_implementation(),
'extra': None # wheel extension
}
for var in list(_VARS.keys()):
if '.' in var:
_VARS[var.replace('.', '_')] = _VARS[var]
def default_environment():
"""Return copy of default PEP 385 globals dictionary."""
return dict(_VARS)
class ASTWhitelist(ast.NodeTransformer):
def __init__(self, statement):
self.statement = statement # for error messages
ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
# Bool operations
ALLOWED += (ast.And, ast.Or)
# Comparison operations
ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)
def visit(self, node):
"""Ensure statement only contains allowed nodes."""
if not isinstance(node, self.ALLOWED):
raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
(self.statement,
(' ' * node.col_offset) + '^'))
return ast.NodeTransformer.visit(self, node)
def visit_Attribute(self, node):
"""Flatten one level of attribute access."""
new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
return ast.copy_location(new_node, node)
def parse_marker(marker):
tree = ast.parse(marker, mode='eval')
new_tree = ASTWhitelist(marker).generic_visit(tree)
return new_tree
def compile_marker(parsed_marker):
return _builtin_compile(parsed_marker, '<environment marker>', 'eval',
dont_inherit=True)
_cache = weakref.WeakValueDictionary()
def compile(marker):
"""Return compiled marker as a function accepting an environment dict."""
try:
return _cache[marker]
except KeyError:
pass
if not marker.strip():
def marker_fn(environment=None, override=None):
""""""
return True
else:
compiled_marker = compile_marker(parse_marker(marker))
def marker_fn(environment=None, override=None):
"""override updates environment"""
if override is None:
override = {}
if environment is None:
environment = default_environment()
environment.update(override)
return eval(compiled_marker, environment)
marker_fn.__doc__ = marker
_cache[marker] = marker_fn
return _cache[marker]
def interpret(marker, environment=None):
return compile(marker)(environment)
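# Illustrative examples (editor's note, not part of the upstream module):
#     interpret("os.name == 'posix'")          # evaluated against this interpreter
#     fn = compile("python_version == '2.7'")
#     fn()                                     # True only on a 2.7 interpreter
#     fn(override={'python_version': '3.5'})   # False: the override wins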
|
apache-2.0
|
arantebillywilson/python-snippets
|
microblog/flask/lib/python3.5/site-packages/sqlalchemy/orm/state.py
|
32
|
27492
|
# orm/state.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines instrumentation of instances.
This module is usually not directly visible to user applications, but
defines a large part of the ORM's interactivity.
"""
import weakref
from .. import util
from .. import inspection
from . import exc as orm_exc, interfaces
from .path_registry import PathRegistry
from .base import PASSIVE_NO_RESULT, SQL_OK, NEVER_SET, ATTR_WAS_SET, \
NO_VALUE, PASSIVE_NO_INITIALIZE, INIT_OK, PASSIVE_OFF
from . import base
@inspection._self_inspects
class InstanceState(interfaces.InspectionAttr):
"""tracks state information at the instance level.
The :class:`.InstanceState` is a key object used by the
SQLAlchemy ORM in order to track the state of an object;
it is created the moment an object is instantiated, typically
as a result of :term:`instrumentation` which SQLAlchemy applies
to the ``__init__()`` method of the class.
:class:`.InstanceState` is also a semi-public object,
available for runtime inspection as to the state of a
mapped instance, including information such as its current
status within a particular :class:`.Session` and details
about data on individual attributes. The public API
in order to acquire a :class:`.InstanceState` object
is to use the :func:`.inspect` system::
>>> from sqlalchemy import inspect
>>> insp = inspect(some_mapped_object)
.. seealso::
:ref:`core_inspection_toplevel`
"""
session_id = None
key = None
runid = None
load_options = util.EMPTY_SET
load_path = ()
insert_order = None
_strong_obj = None
modified = False
expired = False
_deleted = False
_load_pending = False
is_instance = True
callables = ()
"""A namespace where a per-state loader callable can be associated.
In SQLAlchemy 1.0, this is only used for lazy loaders / deferred
loaders that were set up via query option.
Previously, callables was used also to indicate expired attributes
by storing a link to the InstanceState itself in this dictionary.
This role is now handled by the expired_attributes set.
"""
def __init__(self, obj, manager):
self.class_ = obj.__class__
self.manager = manager
self.obj = weakref.ref(obj, self._cleanup)
self.committed_state = {}
self.expired_attributes = set()
expired_attributes = None
"""The set of keys which are 'expired' to be loaded by
the manager's deferred scalar loader, assuming no pending
changes.
see also the ``unmodified`` collection which is intersected
against this set when a refresh operation occurs."""
@util.memoized_property
def attrs(self):
"""Return a namespace representing each attribute on
the mapped object, including its current value
and history.
The returned object is an instance of :class:`.AttributeState`.
This object allows inspection of the current data
within an attribute as well as attribute history
since the last flush.
"""
return util.ImmutableProperties(
dict(
(key, AttributeState(self, key))
for key in self.manager
)
)
@property
def transient(self):
"""Return true if the object is :term:`transient`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and \
not self._attached
@property
def pending(self):
"""Return true if the object is :term:`pending`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is None and \
self._attached
@property
def deleted(self):
"""Return true if the object is :term:`deleted`.
An object that is in the deleted state is guaranteed to
not be within the :attr:`.Session.identity_map` of its parent
:class:`.Session`; however if the session's transaction is rolled
back, the object will be restored to the persistent state and
the identity map.
.. note::
The :attr:`.InstanceState.deleted` attribute refers to a specific
state of the object that occurs between the "persistent" and
"detached" states; once the object is :term:`detached`, the
:attr:`.InstanceState.deleted` attribute **no longer returns
True**; in order to detect that a state was deleted, regardless
of whether or not the object is associated with a :class:`.Session`,
use the :attr:`.InstanceState.was_deleted` accessor.
        .. versionadded:: 1.1
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and \
self._attached and self._deleted
@property
def was_deleted(self):
"""Return True if this object is or was previously in the
"deleted" state and has not been reverted to persistent.
This flag returns True once the object was deleted in flush.
When the object is expunged from the session either explicitly
or via transaction commit and enters the "detached" state,
this flag will continue to report True.
.. versionadded:: 1.1 - added a local method form of
:func:`.orm.util.was_deleted`.
.. seealso::
:attr:`.InstanceState.deleted` - refers to the "deleted" state
:func:`.orm.util.was_deleted` - standalone function
:ref:`session_object_states`
"""
return self._deleted
@property
def persistent(self):
"""Return true if the object is :term:`persistent`.
An object that is in the persistent state is guaranteed to
be within the :attr:`.Session.identity_map` of its parent
:class:`.Session`.
.. versionchanged:: 1.1 The :attr:`.InstanceState.persistent`
accessor no longer returns True for an object that was
"deleted" within a flush; use the :attr:`.InstanceState.deleted`
accessor to detect this state. This allows the "persistent"
state to guarantee membership in the identity map.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and \
self._attached and not self._deleted
@property
def detached(self):
"""Return true if the object is :term:`detached`.
.. seealso::
:ref:`session_object_states`
"""
return self.key is not None and not self._attached
@property
@util.dependencies("sqlalchemy.orm.session")
def _attached(self, sessionlib):
return self.session_id is not None and \
self.session_id in sessionlib._sessions
@property
@util.dependencies("sqlalchemy.orm.session")
def session(self, sessionlib):
"""Return the owning :class:`.Session` for this instance,
or ``None`` if none available.
Note that the result here can in some cases be *different*
from that of ``obj in session``; an object that's been deleted
will report as not ``in session``, however if the transaction is
still in progress, this attribute will still refer to that session.
Only when the transaction is completed does the object become
fully detached under normal circumstances.
"""
return sessionlib._state_session(self)
@property
def object(self):
"""Return the mapped object represented by this
:class:`.InstanceState`."""
return self.obj()
@property
def identity(self):
"""Return the mapped identity of the mapped object.
This is the primary key identity as persisted by the ORM
which can always be passed directly to
:meth:`.Query.get`.
Returns ``None`` if the object has no primary key identity.
.. note::
An object which is :term:`transient` or :term:`pending`
does **not** have a mapped identity until it is flushed,
even if its attributes include primary key values.
"""
if self.key is None:
return None
else:
return self.key[1]
@property
def identity_key(self):
"""Return the identity key for the mapped object.
This is the key used to locate the object within
the :attr:`.Session.identity_map` mapping. It contains
the identity as returned by :attr:`.identity` within it.
"""
# TODO: just change .key to .identity_key across
# the board ? probably
return self.key
@util.memoized_property
def parents(self):
return {}
@util.memoized_property
def _pending_mutations(self):
return {}
@util.memoized_property
def mapper(self):
"""Return the :class:`.Mapper` used for this mapepd object."""
return self.manager.mapper
@property
def has_identity(self):
"""Return ``True`` if this object has an identity key.
This should always have the same value as the
expression ``state.persistent or state.detached``.
"""
return bool(self.key)
@classmethod
def _detach_states(self, states, session, to_transient=False):
persistent_to_detached = \
session.dispatch.persistent_to_detached or None
deleted_to_detached = \
session.dispatch.deleted_to_detached or None
pending_to_transient = \
session.dispatch.pending_to_transient or None
persistent_to_transient = \
session.dispatch.persistent_to_transient or None
for state in states:
deleted = state._deleted
pending = state.key is None
persistent = not pending and not deleted
state.session_id = None
if to_transient and state.key:
del state.key
if persistent:
if to_transient:
if persistent_to_transient is not None:
obj = state.obj()
if obj is not None:
persistent_to_transient(session, obj)
elif persistent_to_detached is not None:
obj = state.obj()
if obj is not None:
persistent_to_detached(session, obj)
elif deleted and deleted_to_detached is not None:
obj = state.obj()
if obj is not None:
deleted_to_detached(session, obj)
elif pending and pending_to_transient is not None:
obj = state.obj()
if obj is not None:
pending_to_transient(session, obj)
state._strong_obj = None
def _detach(self, session=None):
if session:
InstanceState._detach_states([self], session)
else:
self.session_id = self._strong_obj = None
def _dispose(self):
self._detach()
del self.obj
def _cleanup(self, ref):
"""Weakref callback cleanup.
This callable cleans out the state when it is being garbage
collected.
this _cleanup **assumes** that there are no strong refs to us!
Will not work otherwise!
"""
instance_dict = self._instance_dict()
if instance_dict is not None:
instance_dict._fast_discard(self)
del self._instance_dict
# we can't possibly be in instance_dict._modified
# b.c. this is weakref cleanup only, that set
# is strong referencing!
# assert self not in instance_dict._modified
self.session_id = self._strong_obj = None
del self.obj
def obj(self):
return None
@property
def dict(self):
"""Return the instance dict used by the object.
Under normal circumstances, this is always synonymous
with the ``__dict__`` attribute of the mapped object,
unless an alternative instrumentation system has been
configured.
In the case that the actual object has been garbage
collected, this accessor returns a blank dictionary.
"""
o = self.obj()
if o is not None:
return base.instance_dict(o)
else:
return {}
def _initialize_instance(*mixed, **kwargs):
self, instance, args = mixed[0], mixed[1], mixed[2:] # noqa
manager = self.manager
manager.dispatch.init(self, args, kwargs)
try:
return manager.original_init(*mixed[1:], **kwargs)
except:
with util.safe_reraise():
manager.dispatch.init_failure(self, args, kwargs)
def get_history(self, key, passive):
return self.manager[key].impl.get_history(self, self.dict, passive)
def get_impl(self, key):
return self.manager[key].impl
def _get_pending_mutation(self, key):
if key not in self._pending_mutations:
self._pending_mutations[key] = PendingCollection()
return self._pending_mutations[key]
def __getstate__(self):
state_dict = {'instance': self.obj()}
state_dict.update(
(k, self.__dict__[k]) for k in (
'committed_state', '_pending_mutations', 'modified',
'expired', 'callables', 'key', 'parents', 'load_options',
'class_', 'expired_attributes'
) if k in self.__dict__
)
if self.load_path:
state_dict['load_path'] = self.load_path.serialize()
state_dict['manager'] = self.manager._serialize(self, state_dict)
return state_dict
def __setstate__(self, state_dict):
inst = state_dict['instance']
if inst is not None:
self.obj = weakref.ref(inst, self._cleanup)
self.class_ = inst.__class__
else:
# None being possible here generally new as of 0.7.4
# due to storage of state in "parents". "class_"
# also new.
self.obj = None
self.class_ = state_dict['class_']
self.committed_state = state_dict.get('committed_state', {})
self._pending_mutations = state_dict.get('_pending_mutations', {})
self.parents = state_dict.get('parents', {})
self.modified = state_dict.get('modified', False)
self.expired = state_dict.get('expired', False)
if 'callables' in state_dict:
self.callables = state_dict['callables']
try:
self.expired_attributes = state_dict['expired_attributes']
except KeyError:
self.expired_attributes = set()
# 0.9 and earlier compat
for k in list(self.callables):
if self.callables[k] is self:
self.expired_attributes.add(k)
del self.callables[k]
self.__dict__.update([
(k, state_dict[k]) for k in (
'key', 'load_options',
) if k in state_dict
])
if 'load_path' in state_dict:
self.load_path = PathRegistry.\
deserialize(state_dict['load_path'])
state_dict['manager'](self, inst, state_dict)
def _reset(self, dict_, key):
"""Remove the given attribute and any
callables associated with it."""
old = dict_.pop(key, None)
if old is not None and self.manager[key].impl.collection:
self.manager[key].impl._invalidate_collection(old)
self.expired_attributes.discard(key)
if self.callables:
self.callables.pop(key, None)
def _copy_callables(self, from_):
if 'callables' in from_.__dict__:
self.callables = dict(from_.callables)
@classmethod
def _instance_level_callable_processor(cls, manager, fn, key):
impl = manager[key].impl
if impl.collection:
def _set_callable(state, dict_, row):
if 'callables' not in state.__dict__:
state.callables = {}
old = dict_.pop(key, None)
if old is not None:
impl._invalidate_collection(old)
state.callables[key] = fn
else:
def _set_callable(state, dict_, row):
if 'callables' not in state.__dict__:
state.callables = {}
state.callables[key] = fn
return _set_callable
def _expire(self, dict_, modified_set):
self.expired = True
if self.modified:
modified_set.discard(self)
self.committed_state.clear()
self.modified = False
self._strong_obj = None
if '_pending_mutations' in self.__dict__:
del self.__dict__['_pending_mutations']
if 'parents' in self.__dict__:
del self.__dict__['parents']
self.expired_attributes.update(
[impl.key for impl in self.manager._scalar_loader_impls
if impl.expire_missing or impl.key in dict_]
)
if self.callables:
for k in self.expired_attributes.intersection(self.callables):
del self.callables[k]
for k in self.manager._collection_impl_keys.intersection(dict_):
collection = dict_.pop(k)
collection._sa_adapter.invalidated = True
for key in self.manager._all_key_set.intersection(dict_):
del dict_[key]
self.manager.dispatch.expire(self, None)
def _expire_attributes(self, dict_, attribute_names, no_loader=False):
pending = self.__dict__.get('_pending_mutations', None)
callables = self.callables
for key in attribute_names:
impl = self.manager[key].impl
if impl.accepts_scalar_loader:
if no_loader and (
impl.callable_ or
key in callables
):
continue
self.expired_attributes.add(key)
if callables and key in callables:
del callables[key]
old = dict_.pop(key, None)
if impl.collection and old is not None:
impl._invalidate_collection(old)
self.committed_state.pop(key, None)
if pending:
pending.pop(key, None)
self.manager.dispatch.expire(self, attribute_names)
def _load_expired(self, state, passive):
"""__call__ allows the InstanceState to act as a deferred
callable for loading expired attributes, which is also
serializable (picklable).
"""
if not passive & SQL_OK:
return PASSIVE_NO_RESULT
toload = self.expired_attributes.\
intersection(self.unmodified)
self.manager.deferred_scalar_loader(self, toload)
# if the loader failed, or this
# instance state didn't have an identity,
# the attributes still might be in the callables
# dict. ensure they are removed.
self.expired_attributes.clear()
return ATTR_WAS_SET
@property
def unmodified(self):
"""Return the set of keys which have no uncommitted changes"""
return set(self.manager).difference(self.committed_state)
def unmodified_intersection(self, keys):
"""Return self.unmodified.intersection(keys)."""
return set(keys).intersection(self.manager).\
difference(self.committed_state)
@property
def unloaded(self):
"""Return the set of keys which do not have a loaded value.
This includes expired attributes and any other attribute that
was never populated or modified.
"""
return set(self.manager).\
difference(self.committed_state).\
difference(self.dict)
@property
def _unloaded_non_object(self):
return self.unloaded.intersection(
attr for attr in self.manager
if self.manager[attr].impl.accepts_scalar_loader
)
def _instance_dict(self):
return None
def _modified_event(
self, dict_, attr, previous, collection=False, force=False):
if not attr.send_modified_events:
return
if attr.key not in self.committed_state or force:
if collection:
if previous is NEVER_SET:
if attr.key in dict_:
previous = dict_[attr.key]
if previous not in (None, NO_VALUE, NEVER_SET):
previous = attr.copy(previous)
self.committed_state[attr.key] = previous
# assert self._strong_obj is None or self.modified
if (self.session_id and self._strong_obj is None) \
or not self.modified:
self.modified = True
instance_dict = self._instance_dict()
if instance_dict:
instance_dict._modified.add(self)
# only create _strong_obj link if attached
# to a session
inst = self.obj()
if self.session_id:
self._strong_obj = inst
if inst is None:
raise orm_exc.ObjectDereferencedError(
"Can't emit change event for attribute '%s' - "
"parent object of type %s has been garbage "
"collected."
% (
self.manager[attr.key],
base.state_class_str(self)
))
def _commit(self, dict_, keys):
"""Commit attributes.
This is used by a partial-attribute load operation to mark committed
those attributes which were refreshed from the database.
Attributes marked as "expired" can potentially remain "expired" after
this step if a value was not populated in state.dict.
"""
for key in keys:
self.committed_state.pop(key, None)
self.expired = False
self.expired_attributes.difference_update(
set(keys).intersection(dict_))
# the per-keys commit removes object-level callables,
# while that of commit_all does not. it's not clear
# if this behavior has a clear rationale, however tests do
# ensure this is what it does.
if self.callables:
for key in set(self.callables).\
intersection(keys).\
intersection(dict_):
del self.callables[key]
def _commit_all(self, dict_, instance_dict=None):
"""commit all attributes unconditionally.
This is used after a flush() or a full load/refresh
to remove all pending state from the instance.
- all attributes are marked as "committed"
- the "strong dirty reference" is removed
- the "modified" flag is set to False
- any "expired" markers for scalar attributes loaded are removed.
- lazy load callables for objects / collections *stay*
Attributes marked as "expired" can potentially remain
"expired" after this step if a value was not populated in state.dict.
"""
self._commit_all_states([(self, dict_)], instance_dict)
@classmethod
def _commit_all_states(self, iter, instance_dict=None):
"""Mass / highly inlined version of commit_all()."""
for state, dict_ in iter:
state_dict = state.__dict__
state.committed_state.clear()
if '_pending_mutations' in state_dict:
del state_dict['_pending_mutations']
state.expired_attributes.difference_update(dict_)
if instance_dict and state.modified:
instance_dict._modified.discard(state)
state.modified = state.expired = False
state._strong_obj = None
class AttributeState(object):
"""Provide an inspection interface corresponding
to a particular attribute on a particular mapped object.
The :class:`.AttributeState` object is accessed
via the :attr:`.InstanceState.attrs` collection
of a particular :class:`.InstanceState`::
from sqlalchemy import inspect
insp = inspect(some_mapped_object)
attr_state = insp.attrs.some_attribute
"""
def __init__(self, state, key):
self.state = state
self.key = key
@property
def loaded_value(self):
"""The current value of this attribute as loaded from the database.
If the value has not been loaded, or is otherwise not present
in the object's dictionary, returns NO_VALUE.
"""
return self.state.dict.get(self.key, NO_VALUE)
@property
def value(self):
"""Return the value of this attribute.
This operation is equivalent to accessing the object's
attribute directly or via ``getattr()``, and will fire
off any pending loader callables if needed.
"""
return self.state.manager[self.key].__get__(
self.state.obj(), self.state.class_)
@property
def history(self):
"""Return the current pre-flush change history for
this attribute, via the :class:`.History` interface.
This method will **not** emit loader callables if the value of the
attribute is unloaded.
.. seealso::
:meth:`.AttributeState.load_history` - retrieve history
using loader callables if the value is not locally present.
:func:`.attributes.get_history` - underlying function
"""
return self.state.get_history(self.key,
PASSIVE_NO_INITIALIZE)
def load_history(self):
"""Return the current pre-flush change history for
this attribute, via the :class:`.History` interface.
This method **will** emit loader callables if the value of the
attribute is unloaded.
.. seealso::
:attr:`.AttributeState.history`
:func:`.attributes.get_history` - underlying function
.. versionadded:: 0.9.0
"""
return self.state.get_history(self.key,
PASSIVE_OFF ^ INIT_OK)
class PendingCollection(object):
"""A writable placeholder for an unloaded collection.
Stores items appended to and removed from a collection that has not yet
been loaded. When the collection is loaded, the changes stored in
PendingCollection are applied to it to produce the final result.
"""
def __init__(self):
self.deleted_items = util.IdentitySet()
self.added_items = util.OrderedIdentitySet()
def append(self, value):
if value in self.deleted_items:
self.deleted_items.remove(value)
else:
self.added_items.add(value)
def remove(self, value):
if value in self.added_items:
self.added_items.remove(value)
else:
self.deleted_items.add(value)
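# Behaviour sketch (editor's note, not part of the upstream module):
#     pc = PendingCollection()
#     pc.append(obj)       # obj is recorded in added_items
#     pc.remove(obj)       # obj was only pending an add, so it is dropped again
#     pc.remove(other)     # other was never added, so it lands in deleted_items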
|
mit
|
EverlyWell/redash
|
redash/authentication/saml_auth.py
|
13
|
5120
|
import logging
import requests
from flask import redirect, url_for, Blueprint, request
from redash.authentication.google_oauth import create_and_login_user
from redash.authentication.org_resolving import current_org
from redash import settings
from saml2 import BINDING_HTTP_POST, BINDING_HTTP_REDIRECT, entity
from saml2.client import Saml2Client
from saml2.config import Config as Saml2Config
from saml2.saml import NAMEID_FORMAT_TRANSIENT
logger = logging.getLogger('saml_auth')
blueprint = Blueprint('saml_auth', __name__)
def get_saml_client():
"""
Return SAML configuration.
The configuration is a hash for use by saml2.config.Config
"""
if settings.SAML_CALLBACK_SERVER_NAME:
acs_url = settings.SAML_CALLBACK_SERVER_NAME + url_for("saml_auth.idp_initiated")
else:
acs_url = url_for("saml_auth.idp_initiated", _external=True)
# NOTE:
# Ideally, this should fetch the metadata and pass it to
# PySAML2 via the "inline" metadata type.
# However, this method doesn't seem to work on PySAML2 v2.4.0
#
# SAML metadata changes very rarely. On a production system,
    # this data should be cached as appropriate for your production system.
if settings.SAML_METADATA_URL != "":
rv = requests.get(settings.SAML_METADATA_URL)
import tempfile
tmp = tempfile.NamedTemporaryFile()
f = open(tmp.name, 'w')
f.write(rv.text)
f.close()
metadata_path = tmp.name
else:
metadata_path = settings.SAML_LOCAL_METADATA_PATH
saml_settings = {
'metadata': {
# 'inline': metadata,
"local": [metadata_path]
},
'service': {
'sp': {
'endpoints': {
'assertion_consumer_service': [
(acs_url, BINDING_HTTP_REDIRECT),
(acs_url, BINDING_HTTP_POST)
],
},
# Don't verify that the incoming requests originate from us via
# the built-in cache for authn request ids in pysaml2
'allow_unsolicited': True,
# Don't sign authn requests, since signed requests only make
# sense in a situation where you control both the SP and IdP
'authn_requests_signed': False,
'logout_requests_signed': True,
'want_assertions_signed': True,
'want_response_signed': False,
},
},
}
if settings.SAML_ENTITY_ID != "":
saml_settings['entityid'] = settings.SAML_ENTITY_ID
spConfig = Saml2Config()
spConfig.load(saml_settings)
spConfig.allow_unknown_attributes = True
saml_client = Saml2Client(config=spConfig)
if settings.SAML_METADATA_URL != "":
tmp.close()
return saml_client
@blueprint.route("/saml/callback", methods=['POST'])
def idp_initiated():
saml_client = get_saml_client()
authn_response = saml_client.parse_authn_request_response(
request.form['SAMLResponse'],
entity.BINDING_HTTP_POST)
authn_response.get_identity()
user_info = authn_response.get_subject()
email = user_info.text
name = "%s %s" % (authn_response.ava['FirstName'][0], authn_response.ava['LastName'][0])
    # This is what is known as "Just In Time (JIT) provisioning".
# What that means is that, if a user in a SAML assertion
# isn't in the user store, we create that user first, then log them in
user = create_and_login_user(current_org, name, email)
if 'RedashGroups' in authn_response.ava:
group_names = authn_response.ava.get('RedashGroups')
user.update_group_assignments(group_names)
url = url_for('redash.index')
return redirect(url)
@blueprint.route("/saml/login")
def sp_initiated():
if not settings.SAML_METADATA_URL and not settings.SAML_LOCAL_METADATA_PATH:
logger.error("Cannot invoke saml endpoint without metadata url in settings.")
return redirect(url_for('redash.index'))
saml_client = get_saml_client()
if settings.SAML_NAMEID_FORMAT != "":
nameid_format = settings.SAML_NAMEID_FORMAT
else:
nameid_format = NAMEID_FORMAT_TRANSIENT
reqid, info = saml_client.prepare_for_authenticate(nameid_format=nameid_format)
redirect_url = None
# Select the IdP URL to send the AuthN request to
for key, value in info['headers']:
        if key == 'Location':
redirect_url = value
response = redirect(redirect_url, code=302)
# NOTE:
# I realize I _technically_ don't need to set Cache-Control or Pragma:
# http://stackoverflow.com/a/5494469
# However, Section 3.2.3.2 of the SAML spec suggests they are set:
# http://docs.oasis-open.org/security/saml/v2.0/saml-bindings-2.0-os.pdf
# We set those headers here as a "belt and suspenders" approach,
# since enterprise environments don't always conform to RFCs
response.headers['Cache-Control'] = 'no-cache, no-store'
response.headers['Pragma'] = 'no-cache'
return response
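# Illustrative only (not part of the original module): a minimal sketch of
# mounting this blueprint on a Flask application. The real redash application
# performs its own registration; the function name and app object here are
# hypothetical.
def _example_register_saml(app):
    """Attach the /saml/login and /saml/callback routes to `app` (sketch)."""
    app.register_blueprint(blueprint)
    return app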
|
bsd-2-clause
|
zentner-kyle/servo
|
python/mozlog/tests/test_logger.py
|
44
|
11335
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import json
import socket
import threading
import time
import unittest
import mozfile
import mozlog
class ListHandler(mozlog.Handler):
"""Mock handler appends messages to a list for later inspection."""
def __init__(self):
mozlog.Handler.__init__(self)
self.messages = []
def emit(self, record):
self.messages.append(self.format(record))
class TestLogging(unittest.TestCase):
"""Tests behavior of basic mozlog api."""
def test_logger_defaults(self):
"""Tests the default logging format and behavior."""
default_logger = mozlog.getLogger('default.logger')
self.assertEqual(default_logger.name, 'default.logger')
self.assertEqual(len(default_logger.handlers), 1)
self.assertTrue(isinstance(default_logger.handlers[0],
mozlog.StreamHandler))
f = mozfile.NamedTemporaryFile()
list_logger = mozlog.getLogger('file.logger',
handler=mozlog.FileHandler(f.name))
self.assertEqual(len(list_logger.handlers), 1)
self.assertTrue(isinstance(list_logger.handlers[0],
mozlog.FileHandler))
f.close()
self.assertRaises(ValueError, mozlog.getLogger,
'file.logger', handler=ListHandler())
def test_timestamps(self):
"""Verifies that timestamps are included when asked for."""
log_name = 'test'
handler = ListHandler()
handler.setFormatter(mozlog.MozFormatter())
log = mozlog.getLogger(log_name, handler=handler)
log.info('no timestamp')
self.assertTrue(handler.messages[-1].startswith('%s ' % log_name))
handler.setFormatter(mozlog.MozFormatter(include_timestamp=True))
log.info('timestamp')
# Just verify that this raises no exceptions.
datetime.datetime.strptime(handler.messages[-1][:23],
'%Y-%m-%d %H:%M:%S,%f')
class TestStructuredLogging(unittest.TestCase):
"""Tests structured output in mozlog."""
def setUp(self):
self.handler = ListHandler()
self.handler.setFormatter(mozlog.JSONFormatter())
self.logger = mozlog.MozLogger('test.Logger')
self.logger.addHandler(self.handler)
self.logger.setLevel(mozlog.DEBUG)
def check_messages(self, expected, actual):
"""Checks actual for equality with corresponding fields in actual.
The actual message should contain all fields in expected, and
should be identical, with the exception of the timestamp field.
The actual message should contain no fields other than the timestamp
field and those present in expected."""
self.assertTrue(isinstance(actual['_time'], (int, long)))
for k, v in expected.items():
self.assertEqual(v, actual[k])
for k in actual.keys():
if k != '_time':
self.assertTrue(expected.get(k) is not None)
def test_structured_output(self):
self.logger.log_structured('test_message',
{'_level': mozlog.INFO,
'_message': 'message one'})
self.logger.log_structured('test_message',
{'_level': mozlog.INFO,
'_message': 'message two'})
self.logger.log_structured('error_message',
{'_level': mozlog.ERROR,
'diagnostic': 'unexpected error'})
message_one_expected = {'_namespace': 'test.Logger',
'_level': 'INFO',
'_message': 'message one',
'action': 'test_message'}
message_two_expected = {'_namespace': 'test.Logger',
'_level': 'INFO',
'_message': 'message two',
'action': 'test_message'}
message_three_expected = {'_namespace': 'test.Logger',
'_level': 'ERROR',
'diagnostic': 'unexpected error',
'action': 'error_message'}
message_one_actual = json.loads(self.handler.messages[0])
message_two_actual = json.loads(self.handler.messages[1])
message_three_actual = json.loads(self.handler.messages[2])
self.check_messages(message_one_expected, message_one_actual)
self.check_messages(message_two_expected, message_two_actual)
self.check_messages(message_three_expected, message_three_actual)
def test_unstructured_conversion(self):
""" Tests that logging to a logger with a structured formatter
via the traditional logging interface works as expected. """
self.logger.info('%s %s %d', 'Message', 'number', 1)
self.logger.error('Message number 2')
self.logger.debug('Message with %s', 'some extras',
extra={'params': {'action': 'mozlog_test_output',
'is_failure': False}})
message_one_expected = {'_namespace': 'test.Logger',
'_level': 'INFO',
'_message': 'Message number 1'}
message_two_expected = {'_namespace': 'test.Logger',
'_level': 'ERROR',
'_message': 'Message number 2'}
message_three_expected = {'_namespace': 'test.Logger',
'_level': 'DEBUG',
'_message': 'Message with some extras',
'action': 'mozlog_test_output',
'is_failure': False}
message_one_actual = json.loads(self.handler.messages[0])
message_two_actual = json.loads(self.handler.messages[1])
message_three_actual = json.loads(self.handler.messages[2])
self.check_messages(message_one_expected, message_one_actual)
self.check_messages(message_two_expected, message_two_actual)
self.check_messages(message_three_expected, message_three_actual)
def message_callback(self):
if len(self.handler.messages) == 3:
message_one_expected = {'_namespace': 'test.Logger',
'_level': 'DEBUG',
'_message': 'socket message one',
'action': 'test_message'}
message_two_expected = {'_namespace': 'test.Logger',
'_level': 'DEBUG',
'_message': 'socket message two',
'action': 'test_message'}
message_three_expected = {'_namespace': 'test.Logger',
'_level': 'DEBUG',
'_message': 'socket message three',
'action': 'test_message'}
message_one_actual = json.loads(self.handler.messages[0])
message_two_actual = json.loads(self.handler.messages[1])
message_three_actual = json.loads(self.handler.messages[2])
self.check_messages(message_one_expected, message_one_actual)
self.check_messages(message_two_expected, message_two_actual)
self.check_messages(message_three_expected, message_three_actual)
def test_log_listener(self):
connection = '127.0.0.1', 0
self.log_server = mozlog.LogMessageServer(connection,
self.logger,
message_callback=self.message_callback,
timeout=0.5)
message_string_one = json.dumps({'_message': 'socket message one',
'action': 'test_message',
'_level': 'DEBUG'})
message_string_two = json.dumps({'_message': 'socket message two',
'action': 'test_message',
'_level': 'DEBUG'})
message_string_three = json.dumps({'_message': 'socket message three',
'action': 'test_message',
'_level': 'DEBUG'})
message_string = message_string_one + '\n' + \
message_string_two + '\n' + \
message_string_three + '\n'
server_thread = threading.Thread(target=self.log_server.handle_request)
server_thread.start()
host, port = self.log_server.server_address
sock = socket.socket()
sock.connect((host, port))
# Sleeps prevent listener from receiving entire message in a single call
# to recv in order to test reconstruction of partial messages.
sock.sendall(message_string[:8])
time.sleep(.01)
sock.sendall(message_string[8:32])
time.sleep(.01)
sock.sendall(message_string[32:64])
time.sleep(.01)
sock.sendall(message_string[64:128])
time.sleep(.01)
sock.sendall(message_string[128:])
server_thread.join()
class Loggable(mozlog.LoggingMixin):
"""Trivial class inheriting from LoggingMixin"""
pass
class TestLoggingMixin(unittest.TestCase):
"""Tests basic use of LoggingMixin"""
def test_mixin(self):
loggable = Loggable()
self.assertTrue(not hasattr(loggable, "_logger"))
loggable.log(mozlog.INFO, "This will instantiate the logger")
self.assertTrue(hasattr(loggable, "_logger"))
self.assertEqual(loggable._logger.name, "test_logger.Loggable")
self.assertRaises(ValueError, loggable.set_logger,
"not a logger")
logger = mozlog.MozLogger('test.mixin')
handler = ListHandler()
logger.addHandler(handler)
loggable.set_logger(logger)
self.assertTrue(isinstance(loggable._logger.handlers[0],
ListHandler))
self.assertEqual(loggable._logger.name, "test.mixin")
loggable.log(mozlog.WARN, 'message for "log" method')
loggable.info('message for "info" method')
loggable.error('message for "error" method')
loggable.log_structured('test_message',
params={'_message': 'message for ' + \
'"log_structured" method'})
expected_messages = ['message for "log" method',
'message for "info" method',
'message for "error" method',
'message for "log_structured" method']
actual_messages = loggable._logger.handlers[0].messages
self.assertEqual(expected_messages, actual_messages)
if __name__ == '__main__':
unittest.main()
|
mpl-2.0
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.2/Lib/test/test_metaclass.py
|
59
|
6062
|
doctests = """
Basic class construction.
>>> class C:
... def meth(self): print("Hello")
...
>>> C.__class__ is type
True
>>> a = C()
>>> a.__class__ is C
True
>>> a.meth()
Hello
>>>
Use *args notation for the bases.
>>> class A: pass
>>> class B: pass
>>> bases = (A, B)
>>> class C(*bases): pass
>>> C.__bases__ == bases
True
>>>
Use a trivial metaclass.
>>> class M(type):
... pass
...
>>> class C(metaclass=M):
... def meth(self): print("Hello")
...
>>> C.__class__ is M
True
>>> a = C()
>>> a.__class__ is C
True
>>> a.meth()
Hello
>>>
Use **kwds notation for the metaclass keyword.
>>> kwds = {'metaclass': M}
>>> class C(**kwds): pass
...
>>> C.__class__ is M
True
>>> a = C()
>>> a.__class__ is C
True
>>>
Use a metaclass with a __prepare__ static method.
>>> class M(type):
... @staticmethod
... def __prepare__(*args, **kwds):
... print("Prepare called:", args, kwds)
... return dict()
... def __new__(cls, name, bases, namespace, **kwds):
... print("New called:", kwds)
... return type.__new__(cls, name, bases, namespace)
... def __init__(cls, *args, **kwds):
... pass
...
>>> class C(metaclass=M):
... def meth(self): print("Hello")
...
Prepare called: ('C', ()) {}
New called: {}
>>>
Also pass another keyword.
>>> class C(object, metaclass=M, other="haha"):
... pass
...
Prepare called: ('C', (<class 'object'>,)) {'other': 'haha'}
New called: {'other': 'haha'}
>>> C.__class__ is M
True
>>> C.__bases__ == (object,)
True
>>> a = C()
>>> a.__class__ is C
True
>>>
Check that build_class doesn't mutate the kwds dict.
>>> kwds = {'metaclass': type}
>>> class C(**kwds): pass
...
>>> kwds == {'metaclass': type}
True
>>>
Use various combinations of explicit keywords and **kwds.
>>> bases = (object,)
>>> kwds = {'metaclass': M, 'other': 'haha'}
>>> class C(*bases, **kwds): pass
...
Prepare called: ('C', (<class 'object'>,)) {'other': 'haha'}
New called: {'other': 'haha'}
>>> C.__class__ is M
True
>>> C.__bases__ == (object,)
True
>>> class B: pass
>>> kwds = {'other': 'haha'}
>>> class C(B, metaclass=M, *bases, **kwds): pass
...
Prepare called: ('C', (<class 'test.test_metaclass.B'>, <class 'object'>)) {'other': 'haha'}
New called: {'other': 'haha'}
>>> C.__class__ is M
True
>>> C.__bases__ == (B, object)
True
>>>
Check for duplicate keywords.
>>> class C(metaclass=type, metaclass=type): pass
...
Traceback (most recent call last):
[...]
SyntaxError: keyword argument repeated
>>>
Another way.
>>> kwds = {'metaclass': type}
>>> class C(metaclass=type, **kwds): pass
...
Traceback (most recent call last):
[...]
TypeError: __build_class__() got multiple values for keyword argument 'metaclass'
>>>
Use a __prepare__ method that returns an instrumented dict.
>>> class LoggingDict(dict):
... def __setitem__(self, key, value):
... print("d[%r] = %r" % (key, value))
... dict.__setitem__(self, key, value)
...
>>> class Meta(type):
... @staticmethod
... def __prepare__(name, bases):
... return LoggingDict()
...
>>> class C(metaclass=Meta):
... foo = 2+2
... foo = 42
... bar = 123
...
d['__module__'] = 'test.test_metaclass'
d['foo'] = 4
d['foo'] = 42
d['bar'] = 123
>>>
Use a metaclass that doesn't derive from type.
>>> def meta(name, bases, namespace, **kwds):
... print("meta:", name, bases)
... print("ns:", sorted(namespace.items()))
... print("kw:", sorted(kwds.items()))
... return namespace
...
>>> class C(metaclass=meta):
... a = 42
... b = 24
...
meta: C ()
ns: [('__module__', 'test.test_metaclass'), ('a', 42), ('b', 24)]
kw: []
>>> type(C) is dict
True
>>> print(sorted(C.items()))
[('__module__', 'test.test_metaclass'), ('a', 42), ('b', 24)]
>>>
And again, with a __prepare__ attribute.
>>> def prepare(name, bases, **kwds):
... print("prepare:", name, bases, sorted(kwds.items()))
... return LoggingDict()
...
>>> meta.__prepare__ = prepare
>>> class C(metaclass=meta, other="booh"):
... a = 1
... a = 2
... b = 3
...
prepare: C () [('other', 'booh')]
d['__module__'] = 'test.test_metaclass'
d['a'] = 1
d['a'] = 2
d['b'] = 3
meta: C ()
ns: [('__module__', 'test.test_metaclass'), ('a', 2), ('b', 3)]
kw: [('other', 'booh')]
>>>
The default metaclass must define a __prepare__() method.
>>> type.__prepare__()
{}
>>>
Make sure it works with subclassing.
>>> class M(type):
... @classmethod
... def __prepare__(cls, *args, **kwds):
... d = super().__prepare__(*args, **kwds)
... d["hello"] = 42
... return d
...
>>> class C(metaclass=M):
... print(hello)
...
42
>>> print(C.hello)
42
>>>
Test failures in looking up the __prepare__ method work.
>>> class ObscureException(Exception):
... pass
>>> class FailDescr:
... def __get__(self, instance, owner):
... raise ObscureException
>>> class Meta(type):
... __prepare__ = FailDescr()
>>> class X(metaclass=Meta):
... pass
Traceback (most recent call last):
[...]
test.test_metaclass.ObscureException
"""
__test__ = {'doctests' : doctests}
def test_main(verbose=False):
from test import support
from test import test_metaclass
support.run_doctest(test_metaclass, verbose)
if __name__ == "__main__":
test_main(verbose=True)
|
mit
|
newrocknj/horizon
|
openstack_dashboard/dashboards/admin/instances/urls.py
|
66
|
1554
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.instances import views
INSTANCES = r'^(?P<instance_id>[^/]+)/%s$'
urlpatterns = patterns(
'openstack_dashboard.dashboards.admin.instances.views',
url(r'^$', views.AdminIndexView.as_view(), name='index'),
url(INSTANCES % 'update', views.AdminUpdateView.as_view(), name='update'),
url(INSTANCES % 'detail', views.DetailView.as_view(), name='detail'),
url(INSTANCES % 'console', 'console', name='console'),
url(INSTANCES % 'vnc', 'vnc', name='vnc'),
url(INSTANCES % 'spice', 'spice', name='spice'),
url(INSTANCES % 'rdp', 'rdp', name='rdp'),
url(INSTANCES % 'live_migrate', views.LiveMigrateView.as_view(),
name='live_migrate'),
)
|
apache-2.0
|
bwesterb/py-demandimport
|
src/__init__.py
|
1
|
10488
|
# demandimport.py - global demand-loading of modules for Mercurial
#
# Copyright 2006, 2007 Matt Mackall <[email protected]>
# 2013, 2015 Bas Westerbaan <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''
demandimport - automatic demand-loading of modules
To enable this module, do:
import demandimport; demandimport.enable()
Imports of the following forms will be demand-loaded:
import a, b.c
import a.b as c
from a import b,c # a will be loaded immediately
These imports will not be delayed:
from a import *
b = __import__(a)
'''
try:
import __builtin__ as builtins
except ImportError:
import builtins
import imp
from threading import RLock
_origimport = __import__
lock = RLock()
class _demandmod(object):
"""module demand-loader and proxy"""
def __init__(self, name, globals, locals, level=-1, parent_path=None):
global _ignore
if '.' in name:
head, rest = name.split('.', 1)
after = [rest]
else:
head = name
after = []
object.__setattr__(self, "_data", (head, globals, locals, after, level,
parent_path))
object.__setattr__(self, "_module", None)
object.__setattr__(self, "_ignore", set(_ignore))
def _extend(self, name):
"""add to the list of submodules to load"""
self._data[3].append(name)
def _load(self):
global _ignore, lock
with lock:
if self._module:
return
head, globals, locals, after, level, parent_path = self._data
old_ignore, _ignore = _ignore, self._ignore
path = parent_path + '.' + head if parent_path else head
if _log:
if after:
_log('Triggered to import %s and setup lazy submodules %s '+
'for %s', path, after, globals.get('__name__', '?')
if globals else '?')
else:
_log('Triggered to import %s for %s', path,
globals.get('__name__', '?') if globals else '?')
# If we are given a parent_path, we will ask __import__ to
# import parent.path.head. By default it returns the loaded
# module `parent'. However, we are interested in `head'.
# By passing a barely-non-trivial fromlist, __import__ returns
# the right-most module instead of the left-most.
fromlist = ['__name__'] if parent_path else []
if level == -1:
mod = _origimport(path, globals, locals, fromlist)
else:
mod = _origimport(path, globals, locals, fromlist, level)
assert not isinstance(mod, _demandmod)
_ignore = old_ignore
# load submodules
def subload(mod, modp, p):
h, t = p, None
if '.' in p:
h, t = p.split('.', 1)
if not hasattr(mod, h):
if _log:
_log('Delaying import of %s for %s as %s situation #4',
p, mod.__dict__.get('__name__', '?'), h)
setattr(mod, h, _demandmod(p, mod.__dict__, mod.__dict__,
parent_path=modp))
elif t:
subload(getattr(mod, h), modp+'.'+h, t)
for x in after:
subload(mod, path, x)
# are we in the locals dictionary still?
if locals and locals.get(head) is self:
locals[head] = mod
object.__setattr__(self, "_module", mod)
def __repr__(self):
if self._module:
return "<proxied module '%s'>" % self._data[0]
return "<unloaded module '%s'>" % self._data[0]
def __call__(self, *args, **kwargs):
raise TypeError("%s object is not callable" % repr(self))
def __getattribute__(self, attr):
if attr in ('_data', '_extend', '_load', '_module', '_ignore'):
return object.__getattribute__(self, attr)
self._load()
return getattr(self._module, attr)
def __setattr__(self, attr, val):
self._load()
setattr(self._module, attr, val)
def _demandimport(name, globals=None, locals=None, fromlist=None, level=-1):
global lock
with lock:
if not locals or name in _ignore or fromlist == ('*',):
# these cases we can't really delay
if level == -1:
return _origimport(name, globals, locals, fromlist)
else:
return _origimport(name, globals, locals, fromlist, level)
elif not fromlist:
# import a [as b]
if '.' in name: # a.b
base, rest = name.split('.', 1)
# email.__init__ loading email.mime
if globals and globals.get('__name__') == base:
if level != -1:
return _origimport(name, globals, locals, fromlist,
level)
else:
return _origimport(name, globals, locals, fromlist)
# if a is already demand-loaded, add b to its submodule list
if base in locals:
if isinstance(locals[base], _demandmod):
if _log:
_log('Adding %s to submodule list of %s', rest,
base)
locals[base]._extend(rest)
return locals[base]
else: # '.' not in name
# For an absolute import of an unnested module, we can check
# whether the module exists without loading anything.
                # So let's do that.
if level == 0: # abs. import
imp.find_module(name)
if _log:
_log('Delaying import of %s for %s (level %s) situation #1',
name, globals.get('__name__', '?') if globals else '?',
level)
return _demandmod(name, globals, locals, level)
else:
if level != -1:
# from . import b,c,d or from .a import b,c,d
return _origimport(name, globals, locals, fromlist, level)
# from a import b,c,d
mod = _origimport(name, globals, locals)
# recurse down the module chain
for comp in name.split('.')[1:]:
if not hasattr(mod, comp):
if _log:
_log('Delaying import of %s for %s situation #2',
comp, mod.__dict__.get('__name__', '?'))
setattr(mod, comp, _demandmod(comp, mod.__dict__,
mod.__dict__))
mod = getattr(mod, comp)
for x in fromlist:
# set requested submodules for demand load
if not hasattr(mod, x):
if _log:
_log('Delaying import of %s for %s situation #3', x,
mod.__dict__.get('__name__', '?'))
setattr(mod, x, _demandmod(x, mod.__dict__, locals))
# This ensures
#
# with demandimport.ignored('a.b.c'):
# from a.b import c
#
# behaves as expected.
# TODO we should skip the `_demandmod'.
if name + '.' + x in _ignore:
getattr(mod, x)._load()
return mod
_ignore = set([
'__future__',
'_hashlib',
'_xmlplus',
'fcntl',
'win32com.gen_py',
'_winreg', # 2.7 mimetypes needs immediate ImportError
'pythoncom',
# imported by tarfile, not available under Windows
'pwd',
'grp',
# imported by profile, itself imported by hotshot.stats,
# not available under Windows
'resource',
# this trips up many extension authors
'gtk',
'sip',
'collections.abc',
# setuptools' pkg_resources.py expects "from __main__ import x" to
# raise ImportError if x not defined
'__main__',
'_ssl', # conditional imports in the stdlib
'typing.abc', # issue #8
])
is_enabled = False
_log = None
def ignore(module_name):
global _ignore
_ignore.add(module_name)
class ignored(object):
def __init__(self, module_name):
self.module_name = module_name
def __enter__(self):
global _ignore
self.added = self.module_name not in _ignore
if self.added:
_ignore.add(self.module_name)
def __exit__(self, *args):
global _ignore
if self.added:
_ignore.remove(self.module_name)
def enable():
"enable global demand-loading of modules"
global is_enabled
if not is_enabled:
builtins.__import__ = _demandimport
is_enabled = True
def disable():
"disable global demand-loading of modules"
global is_enabled
if is_enabled:
builtins.__import__ = _origimport
is_enabled = False
class disabled(object):
def __enter__(self):
global is_enabled
self.old = is_enabled
if is_enabled:
disable()
def __exit__(self, *args):
if self.old:
enable()
class enabled(object):
def __enter__(self):
global is_enabled
self.old = is_enabled
if not is_enabled:
enable()
def __exit__(self, *args):
if not self.old:
disable()
def is_proxy(module):
""" Checks whether the given module is a demandimport proxy object. """
return isinstance(module, _demandmod)
def is_loaded(module):
""" Checks whether the given module has been loaded.
Note that the object might still be a proxy object. Check this case
with the `is_proxy` function. """
if not is_proxy(module):
return True
return bool(module._module)
def set_logfunc(logfunc):
""" Sets a logger to which demandimport will report all of its actions.
Useful to debug problems with third-party modules. """
global _log
_log = logfunc
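# Illustrative only (not part of the original module): a minimal usage sketch
# of the public API defined above. The imported module names are arbitrary
# examples; `smtplib` stands in for any module you want loaded lazily.
def _example_usage():
    enable()                           # patch builtins.__import__
    with ignored('json'):              # 'json' is imported eagerly here
        import json                    # noqa: F401
    import smtplib                     # bound to a lazy proxy object
    was_proxy = is_proxy(smtplib)
    _ = smtplib.SMTP                   # first attribute access loads the module
    disable()                          # restore the original __import__
    return was_proxy, is_loaded(smtplib)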
|
gpl-2.0
|
amorwilliams/gsoops
|
server/libs/aliyun-sls-sdk-python-0.6.0/google/protobuf/text_format.py
|
261
|
21737
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains routines for printing protocol messages in text format."""
__author__ = '[email protected] (Kenton Varda)'
import cStringIO
import re
from collections import deque
from google.protobuf.internal import type_checkers
from google.protobuf import descriptor
__all__ = [ 'MessageToString', 'PrintMessage', 'PrintField',
'PrintFieldValue', 'Merge' ]
# Infinity and NaN are not explicitly supported by Python pre-2.6, and
# float('inf') does not work on Windows (pre-2.6).
_INFINITY = 1e10000 # overflows, thus will actually be infinity.
_NAN = _INFINITY * 0
class ParseError(Exception):
"""Thrown in case of ASCII parsing error."""
def MessageToString(message, as_utf8=False, as_one_line=False):
out = cStringIO.StringIO()
PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line)
result = out.getvalue()
out.close()
if as_one_line:
return result.rstrip()
return result
def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False):
for field, value in message.ListFields():
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
for element in value:
PrintField(field, element, out, indent, as_utf8, as_one_line)
else:
PrintField(field, value, out, indent, as_utf8, as_one_line)
def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False):
"""Print a single field name/value pair. For repeated fields, the value
should be a single element."""
  out.write(' ' * indent)
if field.is_extension:
out.write('[')
if (field.containing_type.GetOptions().message_set_wire_format and
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
out.write(field.message_type.full_name)
else:
out.write(field.full_name)
out.write(']')
elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
# For groups, use the capitalized name.
out.write(field.message_type.name)
else:
out.write(field.name)
if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
# The colon is optional in this case, but our cross-language golden files
# don't include it.
out.write(': ')
PrintFieldValue(field, value, out, indent, as_utf8, as_one_line)
if as_one_line:
out.write(' ')
else:
out.write('\n')
def PrintFieldValue(field, value, out, indent=0,
as_utf8=False, as_one_line=False):
"""Print a single field value (not including name). For repeated fields,
the value should be a single element."""
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
if as_one_line:
out.write(' { ')
PrintMessage(value, out, indent, as_utf8, as_one_line)
out.write('}')
else:
out.write(' {\n')
PrintMessage(value, out, indent + 2, as_utf8, as_one_line)
out.write(' ' * indent + '}')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
out.write(field.enum_type.values_by_number[value].name)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
out.write('\"')
if type(value) is unicode:
out.write(_CEscape(value.encode('utf-8'), as_utf8))
else:
out.write(_CEscape(value, as_utf8))
out.write('\"')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
if value:
out.write("true")
else:
out.write("false")
else:
out.write(str(value))
def Merge(text, message):
"""Merges an ASCII representation of a protocol message into a message.
Args:
text: Message ASCII representation.
message: A protocol buffer message to merge into.
Raises:
ParseError: On ASCII parsing problems.
"""
tokenizer = _Tokenizer(text)
while not tokenizer.AtEnd():
_MergeField(tokenizer, message)
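# Illustrative only (not part of the original module): a round trip through
# MessageToString() and Merge(). Assumes a generated message module such as
# addressbook_pb2 (from the protobuf examples directory) is importable.
def _ExampleRoundTrip():
  import addressbook_pb2
  person = addressbook_pb2.Person()
  person.name = 'Alice'
  person.id = 123
  text = MessageToString(person)
  clone = addressbook_pb2.Person()
  Merge(text, clone)
  assert clone.name == 'Alice' and clone.id == 123
  return text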
def _MergeField(tokenizer, message):
"""Merges a single protocol message field into a message.
Args:
tokenizer: A tokenizer to parse the field name and values.
message: A protocol message to record the data.
Raises:
ParseError: In case of ASCII parsing problems.
"""
message_descriptor = message.DESCRIPTOR
if tokenizer.TryConsume('['):
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
name = '.'.join(name)
if not message_descriptor.is_extendable:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" does not have extensions.' %
message_descriptor.full_name)
field = message.Extensions._FindExtensionByName(name)
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" not registered.' % name)
elif message_descriptor != field.containing_type:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" does not extend message type "%s".' % (
name, message_descriptor.full_name))
tokenizer.Consume(']')
else:
name = tokenizer.ConsumeIdentifier()
field = message_descriptor.fields_by_name.get(name, None)
# Group names are expected to be capitalized as they appear in the
# .proto file, which actually matches their type names, not their field
# names.
if not field:
field = message_descriptor.fields_by_name.get(name.lower(), None)
if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
field = None
if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
field.message_type.name != name):
field = None
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" has no field named "%s".' % (
message_descriptor.full_name, name))
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
tokenizer.TryConsume(':')
if tokenizer.TryConsume('<'):
end_token = '>'
else:
tokenizer.Consume('{')
end_token = '}'
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
sub_message = message.Extensions[field].add()
else:
sub_message = getattr(message, field.name).add()
else:
if field.is_extension:
sub_message = message.Extensions[field]
else:
sub_message = getattr(message, field.name)
sub_message.SetInParent()
while not tokenizer.TryConsume(end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token))
_MergeField(tokenizer, sub_message)
else:
_MergeScalarField(tokenizer, message, field)
def _MergeScalarField(tokenizer, message, field):
"""Merges a single protocol message scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: A protocol message to record the data.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of ASCII parsing problems.
RuntimeError: On runtime errors.
"""
tokenizer.Consume(':')
value = None
if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_SFIXED32):
value = tokenizer.ConsumeInt32()
elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_SINT64,
descriptor.FieldDescriptor.TYPE_SFIXED64):
value = tokenizer.ConsumeInt64()
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_FIXED32):
value = tokenizer.ConsumeUint32()
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_FIXED64):
value = tokenizer.ConsumeUint64()
elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
descriptor.FieldDescriptor.TYPE_DOUBLE):
value = tokenizer.ConsumeFloat()
elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
value = tokenizer.ConsumeBool()
elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
value = tokenizer.ConsumeString()
elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
value = tokenizer.ConsumeByteString()
elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
# Enum can be specified by a number (the enum value), or by
# a string literal (the enum name).
enum_descriptor = field.enum_type
if tokenizer.LookingAtInteger():
number = tokenizer.ConsumeInt32()
enum_value = enum_descriptor.values_by_number.get(number, None)
if enum_value is None:
raise tokenizer.ParseErrorPreviousToken(
'Enum type "%s" has no value with number %d.' % (
enum_descriptor.full_name, number))
else:
identifier = tokenizer.ConsumeIdentifier()
enum_value = enum_descriptor.values_by_name.get(identifier, None)
if enum_value is None:
raise tokenizer.ParseErrorPreviousToken(
'Enum type "%s" has no value named %s.' % (
enum_descriptor.full_name, identifier))
value = enum_value.number
else:
raise RuntimeError('Unknown field type %d' % field.type)
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
message.Extensions[field].append(value)
else:
getattr(message, field.name).append(value)
else:
if field.is_extension:
message.Extensions[field] = value
else:
setattr(message, field.name, value)
class _Tokenizer(object):
"""Protocol buffer ASCII representation tokenizer.
This class handles the lower level string parsing by splitting it into
meaningful tokens.
It was directly ported from the Java protocol buffer API.
"""
_WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE)
_TOKEN = re.compile(
'[a-zA-Z_][0-9a-zA-Z_+-]*|' # an identifier
'[0-9+-][0-9a-zA-Z_.+-]*|' # a number
'\"([^\"\n\\\\]|\\\\.)*(\"|\\\\?$)|' # a double-quoted string
'\'([^\'\n\\\\]|\\\\.)*(\'|\\\\?$)') # a single-quoted string
_IDENTIFIER = re.compile('\w+')
_INTEGER_CHECKERS = [type_checkers.Uint32ValueChecker(),
type_checkers.Int32ValueChecker(),
type_checkers.Uint64ValueChecker(),
type_checkers.Int64ValueChecker()]
_FLOAT_INFINITY = re.compile('-?inf(inity)?f?', re.IGNORECASE)
_FLOAT_NAN = re.compile("nanf?", re.IGNORECASE)
def __init__(self, text_message):
self._text_message = text_message
self._position = 0
self._line = -1
self._column = 0
self._token_start = None
self.token = ''
self._lines = deque(text_message.split('\n'))
self._current_line = ''
self._previous_line = 0
self._previous_column = 0
self._SkipWhitespace()
self.NextToken()
def AtEnd(self):
"""Checks the end of the text was reached.
Returns:
True iff the end was reached.
"""
return self.token == ''
def _PopLine(self):
while len(self._current_line) <= self._column:
if not self._lines:
self._current_line = ''
return
self._line += 1
self._column = 0
self._current_line = self._lines.popleft()
def _SkipWhitespace(self):
while True:
self._PopLine()
match = self._WHITESPACE.match(self._current_line, self._column)
if not match:
break
length = len(match.group(0))
self._column += length
def TryConsume(self, token):
"""Tries to consume a given piece of text.
Args:
token: Text to consume.
Returns:
True iff the text was consumed.
"""
if self.token == token:
self.NextToken()
return True
return False
def Consume(self, token):
"""Consumes a piece of text.
Args:
token: Text to consume.
Raises:
ParseError: If the text couldn't be consumed.
"""
if not self.TryConsume(token):
raise self._ParseError('Expected "%s".' % token)
def LookingAtInteger(self):
"""Checks if the current token is an integer.
Returns:
True iff the current token is an integer.
"""
if not self.token:
return False
c = self.token[0]
return (c >= '0' and c <= '9') or c == '-' or c == '+'
def ConsumeIdentifier(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER.match(result):
raise self._ParseError('Expected identifier.')
self.NextToken()
return result
def ConsumeInt32(self):
"""Consumes a signed 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 32bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=True, is_long=False)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeUint32(self):
"""Consumes an unsigned 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 32bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=False, is_long=False)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeInt64(self):
"""Consumes a signed 64bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 64bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=True, is_long=True)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeUint64(self):
"""Consumes an unsigned 64bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 64bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=False, is_long=True)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeFloat(self):
"""Consumes an floating point number.
Returns:
The number parsed.
Raises:
ParseError: If a floating point number couldn't be consumed.
"""
text = self.token
if self._FLOAT_INFINITY.match(text):
self.NextToken()
if text.startswith('-'):
return -_INFINITY
return _INFINITY
if self._FLOAT_NAN.match(text):
self.NextToken()
return _NAN
try:
result = float(text)
except ValueError, e:
raise self._FloatParseError(e)
self.NextToken()
return result
def ConsumeBool(self):
"""Consumes a boolean value.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
if self.token in ('true', 't', '1'):
self.NextToken()
return True
elif self.token in ('false', 'f', '0'):
self.NextToken()
return False
else:
raise self._ParseError('Expected "true" or "false".')
def ConsumeString(self):
"""Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed.
"""
bytes = self.ConsumeByteString()
try:
return unicode(bytes, 'utf-8')
except UnicodeDecodeError, e:
raise self._StringParseError(e)
def ConsumeByteString(self):
"""Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed.
"""
list = [self._ConsumeSingleByteString()]
while len(self.token) > 0 and self.token[0] in ('\'', '"'):
list.append(self._ConsumeSingleByteString())
return "".join(list)
def _ConsumeSingleByteString(self):
"""Consume one token of a string literal.
String literals (whether bytes or text) can come in multiple adjacent
tokens which are automatically concatenated, like in C or Python. This
method only consumes one token.
"""
text = self.token
if len(text) < 1 or text[0] not in ('\'', '"'):
      raise self._ParseError('Expected string.')
if len(text) < 2 or text[-1] != text[0]:
raise self._ParseError('String missing ending quote.')
try:
result = _CUnescape(text[1:-1])
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result
def _ParseInteger(self, text, is_signed=False, is_long=False):
"""Parses an integer.
Args:
text: The text to parse.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer value.
Raises:
      ValueError: Thrown iff the text is not a valid integer.
"""
pos = 0
if text.startswith('-'):
pos += 1
base = 10
if text.startswith('0x', pos) or text.startswith('0X', pos):
base = 16
elif text.startswith('0', pos):
base = 8
# Do the actual parsing. Exception handling is propagated to caller.
result = int(text, base)
# Check if the integer is sane. Exceptions handled by callers.
checker = self._INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
checker.CheckValue(result)
return result
def ParseErrorPreviousToken(self, message):
"""Creates and *returns* a ParseError for the previously read token.
Args:
message: A message to set for the exception.
Returns:
A ParseError instance.
"""
return ParseError('%d:%d : %s' % (
self._previous_line + 1, self._previous_column + 1, message))
def _ParseError(self, message):
"""Creates and *returns* a ParseError for the current token."""
return ParseError('%d:%d : %s' % (
self._line + 1, self._column - len(self.token) + 1, message))
def _IntegerParseError(self, e):
return self._ParseError('Couldn\'t parse integer: ' + str(e))
def _FloatParseError(self, e):
return self._ParseError('Couldn\'t parse number: ' + str(e))
def _StringParseError(self, e):
return self._ParseError('Couldn\'t parse string: ' + str(e))
def NextToken(self):
"""Reads the next meaningful token."""
self._previous_line = self._line
self._previous_column = self._column
self._column += len(self.token)
self._SkipWhitespace()
if not self._lines and len(self._current_line) <= self._column:
self.token = ''
return
match = self._TOKEN.match(self._current_line, self._column)
if match:
token = match.group(0)
self.token = token
else:
self.token = self._current_line[self._column]
# text.encode('string_escape') does not seem to satisfy our needs as it
# encodes unprintable characters using two-digit hex escapes whereas our
# C++ unescaping function allows hex escapes to be any length. So,
# "\0011".encode('string_escape') ends up being "\\x011", which will be
# decoded in C++ as a single-character string with char code 0x11.
def _CEscape(text, as_utf8):
def escape(c):
o = ord(c)
if o == 10: return r"\n" # optional escape
if o == 13: return r"\r" # optional escape
if o == 9: return r"\t" # optional escape
if o == 39: return r"\'" # optional escape
if o == 34: return r'\"' # necessary escape
if o == 92: return r"\\" # necessary escape
# necessary escapes
if not as_utf8 and (o >= 127 or o < 32): return "\\%03o" % o
return c
return "".join([escape(c) for c in text])
_CUNESCAPE_HEX = re.compile('\\\\x([0-9a-fA-F]{2}|[0-9a-fA-F])')
def _CUnescape(text):
def ReplaceHex(m):
return chr(int(m.group(0)[2:], 16))
# This is required because the 'string_escape' encoding doesn't
# allow single-digit hex escapes (like '\xf').
result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
return result.decode('string_escape')
|
mit
|
ryuunosukeyoshi/PartnerPoi-Bot
|
lib/urllib3/contrib/appengine.py
|
224
|
10865
|
"""
This module provides a pool manager that uses Google App Engine's
`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
Example usage::
from urllib3 import PoolManager
from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
if is_appengine_sandbox():
# AppEngineManager uses AppEngine's URLFetch API behind the scenes
http = AppEngineManager()
else:
# PoolManager uses a socket-level API behind the scenes
http = PoolManager()
r = http.request('GET', 'https://google.com/')
There are `limitations <https://cloud.google.com/appengine/docs/python/\
urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
the best choice for your application. There are three options for using
urllib3 on Google App Engine:
1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
cost-effective in many circumstances as long as your usage is within the
limitations.
2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
Sockets also have `limitations and restrictions
<https://cloud.google.com/appengine/docs/python/sockets/\
#limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
To use sockets, be sure to specify the following in your ``app.yaml``::
env_variables:
GAE_USE_SOCKETS_HTTPLIB : 'true'
3. If you are using `App Engine Flexible
<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
:class:`PoolManager` without any configuration or special environment variables.
"""
from __future__ import absolute_import
import logging
import os
import warnings
from ..packages.six.moves.urllib.parse import urljoin
from ..exceptions import (
HTTPError,
HTTPWarning,
MaxRetryError,
ProtocolError,
TimeoutError,
SSLError
)
from ..packages.six import BytesIO
from ..request import RequestMethods
from ..response import HTTPResponse
from ..util.timeout import Timeout
from ..util.retry import Retry
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
log = logging.getLogger(__name__)
class AppEnginePlatformWarning(HTTPWarning):
pass
class AppEnginePlatformError(HTTPError):
pass
class AppEngineManager(RequestMethods):
"""
Connection manager for Google App Engine sandbox applications.
This manager uses the URLFetch service directly instead of using the
emulated httplib, and is subject to URLFetch limitations as described in
the App Engine documentation `here
<https://cloud.google.com/appengine/docs/python/urlfetch>`_.
Notably it will raise an :class:`AppEnginePlatformError` if:
* URLFetch is not available.
* If you attempt to use this on App Engine Flexible, as full socket
support is available.
* If a request size is more than 10 megabytes.
    * If a response size is more than 32 megabytes.
* If you use an unsupported request method such as OPTIONS.
Beyond those cases, it will raise normal urllib3 errors.
"""
def __init__(self, headers=None, retries=None, validate_certificate=True,
urlfetch_retries=True):
if not urlfetch:
raise AppEnginePlatformError(
"URLFetch is not available in this environment.")
if is_prod_appengine_mvms():
raise AppEnginePlatformError(
"Use normal urllib3.PoolManager instead of AppEngineManager"
"on Managed VMs, as using URLFetch is not necessary in "
"this environment.")
warnings.warn(
"urllib3 is using URLFetch on Google App Engine sandbox instead "
"of sockets. To use sockets directly instead of URLFetch see "
"https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
AppEnginePlatformWarning)
RequestMethods.__init__(self, headers)
self.validate_certificate = validate_certificate
self.urlfetch_retries = urlfetch_retries
self.retries = retries or Retry.DEFAULT
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Return False to re-raise any potential exceptions
return False
def urlopen(self, method, url, body=None, headers=None,
retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
**response_kw):
retries = self._get_retries(retries, redirect)
try:
follow_redirects = (
redirect and
retries.redirect != 0 and
retries.total)
response = urlfetch.fetch(
url,
payload=body,
method=method,
headers=headers or {},
allow_truncated=False,
follow_redirects=self.urlfetch_retries and follow_redirects,
deadline=self._get_absolute_timeout(timeout),
validate_certificate=self.validate_certificate,
)
except urlfetch.DeadlineExceededError as e:
raise TimeoutError(self, e)
except urlfetch.InvalidURLError as e:
if 'too large' in str(e):
raise AppEnginePlatformError(
"URLFetch request too large, URLFetch only "
"supports requests up to 10mb in size.", e)
raise ProtocolError(e)
except urlfetch.DownloadError as e:
if 'Too many redirects' in str(e):
raise MaxRetryError(self, url, reason=e)
raise ProtocolError(e)
except urlfetch.ResponseTooLargeError as e:
raise AppEnginePlatformError(
"URLFetch response too large, URLFetch only supports"
"responses up to 32mb in size.", e)
except urlfetch.SSLCertificateError as e:
raise SSLError(e)
except urlfetch.InvalidMethodError as e:
raise AppEnginePlatformError(
"URLFetch does not support method: %s" % method, e)
http_response = self._urlfetch_response_to_http_response(
response, retries=retries, **response_kw)
# Handle redirect?
redirect_location = redirect and http_response.get_redirect_location()
if redirect_location:
# Check for redirect response
if (self.urlfetch_retries and retries.raise_on_redirect):
raise MaxRetryError(self, url, "too many redirects")
else:
if http_response.status == 303:
method = 'GET'
try:
retries = retries.increment(method, url, response=http_response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
raise MaxRetryError(self, url, "too many redirects")
return http_response
retries.sleep_for_retry(http_response)
log.debug("Redirecting %s -> %s", url, redirect_location)
redirect_url = urljoin(url, redirect_location)
return self.urlopen(
method, redirect_url, body, headers,
retries=retries, redirect=redirect,
timeout=timeout, **response_kw)
# Check if we should retry the HTTP response.
has_retry_after = bool(http_response.getheader('Retry-After'))
if retries.is_retry(method, http_response.status, has_retry_after):
retries = retries.increment(
method, url, response=http_response, _pool=self)
log.debug("Retry: %s", url)
retries.sleep(http_response)
return self.urlopen(
method, url,
body=body, headers=headers,
retries=retries, redirect=redirect,
timeout=timeout, **response_kw)
return http_response
def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
if is_prod_appengine():
# Production GAE handles deflate encoding automatically, but does
# not remove the encoding header.
content_encoding = urlfetch_resp.headers.get('content-encoding')
if content_encoding == 'deflate':
del urlfetch_resp.headers['content-encoding']
transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
# We have a full response's content,
# so let's make sure we don't report ourselves as chunked data.
if transfer_encoding == 'chunked':
encodings = transfer_encoding.split(",")
encodings.remove('chunked')
urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
return HTTPResponse(
# In order for decoding to work, we must present the content as
# a file-like object.
body=BytesIO(urlfetch_resp.content),
headers=urlfetch_resp.headers,
status=urlfetch_resp.status_code,
**response_kw
)
def _get_absolute_timeout(self, timeout):
if timeout is Timeout.DEFAULT_TIMEOUT:
return None # Defer to URLFetch's default.
if isinstance(timeout, Timeout):
if timeout._read is not None or timeout._connect is not None:
warnings.warn(
"URLFetch does not support granular timeout settings, "
"reverting to total or default URLFetch timeout.",
AppEnginePlatformWarning)
return timeout.total
return timeout
def _get_retries(self, retries, redirect):
if not isinstance(retries, Retry):
retries = Retry.from_int(
retries, redirect=redirect, default=self.retries)
if retries.connect or retries.read or retries.redirect:
warnings.warn(
"URLFetch only supports total retries and does not "
"recognize connect, read, or redirect retry parameters.",
AppEnginePlatformWarning)
return retries
def is_appengine():
return (is_local_appengine() or
is_prod_appengine() or
is_prod_appengine_mvms())
def is_appengine_sandbox():
return is_appengine() and not is_prod_appengine_mvms()
def is_local_appengine():
return ('APPENGINE_RUNTIME' in os.environ and
'Development/' in os.environ['SERVER_SOFTWARE'])
def is_prod_appengine():
return ('APPENGINE_RUNTIME' in os.environ and
'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
not is_prod_appengine_mvms())
def is_prod_appengine_mvms():
return os.environ.get('GAE_VM', False) == 'true'
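# Illustrative only (not part of the original module): a minimal sketch of
# issuing a request through the URLFetch-backed manager with an explicit
# total-retry budget, which is the only retry knob URLFetch honours (see the
# warning in _get_retries()). The URL is an arbitrary example.
def _example_request(url='https://www.google.com/'):
    if not is_appengine_sandbox():
        return None
    manager = AppEngineManager(retries=Retry(total=3))
    return manager.request('GET', url)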
|
gpl-3.0
|
ddrown/irssiconnectbot-protobuf
|
examples/list_people.py
|
429
|
1135
|
#! /usr/bin/python
# See README.txt for information and build instructions.
import addressbook_pb2
import sys
# Iterates through all people in the AddressBook and prints info about them.
def ListPeople(address_book):
for person in address_book.person:
print "Person ID:", person.id
print " Name:", person.name
if person.HasField('email'):
print " E-mail address:", person.email
for phone_number in person.phone:
if phone_number.type == addressbook_pb2.Person.MOBILE:
print " Mobile phone #:",
elif phone_number.type == addressbook_pb2.Person.HOME:
print " Home phone #:",
elif phone_number.type == addressbook_pb2.Person.WORK:
print " Work phone #:",
print phone_number.number
# Main procedure: Reads the entire address book from a file and prints all
# the information inside.
if len(sys.argv) != 2:
print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE"
sys.exit(-1)
address_book = addressbook_pb2.AddressBook()
# Read the existing address book.
f = open(sys.argv[1], "rb")
address_book.ParseFromString(f.read())
f.close()
ListPeople(address_book)
|
bsd-3-clause
|
MackZxh/OCA-Choice
|
server-tools/server_env_base_external_referentials/__openerp__.py
|
53
|
1954
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2011-2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Server environment for base_external_referential",
"version": "1.0",
"depends": ["base", 'server_environment', 'base_external_referentials'],
"author": "Camptocamp,Odoo Community Association (OCA)",
'license': 'AGPL-3',
"description": """This module is based on the server_environment module to use files for configuration.
Thus we can have a different file for each environment (dev, test, staging, prod).
This module defines the config variables for the base_external_referential module.
In the configuration file, you can configure the URL, login and password of the referentials.
Example of the section to put in the configuration file:
[external_referential.name_of_my_external_referential]
location = http://localhost/magento/
apiusername = my_api_login
apipass = my_api_password
""",
"website": "http://www.camptocamp.com",
"category": "Tools",
"init_xml": [],
"demo_xml": [],
"update_xml": [],
"installable": False,
"active": False,
}
|
lgpl-3.0
|
0x0all/nupic
|
py/nupic/support/configuration_base.py
|
2
|
13684
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import with_statement
import os
import logging
from xml.etree import ElementTree
import nupic
# Turn on additional print statements
DEBUG = False
def _getLogger():
logger = logging.getLogger("com.numenta.nupic.tools.configuration_base")
if DEBUG:
logger.setLevel(logging.DEBUG)
return logger
###############################################################################
# Configuration class
###############################################################################
class Configuration(object):
""" This class can be used to fetch NuPic configuration settings which are
stored in one or more XML files.
If the environment variable 'NTA_CONF_PATH' is defined, then the configuration
files are expected to be in the NTA_CONF_PATH search path, which is a ':'
separated list of directories. If NTA_CONF_PATH is not defined, then it is
assumed to be NTA/conf/default (typically ~/nta/current/conf/default).
"""
# Once we read in the properties, they are stored in this dict
_properties = None
# This stores the paths we search for config files. It can be modified through
# the setConfigPaths() method.
_configPaths = None
# Any environment variable prefixed with this string serves as an override
# to property defined in the current configuration
envPropPrefix = 'NTA_CONF_PROP_'
@classmethod
def getString(cls, prop):
""" Retrieve the requested property as a string. If property does not exist,
then KeyError will be raised.
Parameters:
----------------------------------------------------------------
prop: name of the property
retval: property value as a string
"""
if cls._properties is None:
cls._readStdConfigFiles()
# Allow configuration properties to be overridden via environment variables
envValue = os.environ.get("%s%s" % (cls.envPropPrefix,
prop.replace('.', '_')), None)
if envValue is not None:
return envValue
return cls._properties[prop]
@classmethod
def getBool(cls, prop):
""" Retrieve the requested property and return it as a bool. If property
does not exist, then KeyError will be raised. If the property value is
neither 0 nor 1, then ValueError will be raised
Parameters:
----------------------------------------------------------------
prop: name of the property
retval: property value as bool
"""
value = cls.getInt(prop)
if value not in (0, 1):
raise ValueError("Expected 0 or 1, but got %r in config property %s" % (
value, prop))
return bool(value)
@classmethod
def getInt(cls, prop):
""" Retrieve the requested property and return it as an int. If property
does not exist, then KeyError will be raised.
Parameters:
----------------------------------------------------------------
prop: name of the property
retval: property value as int
"""
return int(cls.getString(prop))
@classmethod
def getFloat(cls, prop):
""" Retrieve the requested property and return it as a float. If property
does not exist, then KeyError will be raised.
Parameters:
----------------------------------------------------------------
prop: name of the property
retval: property value as float
"""
return float(cls.getString(prop))
@classmethod
def get(cls, prop, default=None):
""" Get the value of the given configuration property as string. This
returns a string which is the property value, or the value of "default" arg
if the property is not found. Use Configuration.getString() instead.
NOTE: it's atypical for our configuration properties to be missing - a
missing configuration property is usually a very serious error. Because
of this, it's preferable to use one of the getString, getInt, getFloat,
etc. variants instead of get(). Those variants will raise KeyError when
an expected property is missing.
Parameters:
----------------------------------------------------------------
prop: name of the property
default: default value to return if property does not exist
retval: property value (as a string), or default if the property does
not exist.
"""
try:
return cls.getString(prop)
except KeyError:
return default
@classmethod
def set(cls, prop, value):
""" Set the value of the given configuration property.
Parameters:
----------------------------------------------------------------
prop: name of the property
value: value to set
"""
if cls._properties is None:
cls._readStdConfigFiles()
cls._properties[prop] = str(value)
@classmethod
def dict(cls):
""" Return a dict containing all of the configuration properties
Parameters:
----------------------------------------------------------------
retval: dict containing all configuration properties.
"""
if cls._properties is None:
cls._readStdConfigFiles()
# Make a copy so we can update any current values obtained from environment
# variables
result = dict(cls._properties)
keys = os.environ.keys()
replaceKeys = filter(lambda x: x.startswith(cls.envPropPrefix),
keys)
for envKey in replaceKeys:
key = envKey[len(cls.envPropPrefix):]
key = key.replace('_', '.')
result[key] = os.environ[envKey]
return result
@classmethod
def readConfigFile(cls, filename, path=None):
""" Parse the given XML file and store all properties it describes.
Parameters:
----------------------------------------------------------------
filename: name of XML file to parse (no path)
path: path of the XML file. If None, then use the standard
configuration search path.
"""
properties = cls._readConfigFile(filename, path)
# Create properties dict if necessary
if cls._properties is None:
cls._properties = dict()
for name in properties:
if 'value' in properties[name]:
cls._properties[name] = properties[name]['value']
@classmethod
def _readConfigFile(cls, filename, path=None):
""" Parse the given XML file and return a dict describing the file.
Parameters:
----------------------------------------------------------------
filename: name of XML file to parse (no path)
path: path of the XML file. If None, then use the standard
configuration search path.
retval: returns a dict with each property as a key and a dict of all
the property's attributes as value
"""
outputProperties = dict()
# Get the path to the config files.
if path is None:
filePath = cls.findConfigFile(filename)
else:
filePath = os.path.join(path, filename)
# ------------------------------------------------------------------
# Read in the config file
try:
if filePath is not None:
try:
          # Log which config file is being loaded (debug level; console log level defaults to warning)
_getLogger().debug("Loading config file: %s", filePath)
with open(filePath, 'rb') as inp:
contents = inp.read()
except Exception:
contents = '<configuration/>'
else:
contents = '<configuration/>'
elements = ElementTree.XML(contents)
if elements.tag != 'configuration':
raise RuntimeError("Expected top-level element to be 'configuration' "
"but got '%s'" % (elements.tag))
# ------------------------------------------------------------------
# Add in each property found
propertyElements = elements.findall('./property')
for propertyItem in propertyElements:
propInfo = dict()
# Parse this property element
propertyAttributes = list(propertyItem)
for propertyAttribute in propertyAttributes:
propInfo[propertyAttribute.tag] = propertyAttribute.text
# Get the name
name = propInfo.get('name', None)
# value is allowed to be empty string
if 'value' in propInfo and propInfo['value'] is None:
value = ''
else:
value = propInfo.get('value', None)
if value is None:
if 'novalue' in propInfo:
# Placeholder "novalue" properties are intended to be overridden
# via dynamic configuration or another configuration layer.
continue
else:
raise RuntimeError("Missing 'value' element within the property "
"element: => %s " % (str(propInfo)))
        # The value is allowed to contain substitution tags of the form
# ${env.VARNAME}, which should be substituted with the corresponding
# environment variable values
restOfValue = value
value = ''
while True:
# Find the beginning of substitution tag
pos = restOfValue.find('${env.')
if pos == -1:
# No more environment variable substitutions
value += restOfValue
break
# Append prefix to value accumulator
value += restOfValue[0:pos]
# Find the end of current substitution tag
varTailPos = restOfValue.find('}', pos)
if varTailPos == -1:
raise RuntimeError("Trailing environment variable tag delimiter '}'"
" not found in %r" % (restOfValue))
# Extract environment variable name from tag
varname = restOfValue[pos+6:varTailPos]
if varname not in os.environ:
raise RuntimeError("Attempting to use the value of the environment"
" variable %r, which is not defined" % (varname))
envVarValue = os.environ[varname]
value += envVarValue
restOfValue = restOfValue[varTailPos+1:]
# Check for errors
if name is None:
raise RuntimeError("Missing 'name' element within following property "
"element:\n => %s " % (str(propInfo)))
propInfo['value'] = value
outputProperties[name] = propInfo
return outputProperties
except Exception:
_getLogger().exception("Error while parsing configuration file: %s.",
filePath)
raise
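  # Illustrative example (not part of the original module) of the XML layout
  # parsed by _readConfigFile(): a top-level <configuration> element holding
  # <property> children, each with <name> and <value> (or <novalue>) tags.
  # ${env.VARNAME} references inside <value> are replaced by the value of the
  # corresponding environment variable, e.g.:
  #
  #   <configuration>
  #     <property>
  #       <name>demo.dataDir</name>
  #       <value>${env.HOME}/nta/data</value>
  #     </property>
  #   </configuration>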
@classmethod
def clear(cls):
""" Clear out the entire configuration.
"""
cls._properties = None
cls._configPaths = None
@classmethod
def findConfigFile(cls, filename):
""" Search the configuration path (specified via the NTA_CONF_PATH
environment variable) for the given filename. If found, return the complete
path to the file.
Parameters:
----------------------------------------------------------------
filename: name of file to locate
"""
paths = cls.getConfigPaths()
for p in paths:
testPath = os.path.join(p, filename)
if os.path.isfile(testPath):
return os.path.join(p, filename)
@classmethod
def getConfigPaths(cls):
""" Return the list of paths to search for configuration files.
Parameters:
----------------------------------------------------------------
retval: list of paths.
"""
if cls._configPaths is not None:
return cls._configPaths
else:
if 'NTA_CONF_PATH' in os.environ:
configVar = os.environ['NTA_CONF_PATH']
# Return as a list of paths
configPaths = configVar.split(':')
elif (
not os.path.exists(os.path.join(
nupic.rootDir, 'conf', 'site', 'default')) and
os.path.exists(os.path.join(nupic.rootDir, 'conf', 'default'))):
configPaths = [os.path.join(nupic.rootDir, 'conf', 'default')]
else:
configPaths = [os.path.join(nupic.rootDir, 'conf', 'site', 'default')]
return configPaths
@classmethod
def setConfigPaths(cls, paths):
""" Modify the paths we use to search for configuration files.
Parameters:
----------------------------------------------------------------
paths: list of paths to search for config files.
"""
cls._configPaths = list(paths)
@classmethod
def _readStdConfigFiles(cls):
""" Read in all standard configuration files
"""
# Default one first
cls.readConfigFile('nupic-default.xml')
# Site specific one can override properties defined in default
cls.readConfigFile('nupic-site.xml')
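# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module), showing how the
# accessors above interact with the NTA_CONF_PROP_ environment override. The
# property name 'demo.threshold' is hypothetical.
if __name__ == '__main__':
  Configuration.set('demo.threshold', 42)
  assert Configuration.getInt('demo.threshold') == 42
  # An environment variable named NTA_CONF_PROP_demo_threshold takes
  # precedence over the stored value in getString()/getInt()/getBool().
  os.environ['NTA_CONF_PROP_demo_threshold'] = '7'
  assert Configuration.getInt('demo.threshold') == 7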
|
gpl-3.0
|
narry/odenos
|
apps/mininet_examples/multi_network_control/config_odenos.py
|
6
|
10564
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
import signal
import sys
import threading
import time
from org.o3project.odenos.remoteobject.transport.message_dispatcher import MessageDispatcher
from org.o3project.odenos.remoteobject.object_property import ObjectProperty
from org.o3project.odenos.remoteobject.manager.system.component_connection import ComponentConnection
from org.o3project.odenos.remoteobject.manager.system.component_connection_logic_and_network import ComponentConnectionLogicAndNetwork
from org.o3project.odenos.core.component.network.flow.basic.flow_action_output import FlowActionOutput
from org.o3project.odenos.core.component.network.flow.ofpflow.ofp_flow import OFPFlow
from org.o3project.odenos.core.component.network.flow.ofpflow.ofp_flow_match import OFPFlowMatch
from org.o3project.odenos.core.util.network_interface import NetworkInterface
from org.o3project.odenos.core.util.remote_object_interface import RemoteObjectInterface
from org.o3project.odenos.core.util.system_manager_interface import SystemManagerInterface
def signal_handler(num, stack):
print 'Received signal %d' % num
dispatcher.close()
dispatcher.stop()
sys.exit()
class ServerThread(threading.Thread):
def __init__(self, dispatcher):
threading.Thread.__init__(self)
self.disp = dispatcher
def run(self):
self.disp.start()
class OdenosConfigurator(object):
CM1 = "romgr1"
CM2 = "romgr2"
CM3 = "romgr3"
CM4 = "romgr4"
CM5 = "romgr5"
CM6 = "romgr6"
def __init__(self, dispatcher):
self.disp = dispatcher
self.sysmgr = SystemManagerInterface(self.disp)
def create_component(self, type, name, cm_id):
obj = ObjectProperty(type, name)
obj.set_property("version", "1")
obj.set_property(ObjectProperty.CM_ID, cm_id)
ret = self.sysmgr.put_components(obj).status_code
if ret != 201:
print "failed to create(ret): " + type + " " + name + " @ " + cm_id
return RemoteObjectInterface(self.disp, name)
def create_aggregator(self, name, cm_id=CM1):
return self.create_component("Aggregator", name, cm_id)
def create_federator(self, name, cm_id=CM1):
return self.create_component("Federator", name, cm_id)
def create_l2switch(self, name, cm_id=CM1):
return self.create_component("LearningSwitch", name, cm_id)
def create_linklayerizer(self, name, cm_id=CM1):
return self.create_component("LinkLayerizer", name, cm_id)
def create_ofdriver(self, name, cm_id=CM3):
return self.create_component("OpenFlowDriver", name, cm_id)
def create_network(self, name, cm_id=CM1):
self.create_component("Network", name, cm_id)
return NetworkInterface(self.disp, name)
def create_slicer(self, name, cm_id=CM1):
return self.create_component("Slicer", name, cm_id)
def connect(self, logic, network, type):
conn_id = logic.object_id + "-" + network.object_id
conn = ComponentConnectionLogicAndNetwork(
conn_id, type, ComponentConnection.State.INITIALIZING,
logic.object_id, network.object_id)
if self.sysmgr.put_connection(conn).status_code != 201:
print "failed to connect(ret): " + conn_id + " as " + type
def set_flow(self, network, flow_id, inport, outport, inners, vid=0):
inport = network.get_physical_port(inport)
outport = network.get_physical_port(outport)
match = OFPFlowMatch("OFPFlowMatch", inport.node_id, inport.port_id)
if vid:
match.vlan_vid = vid
path = []
for inner in inners:
inner_inport = network.get_physical_port(inner[0])
inner_outport = network.get_physical_port(inner[1])
link_id = ""
links = network.get_links()
for id_, link in links.iteritems():
if link.src_node == inner_inport.node_id and \
link.src_port == inner_inport.port_id and \
link.dst_node == inner_outport.node_id and \
link.dst_port == inner_outport.port_id:
print "*** found: " + id_
path.append(id_)
actions = {outport.node_id: [FlowActionOutput("FlowActionOutput", outport.port_id)]}
flow = OFPFlow("OFPFlow", "1", flow_id, "demo", True, "65535", "none",
{}, [match], 0, 0, path, actions)
network.put_flow(flow)
def set_lly_boundaries(self, target, boundaries):
n = 0
for ports in boundaries:
net1 = ports[0][0]
net2 = ports[1][0]
phy_port1 = ports[0][1]
phy_port2 = ports[1][1]
port1 = net1.get_physical_port(phy_port1)
port2 = net2.get_physical_port(phy_port2)
while not port1:
print "cannot get port by %s from %s" % (phy_port1, net1.network_id)
print "Please start mininet"
port1 = net1.get_physical_port(phy_port1)
time.sleep(2)
while not port2:
print "cannot get port by %s from %s" % (phy_port2, net2.network_id)
print "Please start mininet"
port2 = net2.get_physical_port(phy_port2)
time.sleep(2)
bond_id = "bond_%s" % str(n)
bond = {"id": bond_id, "type": "LinkLayerizer",
"upper_nw": net1.network_id,
"upper_nw_node": port1.node_id, "upper_nw_port": port1.port_id,
"lower_nw": net2.network_id,
"lower_nw_node": port2.node_id, "lower_nw_port": port2.port_id
}
n = n + 1
target._put_object_to_remote_object("settings/boundaries/%s" % bond_id, bond)
def set_fed_boundaries(self, target, boundaries):
n = 0
for ports in boundaries:
net1 = ports[0][0]
net2 = ports[1][0]
phy_port1 = ports[0][1]
phy_port2 = ports[1][1]
port1 = net1.get_physical_port(phy_port1)
port2 = net2.get_physical_port(phy_port2)
while not port1:
print "cannot get port by %s from %s" % (phy_port1, net1.network_id)
print "Please start mininet"
port1 = net1.get_physical_port(phy_port1)
time.sleep(2)
while not port2:
print "cannot get port by %s from %s" % (phy_port2, net2.network_id)
print "Please start mininet"
port2 = net2.get_physical_port(phy_port2)
time.sleep(2)
bond_id = "bond_%s" % str(n)
bond = {"id": bond_id, "type": "Federator",
"network1": net1.network_id,
"node1": port1.node_id, "port1": port1.port_id,
"network2": net2.network_id,
"node2": port2.node_id, "port2": port2.port_id
}
n = n + 1
target._put_object_to_remote_object("settings/boundaries/%s" % bond_id, bond)
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
dispatcher = MessageDispatcher()
thread = ServerThread(dispatcher)
thread.start()
time.sleep(1)
oc = OdenosConfigurator(dispatcher)
drv1 = oc.create_ofdriver("driver_dc1", oc.CM3)
drv2 = oc.create_ofdriver("driver_dc2", oc.CM4)
drv3 = oc.create_ofdriver("driver_dc3", oc.CM5)
drv4 = oc.create_ofdriver("driver_wan", oc.CM6)
net1 = oc.create_network("network1")
net2 = oc.create_network("network2")
net3 = oc.create_network("network3")
net4 = oc.create_network("network4")
net5 = oc.create_network("network5")
net6 = oc.create_network("network6")
net7 = oc.create_network("network7")
agg1 = oc.create_aggregator("aggregator1")
fed1 = oc.create_federator("federator1")
lly1 = oc.create_linklayerizer("linklayerizer1")
lsw1 = oc.create_l2switch("l2sw1")
oc.connect(lsw1, net7, "original")
oc.connect(agg1, net7, "aggregated")
oc.connect(agg1, net6, "original")
oc.connect(lly1, net6, "layerized")
oc.connect(lly1, net5, "upper")
oc.connect(lly1, net4, "lower")
oc.connect(fed1, net5, "federated")
oc.connect(fed1, net3, "original")
oc.connect(fed1, net2, "original")
oc.connect(fed1, net1, "original")
oc.connect(drv1, net1, "original")
oc.connect(drv2, net2, "original")
oc.connect(drv3, net3, "original")
oc.connect(drv4, net4, "original")
time.sleep(5)
# set boundaries
boundaries = [
[[net5, "4@0x3"], [net4, "3@0xe"]], [[net5, "3@0x4"], [net4, "3@0xd"]],
[[net5, "3@0x6"], [net4, "3@0x12"]], [[net5, "3@0x7"], [net4, "3@0x11"]],
[[net5, "3@0x9"], [net4, "3@0x10"]], [[net5, "3@0xa"], [net4, "3@0xf"]]]
oc.set_lly_boundaries(lly1, boundaries)
# set flows
# net1 - net2
oc.set_flow(net4, "flow46", "3@0xd", "3@0x12", [["2@0xd", "2@0x12"]])
oc.set_flow(net4, "flow64", "3@0x12", "3@0xd", [["2@0x12", "2@0xd"]])
# net2 - net3
oc.set_flow(net4, "flow79", "3@0x11", "3@0x10", [["1@0x11", "2@0x10"]])
oc.set_flow(net4, "flow97", "3@0x10", "3@0x11", [["2@0x10", "1@0x11"]])
# net3 - net1
oc.set_flow(net4, "flow103", "3@0xf", "3@0xe", [["1@0xf", "2@0xe"]])
oc.set_flow(net4, "flow310", "3@0xe", "3@0xf", [["2@0xe", "1@0xf"]])
thread.join()
dispatcher.stop()
|
apache-2.0
|
wlamond/scikit-learn
|
sklearn/datasets/rcv1.py
|
17
|
8271
|
"""RCV1 dataset.
"""
# Author: Tom Dupre la Tour
# License: BSD 3 clause
import logging
from os.path import exists, join
from gzip import GzipFile
from io import BytesIO
from contextlib import closing
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import _pkl_filepath
from ..utils.fixes import makedirs
from ..externals import joblib
from .svmlight_format import load_svmlight_files
from ..utils import shuffle as shuffle_
from ..utils import Bunch
URL = ('http://jmlr.csail.mit.edu/papers/volume5/lewis04a/'
'a13-vector-files/lyrl2004_vectors')
URL_topics = ('http://jmlr.csail.mit.edu/papers/volume5/lewis04a/'
'a08-topic-qrels/rcv1-v2.topics.qrels.gz')
logger = logging.getLogger()
def fetch_rcv1(data_home=None, subset='all', download_if_missing=True,
random_state=None, shuffle=False):
"""Load the RCV1 multilabel dataset, downloading it if necessary.
Version: RCV1-v2, vectors, full sets, topics multilabels.
============== =====================
Classes 103
Samples total 804414
Dimensionality 47236
Features real, between 0 and 1
============== =====================
Read more in the :ref:`User Guide <datasets>`.
.. versionadded:: 0.17
Parameters
----------
data_home : string, optional
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
subset : string, 'train', 'test', or 'all', default='all'
Select the dataset to load: 'train' for the training set
(23149 samples), 'test' for the test set (781265 samples),
'all' for both, with the training samples first if shuffle is False.
This follows the official LYRL2004 chronological split.
download_if_missing : boolean, default=True
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
random_state : int, RandomState instance or None, optional (default=None)
Random state for shuffling the dataset.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : bool, default=False
Whether to shuffle dataset.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : scipy csr array, dtype np.float64, shape (804414, 47236)
The array has 0.16% of non zero values.
dataset.target : scipy csr array, dtype np.uint8, shape (804414, 103)
Each sample has a value of 1 in its categories, and 0 in others.
The array has 3.15% of non zero values.
dataset.sample_id : numpy array, dtype np.uint32, shape (804414,)
Identification number of each sample, as ordered in dataset.data.
dataset.target_names : numpy array, dtype object, length (103)
Names of each target (RCV1 topics), as ordered in dataset.target.
dataset.DESCR : string
Description of the RCV1 dataset.
References
----------
Lewis, D. D., Yang, Y., Rose, T. G., & Li, F. (2004). RCV1: A new
benchmark collection for text categorization research. The Journal of
Machine Learning Research, 5, 361-397.
"""
N_SAMPLES = 804414
N_FEATURES = 47236
N_CATEGORIES = 103
N_TRAIN = 23149
data_home = get_data_home(data_home=data_home)
rcv1_dir = join(data_home, "RCV1")
if download_if_missing:
makedirs(rcv1_dir, exist_ok=True)
samples_path = _pkl_filepath(rcv1_dir, "samples.pkl")
sample_id_path = _pkl_filepath(rcv1_dir, "sample_id.pkl")
sample_topics_path = _pkl_filepath(rcv1_dir, "sample_topics.pkl")
topics_path = _pkl_filepath(rcv1_dir, "topics_names.pkl")
# load data (X) and sample_id
if download_if_missing and (not exists(samples_path) or
not exists(sample_id_path)):
file_urls = ["%s_test_pt%d.dat.gz" % (URL, i) for i in range(4)]
file_urls.append("%s_train.dat.gz" % URL)
files = []
for file_url in file_urls:
logger.warning("Downloading %s" % file_url)
with closing(urlopen(file_url)) as online_file:
                # buffer the full file in memory so that GzipFile can
                # decompress it correctly
f = BytesIO(online_file.read())
files.append(GzipFile(fileobj=f))
Xy = load_svmlight_files(files, n_features=N_FEATURES)
# Training data is before testing data
X = sp.vstack([Xy[8], Xy[0], Xy[2], Xy[4], Xy[6]]).tocsr()
sample_id = np.hstack((Xy[9], Xy[1], Xy[3], Xy[5], Xy[7]))
sample_id = sample_id.astype(np.uint32)
joblib.dump(X, samples_path, compress=9)
joblib.dump(sample_id, sample_id_path, compress=9)
else:
X = joblib.load(samples_path)
sample_id = joblib.load(sample_id_path)
# load target (y), categories, and sample_id_bis
if download_if_missing and (not exists(sample_topics_path) or
not exists(topics_path)):
logger.warning("Downloading %s" % URL_topics)
with closing(urlopen(URL_topics)) as online_topics:
f = BytesIO(online_topics.read())
# parse the target file
n_cat = -1
n_doc = -1
doc_previous = -1
y = np.zeros((N_SAMPLES, N_CATEGORIES), dtype=np.uint8)
sample_id_bis = np.zeros(N_SAMPLES, dtype=np.int32)
category_names = {}
for line in GzipFile(fileobj=f, mode='rb'):
line_components = line.decode("ascii").split(u" ")
if len(line_components) == 3:
cat, doc, _ = line_components
if cat not in category_names:
n_cat += 1
category_names[cat] = n_cat
doc = int(doc)
if doc != doc_previous:
doc_previous = doc
n_doc += 1
sample_id_bis[n_doc] = doc
y[n_doc, category_names[cat]] = 1
# Samples in X are ordered with sample_id,
# whereas in y, they are ordered with sample_id_bis.
permutation = _find_permutation(sample_id_bis, sample_id)
y = y[permutation, :]
# save category names in a list, with same order than y
categories = np.empty(N_CATEGORIES, dtype=object)
for k in category_names.keys():
categories[category_names[k]] = k
# reorder categories in lexicographic order
order = np.argsort(categories)
categories = categories[order]
y = sp.csr_matrix(y[:, order])
joblib.dump(y, sample_topics_path, compress=9)
joblib.dump(categories, topics_path, compress=9)
else:
y = joblib.load(sample_topics_path)
categories = joblib.load(topics_path)
if subset == 'all':
pass
elif subset == 'train':
X = X[:N_TRAIN, :]
y = y[:N_TRAIN, :]
sample_id = sample_id[:N_TRAIN]
elif subset == 'test':
X = X[N_TRAIN:, :]
y = y[N_TRAIN:, :]
sample_id = sample_id[N_TRAIN:]
else:
raise ValueError("Unknown subset parameter. Got '%s' instead of one"
" of ('all', 'train', test')" % subset)
if shuffle:
X, y, sample_id = shuffle_(X, y, sample_id, random_state=random_state)
return Bunch(data=X, target=y, sample_id=sample_id,
target_names=categories, DESCR=__doc__)
def _inverse_permutation(p):
"""inverse permutation p"""
n = p.size
s = np.zeros(n, dtype=np.int32)
i = np.arange(n, dtype=np.int32)
np.put(s, p, i) # s[p] = i
return s
def _find_permutation(a, b):
"""find the permutation from a to b"""
t = np.argsort(a)
u = np.argsort(b)
u_ = _inverse_permutation(u)
return t[u_]
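# Illustrative usage sketch (not part of the original module): fetching the
# LYRL2004 training split with the loader defined above. Because this module
# uses relative imports, run it as `python -m sklearn.datasets.rcv1`; the
# first call triggers a large download that is cached under data_home.
if __name__ == "__main__":
    rcv1_train = fetch_rcv1(subset='train')
    print(rcv1_train.data.shape)     # expected: (23149, 47236)
    print(rcv1_train.target.shape)   # expected: (23149, 103)
    print(rcv1_train.target_names[:5])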
|
bsd-3-clause
|
Eagles2F/sync-engine
|
migrations/versions/128_fix_cascades.py
|
9
|
1263
|
"""fix_cascades
Revision ID: 284227d72f51
Revises: 581e91bd7141
Create Date: 2015-01-15 14:03:36.379402
"""
# revision identifiers, used by Alembic.
revision = '284227d72f51'
down_revision = '581e91bd7141'
from alembic import op
def upgrade():
connection = op.get_bind()
connection.execute(
'''
ALTER TABLE actionlog DROP FOREIGN KEY actionlog_ibfk_1;
ALTER TABLE actionlog ADD CONSTRAINT actionlog_ibfk_1 FOREIGN KEY (namespace_id) REFERENCES namespace(id) ON DELETE CASCADE;
ALTER TABLE easfoldersyncstatus DROP FOREIGN KEY easfoldersyncstatus_ibfk_3;
ALTER TABLE easfoldersyncstatus ADD CONSTRAINT easfoldersyncstatus_ibfk_3 FOREIGN KEY (folder_id) REFERENCES folder(id) ON DELETE CASCADE;
'''
)
def downgrade():
connection = op.get_bind()
connection.execute(
'''
ALTER TABLE actionlog DROP FOREIGN KEY actionlog_ibfk_1;
ALTER TABLE actionlog ADD CONSTRAINT actionlog_ibfk_1 FOREIGN KEY (namespace_id) REFERENCES namespace(id);
ALTER TABLE easfoldersyncstatus DROP FOREIGN KEY easfoldersyncstatus_ibfk_3;
ALTER TABLE easfoldersyncstatus ADD CONSTRAINT easfoldersyncstatus_ibfk_3 FOREIGN KEY (folder_id) REFERENCES folder(id);
'''
)
|
agpl-3.0
|
a10networks/a10sdk-python
|
a10sdk/core/slb/slb_fix_stats.py
|
2
|
3197
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param svrsel_fail: {"description": "Server selection failure", "format": "counter", "type": "number", "oid": "3", "optional": true, "size": "8"}
:param default_switching: {"description": "Default switching", "format": "counter", "type": "number", "oid": "10", "optional": true, "size": "8"}
:param total_proxy: {"description": "Total proxy conns", "format": "counter", "type": "number", "oid": "2", "optional": true, "size": "8"}
:param curr_proxy: {"description": "Current proxy conns", "format": "counter", "type": "number", "oid": "1", "optional": true, "size": "8"}
:param target_switching: {"description": "Target ID switching", "format": "counter", "type": "number", "oid": "12", "optional": true, "size": "8"}
:param noroute: {"description": "No route failure", "format": "counter", "type": "number", "oid": "4", "optional": true, "size": "8"}
:param client_err: {"description": "Client fail", "format": "counter", "type": "number", "oid": "6", "optional": true, "size": "8"}
:param sender_switching: {"description": "Sender ID switching", "format": "counter", "type": "number", "oid": "11", "optional": true, "size": "8"}
:param server_err: {"description": "Server fail", "format": "counter", "type": "number", "oid": "7", "optional": true, "size": "8"}
:param snat_fail: {"description": "Source NAT failure", "format": "counter", "type": "number", "oid": "5", "optional": true, "size": "8"}
:param insert_clientip: {"description": "Insert client IP", "format": "counter", "type": "number", "oid": "9", "optional": true, "size": "8"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "stats"
self.DeviceProxy = ""
self.svrsel_fail = ""
self.default_switching = ""
self.total_proxy = ""
self.curr_proxy = ""
self.target_switching = ""
self.noroute = ""
self.client_err = ""
self.sender_switching = ""
self.server_err = ""
self.snat_fail = ""
self.insert_clientip = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Fix(A10BaseClass):
"""Class Description::
Statistics for the object fix.
Class fix supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/slb/fix/stats`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "fix"
self.a10_url="/axapi/v3/slb/fix/stats"
self.DeviceProxy = ""
self.stats = {}
for keys, value in kwargs.items():
setattr(self,keys, value)
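# Illustrative only (not part of the original module): keyword arguments passed
# to these classes simply become attributes via setattr(), as in __init__ above;
# `device_proxy` below is a hypothetical DeviceProxy instance.
#
#   fix_stats = Fix(DeviceProxy=device_proxy)
#   print(fix_stats.a10_url)   # "/axapi/v3/slb/fix/stats"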
|
apache-2.0
|
nanolearning/edx-platform
|
common/lib/capa/capa/util.py
|
3
|
5174
|
from calc import evaluator
from cmath import isinf
#-----------------------------------------------------------------------------
#
# Utility functions used in CAPA responsetypes
default_tolerance = '0.001%'
def compare_with_tolerance(student_complex, instructor_complex, tolerance=default_tolerance, relative_tolerance=False):
"""
    Compare student_complex to instructor_complex within the given maximum tolerance.
- student_complex : student result (float complex number)
- instructor_complex : instructor result (float complex number)
- tolerance : float, or string (representing a float or a percentage)
- relative_tolerance: bool, to explicitly use passed tolerance as relative
Note: when a tolerance is a percentage (i.e. '10%'), it will compute that
percentage of the instructor result and yield a number.
If relative_tolerance is set to False, it will use that value and the
instructor result to define the bounds of valid student result:
instructor_complex = 10, tolerance = '10%' will give [9.0, 11.0].
If relative_tolerance is set to True, it will use that value and both
instructor result and student result to define the bounds of valid student
result:
instructor_complex = 10, student_complex = 20, tolerance = '10%' will give
[8.0, 12.0].
This is typically used internally to compare float, with a
default_tolerance = '0.001%'.
Default tolerance of 1e-3% is added to compare two floats for
near-equality (to handle machine representation errors).
Default tolerance is relative, as the acceptable difference between two
floats depends on the magnitude of the floats.
(http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/)
Examples:
In [183]: 0.000016 - 1.6*10**-5
Out[183]: -3.3881317890172014e-21
In [212]: 1.9e24 - 1.9*10**24
Out[212]: 268435456.0
"""
if isinstance(tolerance, str):
if tolerance == default_tolerance:
relative_tolerance = True
if tolerance.endswith('%'):
tolerance = evaluator(dict(), dict(), tolerance[:-1]) * 0.01
if not relative_tolerance:
tolerance = tolerance * abs(instructor_complex)
else:
tolerance = evaluator(dict(), dict(), tolerance)
if relative_tolerance:
tolerance = tolerance * max(abs(student_complex), abs(instructor_complex))
if isinf(student_complex) or isinf(instructor_complex):
# If an input is infinite, we can end up with `abs(student_complex-instructor_complex)` and
# `tolerance` both equal to infinity. Then, below we would have
# `inf <= inf` which is a fail. Instead, compare directly.
return student_complex == instructor_complex
else:
# v1 and v2 are, in general, complex numbers:
        # there are some notes about a backward compatibility issue; see responsetypes.get_staff_ans().
return abs(student_complex - instructor_complex) <= tolerance
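def _compare_with_tolerance_examples():
    """Illustrative only (not part of the original module): worked examples of
    compare_with_tolerance() using the semantics documented above."""
    # '10%' of the instructor value 10 gives the bounds [9.0, 11.0].
    assert compare_with_tolerance(10.5, 10, '10%')
    # With relative_tolerance=True the bound is 10% of max(|20|, |10|) = 2.0,
    # so a difference of 10 is rejected.
    assert not compare_with_tolerance(20, 10, '10%', relative_tolerance=True)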
def contextualize_text(text, context): # private
"""
Takes a string with variables. E.g. $a+$b.
Does a substitution of those variables from the context
"""
if not text:
return text
for key in sorted(context, lambda x, y: cmp(len(y), len(x))):
# TODO (vshnayder): This whole replacement thing is a big hack
# right now--context contains not just the vars defined in the
# program, but also e.g. a reference to the numpy module.
# Should be a separate dict of variables that should be
# replaced.
if '$' + key in text:
try:
s = str(context[key])
except UnicodeEncodeError:
s = context[key].encode('utf8', errors='ignore')
text = text.replace('$' + key, s)
return text
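# Worked example for contextualize_text() (illustrative, not from the original
# module): contextualize_text('$a + $b', {'a': 1, 'b': 2}) returns '1 + 2'.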
def convert_files_to_filenames(answers):
"""
Check for File objects in the dict of submitted answers,
convert File objects to their filename (string)
"""
new_answers = dict()
for answer_id in answers.keys():
answer = answers[answer_id]
# Files are stored as a list, even if one file
if is_list_of_files(answer):
new_answers[answer_id] = [f.name for f in answer]
else:
new_answers[answer_id] = answers[answer_id]
return new_answers
def is_list_of_files(files):
return isinstance(files, list) and all(is_file(f) for f in files)
def is_file(file_to_test):
"""
Duck typing to check if 'file_to_test' is a File object
"""
return all(hasattr(file_to_test, method) for method in ['read', 'name'])
def find_with_default(node, path, default):
"""
    Look for a child of node using path, and return its text if found.
Otherwise returns default.
Arguments:
node: lxml node
path: xpath search expression
default: value to return if nothing found
Returns:
node.find(path).text if the find succeeds, default otherwise.
"""
v = node.find(path)
if v is not None:
return v.text
else:
return default
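# Worked example for find_with_default() (illustrative, not from the original
# module): for an lxml node parsed from '<problem><hint>try again</hint></problem>',
# find_with_default(node, 'hint', 'none') returns 'try again', while
# find_with_default(node, 'solution', 'none') returns 'none'.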
|
agpl-3.0
|
qisanstudio/qstudio-launch
|
src/studio/launch/commands/nginx_commands.py
|
1
|
3541
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import sh
from sh import sudo, ErrorReturnCode
from StringIO import StringIO
from termcolor import colored
from studio.frame import config
from studio.launch.base import manager
from .contrib import mkdirs, build_structure
CONF_DIR = os.path.join(config.common['NGINX_CHROOT'], 'etc/nginx/')
nginx_manager = manager.subcommand('nginx')
def _init_nginx():
mkdirs(CONF_DIR)
mkdirs(os.path.join(config.common['NGINX_CHROOT'], 'var/lib/nginx/body'))
mkdirs(os.path.join(config.common['NGINX_CHROOT'], 'var/log/nginx/'))
@nginx_manager.command
def render():
"""
    Render the nginx configuration.
"""
_init_nginx()
params = {}
params['NGINX_CHROOT'] = config.common['NGINX_CHROOT']
build_structure('nginx', dist=CONF_DIR, **params)
print(colored('render config complete!', 'green'))
# TODO ln -s sites-enabled/
@nginx_manager.command
def test():
"""
    Test whether the current nginx configuration is valid.
"""
print('Testing nginx:', end=' ')
err = StringIO()
try:
sudo.nginx('-p', CONF_DIR, '-c',
os.path.join(CONF_DIR, 'nginx.conf'),
'-t', _err=err)
except ErrorReturnCode:
print(colored('failed', 'red', attrs=['bold']) + '.')
print(colored(err.getvalue(), attrs=['bold']), file=sys.stderr)
else:
print(colored('success', 'green', attrs=['bold']) + '.')
@nginx_manager.command
def start():
"""
    Start the nginx process.
"""
print('Starting nginx:', end=' ')
err = StringIO()
try:
sudo.nginx('-p', CONF_DIR, '-c',
os.path.join(CONF_DIR, 'nginx.conf'),
_err=err)
except ErrorReturnCode:
print(colored('failed', 'red', attrs=['bold']) + '.')
print(colored(err.getvalue(), attrs=['bold']), file=sys.stderr)
else:
print(colored('nginx', 'green', attrs=['bold']) + '.')
@nginx_manager.command
def stop():
"""
    Stop the nginx process.
"""
print('Stopping nginx:', end=' ')
try:
sudo.nginx('-p', CONF_DIR, '-c',
os.path.join(CONF_DIR, 'nginx.conf'),
'-s', 'stop')
except ErrorReturnCode:
print(colored('failed', 'red', attrs=['bold']) + '.')
else:
print(colored('nginx', 'green', attrs=['bold']) + '.')
@nginx_manager.command
def reload():
"""
    Gracefully reload the nginx configuration.
"""
print('Reloading nginx:', end=' ')
err = StringIO()
try:
sudo.nginx('-p', CONF_DIR, '-c',
os.path.join(CONF_DIR, 'nginx.conf'),
'-s', 'reload', _err=err)
except ErrorReturnCode:
print(colored('failed', 'red', attrs=['bold']) + '.')
print(colored(err.getvalue(), attrs=['bold']), file=sys.stderr)
else:
print(colored('nginx', 'green', attrs=['bold']) + '.')
@nginx_manager.command
def restart():
print('Restarting nginx:', end=' ')
try:
sudo.nginx('-p', CONF_DIR, '-c',
os.path.join(CONF_DIR, 'nginx.conf'),
'-s', 'stop')
except ErrorReturnCode:
pass # ignore
finally:
try:
sh.nginx('-p', CONF_DIR, '-c',
os.path.join(CONF_DIR, 'nginx.conf'))
except ErrorReturnCode:
print(colored('failed', 'red', attrs=['bold']) + '.')
else:
print(colored('nginx', 'green', attrs=['bold']) + '.')
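# Illustrative invocation sketch (hypothetical entry-point name, not from the
# original module): the functions above are registered as subcommands of
# `manager.subcommand('nginx')`, so they would be run through the project's
# launch manager, e.g.:
#
#   $ studio-launch nginx render
#   $ studio-launch nginx test
#   $ studio-launch nginx reload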
|
mit
|
hortonworks/hortonworks-sandbox
|
desktop/core/ext-py/Pygments-1.3.1/pygments/formatters/html.py
|
55
|
26839
|
# -*- coding: utf-8 -*-
"""
pygments.formatters.html
~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for HTML output.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import StringIO
from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, bytes
__all__ = ['HtmlFormatter']
def escape_html(text):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.replace('&', '&'). \
replace('<', '<'). \
replace('>', '>'). \
replace('"', '"'). \
replace("'", ''')
def get_random_id():
"""Return a random id for javascript fields."""
from random import random
from time import time
try:
from hashlib import sha1 as sha
except ImportError:
import sha
sha = sha.new
return sha('%s|%s' % (random(), time())).hexdigest()
def _get_ttype_class(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = '-' + ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
CSSFILE_TEMPLATE = '''\
td.linenos { background-color: #f0f0f0; padding-right: 10px; }
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
pre { line-height: 125%%; }
%(styledefs)s
'''
DOC_HEADER = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<style type="text/css">
''' + CSSFILE_TEMPLATE + '''
</style>
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_HEADER_EXTERNALCSS = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<link rel="stylesheet" href="%(cssfile)s" type="text/css">
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_FOOTER = '''\
</body>
</html>
'''
class HtmlFormatter(Formatter):
r"""
Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
option.
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
additionally wrapped inside a ``<table>`` which has one row and two
cells: one containing the line numbers and one containing the code.
Example:
.. sourcecode:: html
<div class="highlight" >
<table><tr>
<td class="linenos" title="click to toggle"
onclick="with (this.firstChild.style)
{ display = (display == '') ? 'none' : '' }">
<pre>1
2</pre>
</td>
<td class="code">
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
<span class="Ke">pass</span>
</pre>
</td>
</tr></table></div>
(whitespace added to improve clarity).
Wrapping can be disabled using the `nowrap` option.
A list of lines can be specified using the `hl_lines` option to make these
lines highlighted (as of Pygments 0.11).
With the `full` option, a complete HTML 4 document is output, including
the style definitions inside a ``<style>`` tag, or in a separate file if
the `cssfile` option is given.
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
containing CSS rules for the CSS classes used by the formatter. The
argument `arg` can be used to specify additional CSS selectors that
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
would result in the following CSS classes:
.. sourcecode:: css
td .code .kw { font-weight: bold; color: #00FF00 }
td .code .cm { color: #999999 }
...
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
`get_style_defs()` method to request multiple prefixes for the tokens:
.. sourcecode:: python
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
The output would then look like this:
.. sourcecode:: css
div.syntax pre .kw,
pre.syntax .kw { font-weight: bold; color: #00FF00 }
div.syntax pre .cm,
pre.syntax .cm { color: #999999 }
...
Additional options accepted:
`nowrap`
If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
tag. This disables most other options (default: ``False``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``). This option has no effect if the `cssfile`
and `noclobber_cssfile` option are given and the file specified in
`cssfile` exists.
`noclasses`
If set to true, token ``<span>`` tags will not use CSS classes, but
inline styles. This is not recommended for larger pieces of code since
it increases output size by quite a bit (default: ``False``).
`classprefix`
Since the token types use relatively short class names, they may clash
with some of your own class names. In this case you can use the
`classprefix` option to give a string to prepend to all Pygments-generated
CSS class names for token types.
Note that this option also affects the output of `get_style_defs()`.
`cssclass`
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
If you set this option, the default selector for `get_style_defs()`
will be this class.
*New in Pygments 0.9:* If you select the ``'table'`` line numbers, the
wrapping table will have a CSS class of this string plus ``'table'``,
the default is accordingly ``'highlighttable'``.
`cssstyles`
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
`prestyles`
Inline CSS styles for the ``<pre>`` tag (default: ``''``). *New in
Pygments 0.11.*
`cssfile`
If the `full` option is true and this option is given, it must be the
name of an external file. If the filename does not include an absolute
path, the file's path will be assumed to be relative to the main output
file's path, if the latter can be found. The stylesheet is then written
to this file instead of the HTML file. *New in Pygments 0.6.*
`noclobber_cssfile`
If `cssfile` is given and the specified file exists, the css file will
not be overwritten. This allows the use of the `full` option in
combination with a user specified css file. Default is ``False``.
*New in Pygments 1.1.*
`linenos`
If set to ``'table'``, output line numbers as a table with two cells,
one containing the line numbers, the other the whole code. This is
copy-and-paste-friendly, but may cause alignment problems with some
browsers or fonts. If set to ``'inline'``, the line numbers will be
integrated in the ``<pre>`` tag that contains the code (that setting
is *new in Pygments 0.8*).
For compatibility with Pygments 0.7 and earlier, every true value
except ``'inline'`` means the same as ``'table'`` (in particular, that
means also ``True``).
The default value is ``False``, which means no line numbers at all.
**Note:** with the default ("table") line number mechanism, the line
numbers and code can have different line heights in Internet Explorer
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
CSS property (you get the default line spacing with ``line-height:
125%``).
`hl_lines`
Specify a list of lines to be highlighted. *New in Pygments 0.11.*
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenospecial`
If set to a number n > 0, every nth line number is given the CSS
class ``"special"`` (default: ``0``).
`nobackground`
If set to ``True``, the formatter won't output the background color
for the wrapping element (this automatically defaults to ``False``
when there is no wrapping element [eg: no argument for the
        `get_style_defs` method given]) (default: ``False``). *New in
Pygments 0.6.*
`lineseparator`
This string is output between lines of code. It defaults to ``"\n"``,
which is enough to break a line inside ``<pre>`` tags, but you can
e.g. set it to ``"<br>"`` to get HTML line breaks. *New in Pygments
0.7.*
`lineanchors`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in an anchor tag with a ``name`` of ``foo-linenumber``.
This allows easy linking to certain lines. *New in Pygments 0.9.*
`anchorlinenos`
If set to `True`, will wrap line numbers in <a> tags. Used in
combination with `linenos` and `lineanchors`.
**Subclassing the HTML formatter**
*New in Pygments 0.7.*
The HTML formatter is now built in a way that allows easy subclassing, thus
customizing the output HTML code. The `format()` method calls
`self._format_lines()` which returns a generator that yields tuples of ``(1,
line)``, where the ``1`` indicates that the ``line`` is a line of the
formatted source code.
    If the `nowrap` option is set, the generator is then iterated over and the
resulting HTML is output.
Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
other generators. These may add some HTML code to the one generated by
`_format_lines()`, either by modifying the lines generated by the latter,
then yielding them again with ``(1, line)``, and/or by yielding other HTML
code before or after the lines, with ``(0, html)``. The distinction between
source lines and other code makes it possible to wrap the generator multiple
times.
The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
A custom `HtmlFormatter` subclass could look like this:
.. sourcecode:: python
class CodeHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
return self._wrap_code(source)
def _wrap_code(self, source):
yield 0, '<code>'
for i, t in source:
if i == 1:
# it's a line of formatted code
t += '<br>'
yield i, t
yield 0, '</code>'
This results in wrapping the formatted lines with a ``<code>`` tag, where the
source lines are broken using ``<br>`` tags.
After calling `wrap()`, the `format()` method also adds the "line numbers"
and/or "full document" wrappers if the respective options are set. Then, all
HTML yielded by the wrapped generator is output.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.title = self._decodeifneeded(self.title)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.noclasses = get_bool_opt(options, 'noclasses', False)
self.classprefix = options.get('classprefix', '')
self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
linenos = options.get('linenos', False)
if linenos == 'inline':
self.linenos = 2
elif linenos:
# compatibility with <= 0.7
self.linenos = 1
else:
self.linenos = 0
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.lineseparator = options.get('lineseparator', '\n')
self.lineanchors = options.get('lineanchors', '')
self.anchorlinenos = options.get('anchorlinenos', False)
self.hl_lines = set()
for lineno in get_list_opt(options, 'hl_lines', []):
try:
self.hl_lines.add(int(lineno))
except ValueError:
pass
self._class_cache = {}
self._create_stylesheet()
def _get_css_class(self, ttype):
"""Return the css class of this token type prefixed with
the classprefix option."""
if ttype in self._class_cache:
return self._class_cache[ttype]
return self.classprefix + _get_ttype_class(ttype)
def _create_stylesheet(self):
t2c = self.ttype2class = {Token: ''}
c2s = self.class2style = {}
cp = self.classprefix
for ttype, ndef in self.style:
name = cp + _get_ttype_class(ttype)
style = ''
if ndef['color']:
style += 'color: #%s; ' % ndef['color']
if ndef['bold']:
style += 'font-weight: bold; '
if ndef['italic']:
style += 'font-style: italic; '
if ndef['underline']:
style += 'text-decoration: underline; '
if ndef['bgcolor']:
style += 'background-color: #%s; ' % ndef['bgcolor']
if ndef['border']:
style += 'border: 1px solid #%s; ' % ndef['border']
if style:
t2c[ttype] = name
# save len(ttype) to enable ordering the styles by
# hierarchy (necessary for CSS cascading rules!)
c2s[name] = (style[:-2], ttype, len(ttype))
def get_style_defs(self, arg=None):
"""
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
"""
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, basestring):
args = [arg]
else:
args = list(arg)
def prefix(cls):
if cls:
cls = '.' + cls
tmp = []
for arg in args:
tmp.append((arg and arg + ' ' or '') + cls)
return ', '.join(tmp)
styles = [(level, ttype, cls, style)
for cls, (style, ttype, level) in self.class2style.iteritems()
if cls and style]
styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
for (level, ttype, cls, style) in styles]
if arg and not self.nobackground and \
self.style.background_color is not None:
text_style = ''
if Text in self.ttype2class:
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
lines.insert(0, '%s { background: %s;%s }' %
(prefix(''), self.style.background_color, text_style))
if self.style.highlight_color is not None:
lines.insert(0, '%s.hll { background-color: %s }' %
(prefix(''), self.style.highlight_color))
return '\n'.join(lines)
def _decodeifneeded(self, value):
if isinstance(value, bytes):
if self.encoding:
return value.decode(self.encoding)
return value.decode()
return value
def _wrap_full(self, inner, outfile):
if self.cssfile:
if os.path.isabs(self.cssfile):
# it's an absolute filename
cssfilename = self.cssfile
else:
try:
filename = outfile.name
if not filename or filename[0] == '<':
# pseudo files, e.g. name == '<fdopen>'
raise AttributeError
cssfilename = os.path.join(os.path.dirname(filename),
self.cssfile)
except AttributeError:
print >>sys.stderr, 'Note: Cannot determine output file name, ' \
'using current directory as base for the CSS file name'
cssfilename = self.cssfile
# write CSS file only if noclobber_cssfile isn't given as an option.
try:
if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
cf = open(cssfilename, "w")
cf.write(CSSFILE_TEMPLATE %
{'styledefs': self.get_style_defs('body')})
cf.close()
except IOError, err:
err.strerror = 'Error writing CSS file: ' + err.strerror
raise
yield 0, (DOC_HEADER_EXTERNALCSS %
dict(title = self.title,
cssfile = self.cssfile,
encoding = self.encoding))
else:
yield 0, (DOC_HEADER %
dict(title = self.title,
styledefs = self.get_style_defs('body'),
encoding = self.encoding))
for t, line in inner:
yield t, line
yield 0, DOC_FOOTER
def _wrap_tablelinenos(self, inner):
dummyoutfile = StringIO.StringIO()
lncount = 0
for t, line in inner:
if t:
lncount += 1
dummyoutfile.write(line)
fl = self.linenostart
mw = len(str(lncount + fl - 1))
sp = self.linenospecial
st = self.linenostep
la = self.lineanchors
aln = self.anchorlinenos
if sp:
lines = []
for i in range(fl, fl+lncount):
if i % st == 0:
if i % sp == 0:
if aln:
lines.append('<a href="#%s-%d" class="special">%*d</a>' %
(la, i, mw, i))
else:
lines.append('<span class="special">%*d</span>' % (mw, i))
else:
if aln:
lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
else:
lines.append('%*d' % (mw, i))
else:
lines.append('')
ls = '\n'.join(lines)
else:
lines = []
for i in range(fl, fl+lncount):
if i % st == 0:
if aln:
lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
else:
lines.append('%*d' % (mw, i))
else:
lines.append('')
ls = '\n'.join(lines)
# in case you wonder about the seemingly redundant <div> here: since the
# content in the other cell also is wrapped in a div, some browsers in
# some configurations seem to mess up the formatting...
yield 0, ('<table class="%stable">' % self.cssclass +
'<tr><td class="linenos"><div class="linenodiv"><pre>' +
ls + '</pre></div></td><td class="code">')
yield 0, dummyoutfile.getvalue()
yield 0, '</td></tr></table>'
def _wrap_inlinelinenos(self, inner):
# need a list of lines since we need the width of a single number :(
lines = list(inner)
sp = self.linenospecial
st = self.linenostep
num = self.linenostart
mw = len(str(len(lines) + num - 1))
if sp:
for t, line in lines:
yield 1, '<span class="lineno%s">%*s</span> ' % (
num%sp == 0 and ' special' or '', mw,
(num%st and ' ' or num)) + line
num += 1
else:
for t, line in lines:
yield 1, '<span class="lineno">%*s</span> ' % (
mw, (num%st and ' ' or num)) + line
num += 1
def _wrap_lineanchors(self, inner):
s = self.lineanchors
i = 0
for t, line in inner:
if t:
i += 1
yield 1, '<a name="%s-%d"></a>' % (s, i) + line
else:
yield 0, line
def _wrap_div(self, inner):
style = []
if (self.noclasses and not self.nobackground and
self.style.background_color is not None):
style.append('background: %s' % (self.style.background_color,))
if self.cssstyles:
style.append(self.cssstyles)
style = '; '.join(style)
yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass)
+ (style and (' style="%s"' % style)) + '>')
for tup in inner:
yield tup
yield 0, '</div>\n'
def _wrap_pre(self, inner):
style = []
if self.prestyles:
style.append(self.prestyles)
if self.noclasses:
style.append('line-height: 125%')
style = '; '.join(style)
yield 0, ('<pre' + (style and ' style="%s"' % style) + '>')
for tup in inner:
yield tup
yield 0, '</pre>'
def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
Yield individual lines.
"""
nocls = self.noclasses
lsep = self.lineseparator
# for <span style=""> lookup only
getcls = self.ttype2class.get
c2s = self.class2style
lspan = ''
line = ''
for ttype, value in tokensource:
if nocls:
cclass = getcls(ttype)
while cclass is None:
ttype = ttype.parent
cclass = getcls(ttype)
cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
else:
cls = self._get_css_class(ttype)
cspan = cls and '<span class="%s">' % cls or ''
parts = escape_html(value).split('\n')
# for all but the last line
for part in parts[:-1]:
if line:
if lspan != cspan:
line += (lspan and '</span>') + cspan + part + \
(cspan and '</span>') + lsep
else: # both are the same
line += part + (lspan and '</span>') + lsep
yield 1, line
line = ''
elif part:
yield 1, cspan + part + (cspan and '</span>') + lsep
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line += (lspan and '</span>') + cspan + parts[-1]
lspan = cspan
else:
line += parts[-1]
elif parts[-1]:
line = cspan + parts[-1]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
yield 1, line + (lspan and '</span>') + lsep
def _highlight_lines(self, tokensource):
"""
Highlighted the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value
def wrap(self, source, outfile):
"""
Wrap the ``source``, which is a generator yielding
individual lines, in custom generators. See docstring
for `format`. Can be overridden.
"""
return self._wrap_div(self._wrap_pre(source))
def format_unencoded(self, tokensource, outfile):
"""
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
        is part of the original tokensource being highlighted; if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
"""
source = self._format_lines(tokensource)
if self.hl_lines:
source = self._highlight_lines(source)
if not self.nowrap:
if self.linenos == 2:
source = self._wrap_inlinelinenos(source)
if self.lineanchors:
source = self._wrap_lineanchors(source)
source = self.wrap(source, outfile)
if self.linenos == 1:
source = self._wrap_tablelinenos(source)
if self.full:
source = self._wrap_full(source, outfile)
for t, piece in source:
outfile.write(piece)
|
apache-2.0
|
Cinntax/home-assistant
|
homeassistant/components/snmp/device_tracker.py
|
2
|
3830
|
"""Support for fetching WiFi associations through SNMP."""
import binascii
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_AUTH_KEY,
CONF_BASEOID,
CONF_COMMUNITY,
CONF_PRIV_KEY,
DEFAULT_COMMUNITY,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_BASEOID): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_COMMUNITY, default=DEFAULT_COMMUNITY): cv.string,
vol.Inclusive(CONF_AUTH_KEY, "keys"): cv.string,
vol.Inclusive(CONF_PRIV_KEY, "keys"): cv.string,
}
)
def get_scanner(hass, config):
"""Validate the configuration and return an SNMP scanner."""
scanner = SnmpScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class SnmpScanner(DeviceScanner):
"""Queries any SNMP capable Access Point for connected devices."""
def __init__(self, config):
"""Initialize the scanner."""
from pysnmp.entity.rfc3413.oneliner import cmdgen
from pysnmp.entity import config as cfg
self.snmp = cmdgen.CommandGenerator()
self.host = cmdgen.UdpTransportTarget((config[CONF_HOST], 161))
if CONF_AUTH_KEY not in config or CONF_PRIV_KEY not in config:
self.auth = cmdgen.CommunityData(config[CONF_COMMUNITY])
else:
self.auth = cmdgen.UsmUserData(
config[CONF_COMMUNITY],
config[CONF_AUTH_KEY],
config[CONF_PRIV_KEY],
authProtocol=cfg.usmHMACSHAAuthProtocol,
privProtocol=cfg.usmAesCfb128Protocol,
)
self.baseoid = cmdgen.MibVariable(config[CONF_BASEOID])
self.last_results = []
        # Test that the router is accessible
data = self.get_snmp_data()
self.success_init = data is not None
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [client["mac"] for client in self.last_results if client.get("mac")]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
# We have no names
return None
def _update_info(self):
"""Ensure the information from the device is up to date.
        Return a boolean indicating whether the scan was successful.
"""
if not self.success_init:
return False
data = self.get_snmp_data()
if not data:
return False
self.last_results = data
return True
def get_snmp_data(self):
"""Fetch MAC addresses from access point via SNMP."""
devices = []
errindication, errstatus, errindex, restable = self.snmp.nextCmd(
self.auth, self.host, self.baseoid
)
if errindication:
_LOGGER.error("SNMPLIB error: %s", errindication)
return
if errstatus:
_LOGGER.error(
"SNMP error: %s at %s",
errstatus.prettyPrint(),
errindex and restable[int(errindex) - 1][0] or "?",
)
return
for resrow in restable:
for _, val in resrow:
try:
mac = binascii.hexlify(val.asOctets()).decode("utf-8")
except AttributeError:
continue
_LOGGER.debug("Found MAC address: %s", mac)
mac = ":".join([mac[i : i + 2] for i in range(0, len(mac), 2)])
devices.append({"mac": mac})
return devices
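# A minimal, hypothetical configuration.yaml entry for this platform; the
# host and baseoid values are placeholders, not taken from the source above:
#
#   device_tracker:
#     - platform: snmp
#       host: 192.168.1.1
#       community: public
#       baseoid: 1.3.6.1.4.1.14988.1.1.1.2.1.1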
|
apache-2.0
|
gitprouser/appengine-bottle-skeleton
|
lib/simplejson/tests/test_indent.py
|
147
|
2568
|
from unittest import TestCase
import textwrap
import simplejson as json
from simplejson.compat import StringIO
class TestIndent(TestCase):
def test_indent(self):
h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh',
'i-vhbjkhnth',
{'nifty': 87}, {'field': 'yes', 'morefield': False} ]
expect = textwrap.dedent("""\
[
\t[
\t\t"blorpie"
\t],
\t[
\t\t"whoops"
\t],
\t[],
\t"d-shtaeou",
\t"d-nthiouh",
\t"i-vhbjkhnth",
\t{
\t\t"nifty": 87
\t},
\t{
\t\t"field": "yes",
\t\t"morefield": false
\t}
]""")
d1 = json.dumps(h)
d2 = json.dumps(h, indent='\t', sort_keys=True, separators=(',', ': '))
        d3 = json.dumps(h, indent='  ', sort_keys=True, separators=(',', ': '))
d4 = json.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
h1 = json.loads(d1)
h2 = json.loads(d2)
h3 = json.loads(d3)
h4 = json.loads(d4)
self.assertEqual(h1, h)
self.assertEqual(h2, h)
self.assertEqual(h3, h)
self.assertEqual(h4, h)
        self.assertEqual(d3, expect.replace('\t', '  '))
        self.assertEqual(d4, expect.replace('\t', '  '))
# NOTE: Python 2.4 textwrap.dedent converts tabs to spaces,
# so the following is expected to fail. Python 2.4 is not a
# supported platform in simplejson 2.1.0+.
self.assertEqual(d2, expect)
def test_indent0(self):
h = {3: 1}
def check(indent, expected):
d1 = json.dumps(h, indent=indent)
self.assertEqual(d1, expected)
sio = StringIO()
json.dump(h, sio, indent=indent)
self.assertEqual(sio.getvalue(), expected)
# indent=0 should emit newlines
check(0, '{\n"3": 1\n}')
# indent=None is more compact
check(None, '{"3": 1}')
def test_separators(self):
lst = [1,2,3,4]
expect = '[\n1,\n2,\n3,\n4\n]'
expect_spaces = '[\n1, \n2, \n3, \n4\n]'
        # Ensure that separators still work
self.assertEqual(
expect_spaces,
json.dumps(lst, indent=0, separators=(', ', ': ')))
# Force the new defaults
self.assertEqual(
expect,
json.dumps(lst, indent=0, separators=(',', ': ')))
# Added in 2.1.4
self.assertEqual(
expect,
json.dumps(lst, indent=0))
|
apache-2.0
|
deisi/home-assistant
|
homeassistant/components/switch/tellduslive.py
|
24
|
1996
|
"""
Support for Tellstick switches using Tellstick Net.
This platform uses the Telldus Live online service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.tellduslive/
"""
import logging
from homeassistant.components import tellduslive
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup Tellstick switches."""
if discovery_info is None:
return
add_devices(TelldusLiveSwitch(switch) for switch in discovery_info)
class TelldusLiveSwitch(ToggleEntity):
"""Representation of a Tellstick switch."""
def __init__(self, switch_id):
"""Initialize the switch."""
self._id = switch_id
self.update()
_LOGGER.debug("created switch %s", self)
def update(self):
"""Get the latest date and update the state."""
tellduslive.NETWORK.update_switches()
self._switch = tellduslive.NETWORK.get_switch(self._id)
@property
def should_poll(self):
"""Polling is needed."""
return True
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return True
@property
def name(self):
"""Return the name of the switch if any."""
return self._switch["name"]
@property
def available(self):
"""Return the state of the switch."""
return not self._switch.get("offline", False)
@property
def is_on(self):
"""Return true if switch is on."""
from tellive.live import const
return self._switch["state"] == const.TELLSTICK_TURNON
def turn_on(self, **kwargs):
"""Turn the switch on."""
tellduslive.NETWORK.turn_switch_on(self._id)
def turn_off(self, **kwargs):
"""Turn the switch off."""
tellduslive.NETWORK.turn_switch_off(self._id)
|
mit
|