repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
jjack15/CS402-Project-UTK | OnlinePythonTutor/v3/watch_module.py | 3 | 2196 | # Created by Peter Norvig
# TODO: need to somehow enable sys._getframe()
# even though 'sys' is technically locked down
#import inspect # old and really slow!
import sys
class watchedlist(list):
"""A class that wraps a list, and monitors sets and gets.
Optionally monitors local variables."""
def __setitem__(self, i, val):
print('setting A[{}] = {}'.format(i, val))
self.watchlocals()
return list.__setitem__(self, i, val)
def __getitem__(self, i):
        print('fetching A[{}]; value is {};'.format(
            i, list.__getitem__(self, i)))
self.watchlocals()
return list.__getitem__(self, i)
def watchlocals(self):
if hasattr(self, 'watchedlocals'):
#D = inspect.stack()[2][0].f_locals # old and really slow!
D = sys._getframe(2).f_locals
print(' watched locals: {}'.format(
{var: D[var] for var in self.watchedlocals}))
def watch(object, watchedspec):
"""Wrap object with a wrapper class (like watchedlist).
watchedspec is either None or a callable (like watchedlist), or
a 2-tuple of (callable, local_var_names), where local_var_names
can be a string or a sequence of strings."""
if not watchedspec:
return object
kind, locals = (watchedspec if isinstance(watchedspec, (tuple, list)) else
(watchedspec, ()))
if isinstance(locals, str): locals = locals.split()
watched = kind(object)
watched.watchedlocals = locals
return watched
class watchfn(object):
"""Decorator that watches the arguments of a function.
Specify watchedspecs for each positional argument, and optionally
for keyword arguments."""
def __init__(self, *args, **kwargs):
self.args, self.kwargs = args, kwargs
def __call__(self, fn):
def wrapped_fn(*args, **kwargs):
args = [watch(obj, spec) for (obj, spec) in zip(args, self.args)]
            kwargs = {k: watch(kwargs[k], self.kwargs.get(k, None)) for k in kwargs}
return fn(*args, **kwargs)
#wrapped_fn.__name__ = fn.__name__
return wrapped_fn
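# Illustrative usage (not part of the original file; the function and variable
# names below are made up): watching a function's list argument while also
# reporting the caller's local variable 'i' on every get and set.
if __name__ == '__main__':
    @watchfn((watchedlist, 'i'))
    def double_all(A):
        for i in range(len(A)):
            A[i] = A[i] * 2
        return A

    double_all([1, 2, 3])  # prints each fetch/set plus the watched local 'i'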
| agpl-3.0 |
krikru/tensorflow-opencl | tensorflow/contrib/training/python/training/failure_tolerator_test.py | 54 | 4445 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.failure_tolerator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.contrib.training.python.training import failure_tolerator
from tensorflow.python.platform import test
class ForgiveMe(Exception):
pass
class Unforgivable(Exception):
pass
class FailureToleratorTest(test.TestCase):
# Tests for the FailureTolerator helper
def testHandledExceptions(self):
tolerator = failure_tolerator.FailureTolerator(
init_delay=0.0, handled_exceptions=[ForgiveMe])
with tolerator.forgive():
raise ForgiveMe()
with self.assertRaises(Unforgivable):
with tolerator.forgive():
raise Unforgivable()
def testLimit(self):
tolerator = failure_tolerator.FailureTolerator(
init_delay=0.0, limit=3, handled_exceptions=[ForgiveMe])
with tolerator.forgive():
raise ForgiveMe()
with tolerator.forgive():
raise ForgiveMe()
with self.assertRaises(ForgiveMe):
with tolerator.forgive():
raise ForgiveMe()
def testDelaysExponentially(self):
# Tests that delays are appropriate, with exponential backoff.
tolerator = failure_tolerator.FailureTolerator(
init_delay=1.0, backoff_factor=1.5, handled_exceptions=[ForgiveMe])
with test.mock.patch.object(time, 'sleep') as mock_sleep:
with tolerator.forgive():
raise ForgiveMe()
with tolerator.forgive():
raise ForgiveMe()
with tolerator.forgive():
raise ForgiveMe()
with tolerator.forgive():
raise ForgiveMe()
mock_sleep.assert_has_calls(
[test.mock.call(1.0), test.mock.call(1.5), test.mock.call(2.25)],
any_order=False)
self.assertEquals(3, mock_sleep.call_count)
def testForgivesSuccessfully(self):
# Tests that exceptions are forgiven after forgive_after_seconds
tolerator = failure_tolerator.FailureTolerator(
limit=3,
init_delay=0.0,
backoff_factor=1.0, # no exponential backoff
forgive_after_seconds=10.0,
handled_exceptions=[ForgiveMe])
cur_time = 10.0
with test.mock.patch.object(time, 'time') as mock_time:
mock_time.side_effect = lambda: cur_time
with tolerator.forgive():
raise ForgiveMe()
cur_time = 15.0
with tolerator.forgive():
raise ForgiveMe()
cur_time = 20.1 # advance more than forgive_after_seconds
with tolerator.forgive():
raise ForgiveMe() # should not be raised
cur_time = 24.0
with self.assertRaises(ForgiveMe):
with tolerator.forgive():
raise ForgiveMe() # third exception in < 10secs (t=15, 20.1, 24)
def testForgivesDoesNotCountDelays(self):
tolerator = failure_tolerator.FailureTolerator(
limit=3,
init_delay=1.0,
backoff_factor=1.0, # no exponential backoff
forgive_after_seconds=10.0,
handled_exceptions=[ForgiveMe])
cur_time = [10.0]
def _sleep(x):
cur_time[0] += x
with test.mock.patch.object(time, 'sleep') as mock_sleep:
with test.mock.patch.object(time, 'time') as mock_time:
mock_time.side_effect = lambda: cur_time[0]
mock_sleep.side_effect = _sleep
with tolerator.forgive():
raise ForgiveMe()
cur_time[0] += 1.0
with tolerator.forgive():
raise ForgiveMe()
self.assertEquals(12.0, time.time()) # ensure there was a sleep
cur_time[0] = 20.1 # 10.1 seconds after the first failure!
with self.assertRaises(ForgiveMe):
with tolerator.forgive():
raise ForgiveMe()
if __name__ == '__main__':
test.main()
| apache-2.0 |
asadoughi/python-neutronclient | neutronclient/openstack/common/strutils.py | 1 | 7376 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import re
import sys
import unicodedata
import six
from neutronclient.openstack.common.gettextutils import _ # noqa
# Used for looking up extensions of text
# to their 'multiplied' byte amount
BYTE_MULTIPLIERS = {
'': 1,
't': 1024 ** 4,
'g': 1024 ** 3,
'm': 1024 ** 2,
'k': 1024,
}
BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)')
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
def int_from_bool_as_string(subject):
"""Interpret a string as a boolean and return either 1 or 0.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
return bool_from_string(subject) and 1 or 0
def bool_from_string(subject, strict=False):
"""Interpret a string as a boolean.
A case-insensitive match is performed such that strings matching 't',
'true', 'on', 'y', 'yes', or '1' are considered True and, when
`strict=False`, anything else is considered False.
Useful for JSON-decoded stuff and config file parsing.
If `strict=True`, unrecognized values, including None, will raise a
ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if not isinstance(subject, six.string_types):
subject = str(subject)
lowered = subject.strip().lower()
if lowered in TRUE_STRINGS:
return True
elif lowered in FALSE_STRINGS:
return False
elif strict:
acceptable = ', '.join(
"'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
msg = _("Unrecognized value '%(val)s', acceptable values are:"
" %(acceptable)s") % {'val': subject,
'acceptable': acceptable}
raise ValueError(msg)
else:
return False
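# Illustrative behaviour (not part of the original module; results follow from
# the implementation above):
#   bool_from_string('YES')                 -> True
#   bool_from_string('off')                 -> False
#   bool_from_string('maybe')               -> False (lenient default)
#   bool_from_string('maybe', strict=True)  -> raises ValueError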
def safe_decode(text, incoming=None, errors='strict'):
"""Decodes incoming str using `incoming` if they're not already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded
representation of it.
    :raises TypeError: If text is not an instance of str
"""
if not isinstance(text, six.string_types):
raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, six.text_type):
return text
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
try:
return text.decode(incoming, errors)
except UnicodeDecodeError:
# Note(flaper87) If we get here, it means that
# sys.stdin.encoding / sys.getdefaultencoding
# didn't return a suitable encoding to decode
# text. This happens mostly when global LANG
# var is not set correctly and there's no
# default encoding. In this case, most likely
# python will use ASCII or ANSI encoders as
# default encodings but they won't be capable
# of decoding non-ASCII characters.
#
# Also, UTF-8 is being used since it's an ASCII
# extension.
return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
encoding='utf-8', errors='strict'):
"""Encodes incoming str/unicode using `encoding`.
If incoming is not specified, text is expected to be encoded with
current python's default encoding. (`sys.getdefaultencoding`)
:param incoming: Text's current encoding
:param encoding: Expected encoding for text (Default UTF-8)
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a bytestring `encoding` encoded
representation of it.
    :raises TypeError: If text is not an instance of str
"""
if not isinstance(text, six.string_types):
raise TypeError(_("%s can't be encoded") % type(text).capitalize())
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
if isinstance(text, six.text_type):
return text.encode(encoding, errors)
elif text and encoding != incoming:
# Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors)
return text.encode(encoding, errors)
return text
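# Illustrative behaviour under Python 2 (not part of the original module):
#   safe_encode(u'caf\xe9')      -> 'caf\xc3\xa9' (UTF-8 encoded bytes)
#   safe_decode('caf\xc3\xa9')   -> u'caf\xe9'
#   safe_decode(42)              -> raises TypeError (not a string type)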
def to_bytes(text, default=0):
"""Converts a string into an integer of bytes.
Looks at the last characters of the text to determine
what conversion is needed to turn the input text into a byte number.
Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)
:param text: String input for bytes size conversion.
:param default: Default return value when text is blank.
"""
match = BYTE_REGEX.search(text)
if match:
magnitude = int(match.group(1))
mult_key_org = match.group(2)
if not mult_key_org:
return magnitude
elif text:
msg = _('Invalid string format: %s') % text
raise TypeError(msg)
else:
return default
mult_key = mult_key_org.lower().replace('b', '', 1)
multiplier = BYTE_MULTIPLIERS.get(mult_key)
if multiplier is None:
msg = _('Unknown byte multiplier: %s') % mult_key_org
raise TypeError(msg)
return magnitude * multiplier
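# Illustrative behaviour (not part of the original module):
#   to_bytes('512')   -> 512          (no suffix: plain byte count)
#   to_bytes('10K')   -> 10240        (10 * 1024)
#   to_bytes('1GB')   -> 1073741824   (1024 ** 3)
#   to_bytes('')      -> 0            (blank input falls back to `default`)
#   to_bytes('10X')   -> raises TypeError (unknown multiplier)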
def to_slug(value, incoming=None, errors="strict"):
"""Normalize string.
Convert to lowercase, remove non-word characters, and convert spaces
to hyphens.
Inspired by Django's `slugify` filter.
:param value: Text to slugify
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: slugified unicode representation of `value`
:raises TypeError: If text is not an instance of str
"""
value = safe_decode(value, incoming, errors)
# NOTE(aababilov): no need to use safe_(encode|decode) here:
# encodings are always "ascii", error handling is always "ignore"
# and types are always known (first: unicode; second: str)
value = unicodedata.normalize("NFKD", value).encode(
"ascii", "ignore").decode("ascii")
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
return SLUGIFY_HYPHENATE_RE.sub("-", value)
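# Illustrative behaviour (not part of the original module):
#   to_slug(u'Hello, World!')    -> u'hello-world'
#   to_slug(u'  spaced   out ')  -> u'spaced-out'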
| apache-2.0 |
ngonzalvez/sentry | src/sentry/migrations/0064_index_checksum.py | 36 | 19092 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.create_index('sentry_groupedmessage', ['project_id', 'checksum'])
def backwards(self, orm):
db.delete_index('sentry_groupedmessage', ['project_id', 'checksum'])
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'sentry.filterkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'views': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectcountbyminute': {
'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'sentry.searchtoken': {
'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
'document': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.view': {
'Meta': {'object_name': 'View'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'verbose_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'verbose_name_plural': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
cp16net/trove | trove/cmd/common.py | 2 | 2441 | # Copyright 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def initialize(extra_opts=None, pre_logging=None):
# Initialize localization support (the underscore character).
import gettext
gettext.install('trove', unicode=1)
# Apply whole eventlet.monkey_patch excluding 'thread' module.
# Decision for 'thread' module patching will be made
# after debug_utils is set up.
import eventlet
eventlet.monkey_patch(all=True, thread=False)
# Import only the modules necessary to initialize logging and determine if
# debug_utils are enabled.
import sys
from oslo_log import log as logging
from trove.common import cfg
from trove.common import debug_utils
conf = cfg.CONF
if extra_opts:
conf.register_cli_opts(extra_opts)
cfg.parse_args(sys.argv)
if pre_logging:
pre_logging(conf)
logging.setup(conf, None)
debug_utils.setup()
# Patch 'thread' module if debug is disabled.
if not debug_utils.enabled():
eventlet.monkey_patch(thread=True)
# rpc module must be loaded after decision about thread monkeypatching
# because if thread module is not monkeypatched we can't use eventlet
# executor from oslo_messaging library.
from trove import rpc
rpc.init(conf)
# Initialize Trove database.
from trove.db import get_db_api
get_db_api().configure_db(conf)
return conf # May be used by other scripts
def with_initialize(main_function=None, **kwargs):
"""
Decorates a script main function to make sure that dependency imports and
initialization happens correctly.
"""
def apply(main_function):
def run():
conf = initialize(**kwargs)
return main_function(conf)
return run
if main_function:
return apply(main_function)
else:
return apply
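# Illustrative usage (not from the original file; assumes a deployed Trove
# environment with its configuration files in place). The decorator defers the
# heavyweight imports and setup until the wrapped entry point actually runs:
#
#     @with_initialize
#     def main(conf):
#         ...  # use the fully initialized `conf` object here
#
#     if __name__ == '__main__':
#         main()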
| apache-2.0 |
ethanrublee/ecto-release | test/benchmark/metrics.py | 2 | 4617 | #!/usr/bin/env python
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import ecto
import ecto_test
import sys
def test_nodelay():
plasm = ecto.Plasm()
ping = ecto_test.Ping("Ping")
metrics = ecto_test.Metrics("Metrics", queue_size=10)
plasm.connect(ping[:] >> metrics[:])
sched = ecto.schedulers.Threadpool(plasm)
sched.execute(niter=10000, nthreads=1)
print "Hz:", metrics.outputs.hz, " Latency in seconds: %f" % metrics.outputs.latency_seconds
# these are kinda loose
assert metrics.outputs.hz > 5000
assert metrics.outputs.latency_seconds < 0.0001
def test_20hz():
plasm = ecto.Plasm()
ping = ecto_test.Ping("Ping")
throttle = ecto_test.Throttle("Throttle", rate=20)
metrics = ecto_test.Metrics("Metrics", queue_size=10)
plasm.connect(ping[:] >> throttle[:],
throttle[:] >> metrics[:])
sched = ecto.schedulers.Threadpool(plasm)
sched.execute(niter=100, nthreads=1)
print "Hz:", metrics.outputs.hz, " Latency in seconds: %f" % metrics.outputs.latency_seconds
# these are kinda loose
assert 19 < metrics.outputs.hz < 21
assert 0.04 < metrics.outputs.latency_seconds < 0.06
def makeplasm(n_nodes):
plasm = ecto.Plasm()
ping = ecto_test.Ping("Ping")
throttle = ecto_test.Sleep("Sleep_0", seconds=1.0/n_nodes)
plasm.connect(ping[:] >> throttle[:])
for j in range(n_nodes-1): # one has already been added
throttle_next = ecto_test.Sleep("Sleep_%u" % (j+1), seconds=1.0/n_nodes)
plasm.connect(throttle, "out", throttle_next, "in")
throttle = throttle_next
metrics = ecto_test.Metrics("Metrics", queue_size=4)
plasm.connect(throttle[:] >> metrics[:])
# o = open('graph.dot', 'w')
# print >>o, plasm.viz()
# o.close()
# print "\n", plasm.viz(), "\n"
return (plasm, metrics)
def test_st(niter, n_nodes):
(plasm, metrics) = makeplasm(n_nodes)
#sched = ecto.schedulers.Threadpool(plasm)
#sched.execute(nthreads, niter)
sched = ecto.schedulers.Singlethreaded(plasm)
sched.execute(niter)
print "Hz:", metrics.outputs.hz, " Latency in seconds:", metrics.outputs.latency_seconds
assert 0.95 < metrics.outputs.hz < 1.05
assert 0.95 < metrics.outputs.latency_seconds < 1.05
#
# It is hard to test the middle cases, i.e. if you have one thread
# per node, things should run at n_nodes hz and 1 second latency but
# if there are less than that, things are somewhere in the middle.
# Also your latency tends to be worse as you have to wait for the
# graph to "fill up"
#
def test_tp(niter, n_nodes):
(plasm, metrics) = makeplasm(n_nodes)
sched = ecto.schedulers.Threadpool(plasm)
sched.execute(niter=niter, nthreads=n_nodes)
print "Hz:", metrics.outputs.hz, " Latency in seconds:", metrics.outputs.latency_seconds
assert n_nodes * 0.95 < metrics.outputs.hz < n_nodes * 1.05
assert 0.9 < metrics.outputs.latency_seconds < 1.1
test_nodelay()
test_20hz()
test_st(5, 5)
test_st(5, 12)
test_tp(20, 15)
test_tp(20, 10)
test_tp(20, 5)
| bsd-3-clause |
littlstar/chromium.src | tools/telemetry/telemetry/results/gtest_progress_reporter.py | 6 | 4171 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
from telemetry.results import progress_reporter
from telemetry.value import failure
from telemetry.value import skip
class GTestProgressReporter(progress_reporter.ProgressReporter):
"""A progress reporter that outputs the progress report in gtest style."""
def __init__(self, output_stream, output_skipped_tests_summary=False):
super(GTestProgressReporter, self).__init__()
self._output_stream = output_stream
self._timestamp = None
self._output_skipped_tests_summary = output_skipped_tests_summary
def _GetMs(self):
assert self._timestamp is not None, 'Did not call WillRunPage.'
return (time.time() - self._timestamp) * 1000
def DidAddValue(self, value):
super(GTestProgressReporter, self).DidAddValue(value)
if isinstance(value, failure.FailureValue):
print >> self._output_stream, failure.GetStringFromExcInfo(
value.exc_info)
self._output_stream.flush()
elif isinstance(value, skip.SkipValue):
print >> self._output_stream, '===== SKIPPING TEST %s: %s =====' % (
value.page.display_name, value.reason)
# TODO(chrishenry): Consider outputting metric values as well. For
# e.g., it can replace BuildbotOutputFormatter in
# --output-format=html, which we used only so that users can grep
# the results without opening results.html.
def WillRunPage(self, page_test_results):
super(GTestProgressReporter, self).WillRunPage(page_test_results)
print >> self._output_stream, '[ RUN ]', (
page_test_results.current_page.display_name)
self._output_stream.flush()
self._timestamp = time.time()
def DidRunPage(self, page_test_results):
super(GTestProgressReporter, self).DidRunPage(page_test_results)
page = page_test_results.current_page
if page_test_results.current_page_run.failed:
print >> self._output_stream, '[ FAILED ]', page.display_name, (
'(%0.f ms)' % self._GetMs())
else:
print >> self._output_stream, '[ OK ]', page.display_name, (
'(%0.f ms)' % self._GetMs())
self._output_stream.flush()
def WillAttemptPageRun(self, page_test_results, attempt_count, max_attempts):
super(GTestProgressReporter, self).WillAttemptPageRun(
page_test_results, attempt_count, max_attempts)
# A failed attempt will have at least 1 value.
if attempt_count != 1:
print >> self._output_stream, (
'===== RETRYING PAGE RUN (attempt %s out of %s allowed) =====' % (
attempt_count, max_attempts))
print >> self._output_stream, (
'Page run attempt failed and will be retried. '
'Discarding previous results.')
def DidFinishAllTests(self, page_test_results):
super(GTestProgressReporter, self).DidFinishAllTests(page_test_results)
successful_runs = []
failed_runs = []
for run in page_test_results.all_page_runs:
if run.failed:
failed_runs.append(run)
else:
successful_runs.append(run)
unit = 'test' if len(successful_runs) == 1 else 'tests'
print >> self._output_stream, '[ PASSED ]', (
'%d %s.' % (len(successful_runs), unit))
if len(failed_runs) > 0:
unit = 'test' if len(failed_runs) == 1 else 'tests'
print >> self._output_stream, '[ FAILED ]', (
'%d %s, listed below:' % (len(page_test_results.failures), unit))
for failed_run in failed_runs:
print >> self._output_stream, '[ FAILED ] ', (
failed_run.page.display_name)
print >> self._output_stream
count = len(failed_runs)
unit = 'TEST' if count == 1 else 'TESTS'
print >> self._output_stream, '%d FAILED %s' % (count, unit)
print >> self._output_stream
if self._output_skipped_tests_summary:
if len(page_test_results.skipped_values) > 0:
print >> self._output_stream, 'Skipped pages:\n%s\n' % ('\n'.join(
v.page.display_name for v in page_test_results.skipped_values))
self._output_stream.flush()
| bsd-3-clause |
Yannig/ansible | lib/ansible/modules/cloud/amazon/cloudformation.py | 8 | 25361 | #!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# upcoming features:
# - Ted's multifile YAML concatenation
# - changesets (and blocking/waiting for them)
# - finish AWSRetry conversion
# - move create/update code out of main
# - unit tests
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: cloudformation
short_description: Create or delete an AWS CloudFormation stack
description:
- Launches or updates an AWS CloudFormation stack and waits for it complete.
notes:
- As of version 2.3, migrated to boto3 to enable new features. To match existing behavior, YAML parsing is done in the module, not given to AWS as YAML.
This will change (in fact, it may change before 2.3 is out).
version_added: "1.1"
options:
stack_name:
description:
- name of the cloudformation stack
required: true
disable_rollback:
description:
      - If a stack fails to form, rollback will remove the stack
required: false
default: "false"
choices: [ "true", "false" ]
template_parameters:
description:
- a list of hashes of all the template variables for the stack
required: false
default: {}
state:
description:
- If state is "present", stack will be created. If state is "present" and if stack exists and template has changed, it will be updated.
If state is "absent", stack will be removed.
required: true
template:
description:
- The local path of the cloudformation template.
- This must be the full path to the file, relative to the working directory. If using roles this may look
like "roles/cloudformation/files/cloudformation-example.json".
- If 'state' is 'present' and the stack does not exist yet, either 'template' or 'template_url' must be specified (but not both). If 'state' is
present, the stack does exist, and neither 'template' nor 'template_url' are specified, the previous template will be reused.
required: false
default: null
notification_arns:
description:
- The Simple Notification Service (SNS) topic ARNs to publish stack related events.
required: false
default: null
version_added: "2.0"
stack_policy:
description:
- the path of the cloudformation stack policy. A policy cannot be removed once placed, but it can be modified.
        (for instance, [allow all updates](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051))
required: false
default: null
version_added: "1.9"
tags:
description:
- Dictionary of tags to associate with stack and its resources during stack creation. Can be updated later, updating tags removes previous entries.
required: false
default: null
version_added: "1.4"
template_url:
description:
- Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region
as the stack.
- If 'state' is 'present' and the stack does not exist yet, either 'template' or 'template_url' must be specified (but not both). If 'state' is
present, the stack does exist, and neither 'template' nor 'template_url' are specified, the previous template will be reused.
required: false
version_added: "2.0"
create_changeset:
description:
- "If stack already exists create a changeset instead of directly applying changes.
See the AWS Change Sets docs U(http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html).
WARNING: if the stack does not exist, it will be created without changeset. If the state is absent, the stack will be deleted immediately with no
changeset."
required: false
default: false
version_added: "2.4"
changeset_name:
description:
- Name given to the changeset when creating a changeset, only used when create_changeset is true. By default a name prefixed with Ansible-STACKNAME
is generated based on input parameters.
See the AWS Change Sets docs U(http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-changesets.html)
required: false
default: null
version_added: "2.4"
template_format:
description:
- (deprecated) For local templates, allows specification of json or yaml format. Templates are now passed raw to CloudFormation regardless of format.
This parameter is ignored since Ansible 2.3.
default: json
choices: [ json, yaml ]
required: false
version_added: "2.0"
role_arn:
description:
- The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
docs U(http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
required: false
default: null
version_added: "2.3"
author: "James S. Martin (@jsmartin)"
extends_documentation_fragment:
- aws
- ec2
requirements: [ botocore>=1.4.57 ]
'''
EXAMPLES = '''
# Basic task example
- name: launch ansible cloudformation example
cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "files/cloudformation-example.json"
template_parameters:
KeyName: "jmartin"
DiskType: "ephemeral"
InstanceType: "m1.small"
ClusterSize: 3
tags:
Stack: "ansible-cloudformation"
# Basic role example
- name: launch ansible cloudformation example
cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "roles/cloudformation/files/cloudformation-example.json"
template_parameters:
KeyName: "jmartin"
DiskType: "ephemeral"
InstanceType: "m1.small"
ClusterSize: 3
tags:
Stack: "ansible-cloudformation"
# Removal example
- name: tear down old deployment
cloudformation:
stack_name: "ansible-cloudformation-old"
state: "absent"
# Use a template from a URL
- name: launch ansible cloudformation example
cloudformation:
stack_name: "ansible-cloudformation"
state: present
region: us-east-1
disable_rollback: true
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
args:
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
ClusterSize: 3
tags:
Stack: ansible-cloudformation
# Use a template from a URL, and assume a role to execute
- name: launch ansible cloudformation example with role assumption
cloudformation:
stack_name: "ansible-cloudformation"
state: present
region: us-east-1
disable_rollback: true
template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template
role_arn: 'arn:aws:iam::123456789012:role/cloudformation-iam-role'
args:
template_parameters:
KeyName: jmartin
DiskType: ephemeral
InstanceType: m1.small
ClusterSize: 3
tags:
Stack: ansible-cloudformation
'''
RETURN = '''
events:
type: list
description: Most recent events in Cloudformation's event log. This may be from a previous run in some cases.
returned: always
sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"]
log:
description: Debugging logs. Useful when modifying or finding an error.
returned: always
type: list
sample: ["updating stack"]
stack_resources:
description: AWS stack resources and their status. List of dictionaries, one dict per resource.
returned: state == present
type: list
sample: [
{
"last_updated_time": "2016-10-11T19:40:14.979000+00:00",
"logical_resource_id": "CFTestSg",
"physical_resource_id": "cloudformation2-CFTestSg-16UQ4CYQ57O9F",
"resource_type": "AWS::EC2::SecurityGroup",
"status": "UPDATE_COMPLETE",
"status_reason": null
}
]
stack_outputs:
type: dict
description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
returned: state == present
sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
''' # NOQA
import json
import time
import traceback
from hashlib import sha1
try:
import boto3
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
import ansible.module_utils.ec2
# import a class, otherwise we'll use a fully qualified path
from ansible.module_utils.ec2 import AWSRetry, boto_exception
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes
def get_stack_events(cfn, stack_name):
'''This event data was never correct, it worked as a side effect. So the v2.3 format is different.'''
ret = {'events':[], 'log':[]}
try:
events = cfn.describe_stack_events(StackName=stack_name)
except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
error_msg = boto_exception(err)
if 'does not exist' in error_msg:
# missing stack, don't bail.
ret['log'].append('Stack does not exist.')
return ret
ret['log'].append('Unknown error: ' + str(error_msg))
return ret
for e in events.get('StackEvents', []):
eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e)
ret['events'].append(eventline)
if e['ResourceStatus'].endswith('FAILED'):
failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e)
ret['log'].append(failline)
return ret
def create_stack(module, stack_params, cfn):
if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
module.fail_json(msg="Either 'template' or 'template_url' is required when the stack does not exist.")
# 'disablerollback' only applies on creation, not update.
stack_params['DisableRollback'] = module.params['disable_rollback']
try:
cfn.create_stack(**stack_params)
result = stack_operation(cfn, stack_params['StackName'], 'CREATE')
except Exception as err:
error_msg = boto_exception(err)
module.fail_json(msg="Failed to create stack {0}: {1}.".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
if not result:
module.fail_json(msg="empty result")
return result
def list_changesets(cfn, stack_name):
res = cfn.list_change_sets(StackName=stack_name)
return [cs['ChangeSetName'] for cs in res['Summaries']]
def create_changeset(module, stack_params, cfn):
if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
module.fail_json(msg="Either 'template' or 'template_url' is required.")
try:
changeset_name = build_changeset_name(stack_params)
stack_params['ChangeSetName'] = changeset_name
# Determine if this changeset already exists
pending_changesets = list_changesets(cfn, stack_params['StackName'])
if changeset_name in pending_changesets:
warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets)
result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning])
else:
cs = cfn.create_change_set(**stack_params)
result = stack_operation(cfn, stack_params['StackName'], 'CREATE_CHANGESET')
result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']),
'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'],
'NOTE that dependencies on this stack might fail due to pending changes!']
except Exception as err:
error_msg = boto_exception(err)
if 'No updates are to be performed.' in error_msg:
result = dict(changed=False, output='Stack is already up-to-date.')
else:
module.fail_json(msg="Failed to create change set: {0}".format(error_msg), exception=traceback.format_exc())
if not result:
module.fail_json(msg="empty result")
return result
def update_stack(module, stack_params, cfn):
if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params:
stack_params['UsePreviousTemplate'] = True
# if the state is present and the stack already exists, we try to update it.
# AWS will tell us if the stack template and parameters are the same and
# don't need to be updated.
try:
cfn.update_stack(**stack_params)
result = stack_operation(cfn, stack_params['StackName'], 'UPDATE')
except Exception as err:
error_msg = boto_exception(err)
if 'No updates are to be performed.' in error_msg:
result = dict(changed=False, output='Stack is already up-to-date.')
else:
module.fail_json(msg="Failed to update stack {0}: {1}".format(stack_params.get('StackName'), error_msg), exception=traceback.format_exc())
if not result:
module.fail_json(msg="empty result")
return result
def stack_operation(cfn, stack_name, operation):
'''gets the status of a stack while it is created/updated/deleted'''
existed = []
while True:
try:
stack = get_stack_facts(cfn, stack_name)
existed.append('yes')
except:
# If the stack previously existed, and now can't be found then it's
# been deleted successfully.
if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
ret = get_stack_events(cfn, stack_name)
ret.update({'changed': True, 'output': 'Stack Deleted'})
return ret
else:
return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()}
ret = get_stack_events(cfn, stack_name)
if not stack:
if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways.
ret = get_stack_events(cfn, stack_name)
ret.update({'changed': True, 'output': 'Stack Deleted'})
return ret
else:
ret.update({'changed': False, 'failed': True, 'output' : 'Stack not found.'})
return ret
# it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE
# Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13
elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET':
ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation})
return ret
# note the ordering of ROLLBACK_COMPLETE and COMPLETE, because otherwise COMPLETE will match both cases.
elif stack['StackStatus'].endswith('_COMPLETE'):
ret.update({'changed': True, 'output' : 'Stack %s complete' % operation })
return ret
elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'):
ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation})
return ret
# note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases.
elif stack['StackStatus'].endswith('_FAILED'):
ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation})
return ret
else:
# this can loop forever :/
time.sleep(5)
return {'failed': True, 'output':'Failed for unknown reasons.'}
def build_changeset_name(stack_params):
if 'ChangeSetName' in stack_params:
return stack_params['ChangeSetName']
json_params = json.dumps(stack_params, sort_keys=True)
return 'Ansible-{0}-{1}'.format(
stack_params['StackName'],
sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest()
)
def check_mode_changeset(module, stack_params, cfn):
"""Create a change set, describe it and delete it before returning check mode outputs."""
stack_params['ChangeSetName'] = build_changeset_name(stack_params)
try:
change_set = cfn.create_change_set(**stack_params)
for i in range(60): # total time 5 min
description = cfn.describe_change_set(ChangeSetName=change_set['Id'])
if description['Status'] in ('CREATE_COMPLETE', 'FAILED'):
break
time.sleep(5)
else:
# if the changeset doesn't finish in 5 mins, this `else` will trigger and fail
module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName'])
cfn.delete_change_set(ChangeSetName=change_set['Id'])
reason = description.get('StatusReason')
if description['Status'] == 'FAILED' and "didn't contain changes" in description['StatusReason']:
return {'changed': False, 'msg': reason, 'meta': description['StatusReason']}
return {'changed': True, 'msg': reason, 'meta': description['Changes']}
except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err:
error_msg = boto_exception(err)
module.fail_json(msg=error_msg, exception=traceback.format_exc())
def get_stack_facts(cfn, stack_name):
try:
stack_response = cfn.describe_stacks(StackName=stack_name)
stack_info = stack_response['Stacks'][0]
except (botocore.exceptions.ValidationError,botocore.exceptions.ClientError) as err:
error_msg = boto_exception(err)
if 'does not exist' in error_msg:
# missing stack, don't bail.
return None
# other error, bail.
raise err
if stack_response and stack_response.get('Stacks', None):
stacks = stack_response['Stacks']
if len(stacks):
stack_info = stacks[0]
return stack_info
def main():
argument_spec = ansible.module_utils.ec2.ec2_argument_spec()
argument_spec.update(dict(
stack_name=dict(required=True),
template_parameters=dict(required=False, type='dict', default={}),
state=dict(default='present', choices=['present', 'absent']),
template=dict(default=None, required=False, type='path'),
notification_arns=dict(default=None, required=False),
stack_policy=dict(default=None, required=False),
disable_rollback=dict(default=False, type='bool'),
template_url=dict(default=None, required=False),
template_format=dict(default=None, choices=['json', 'yaml'], required=False),
create_changeset=dict(default=False, type='bool'),
changeset_name=dict(default=None, required=False),
role_arn=dict(default=None, required=False),
tags=dict(default=None, type='dict')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['template_url', 'template']],
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 and botocore are required for this module')
# collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
stack_params = {
'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
}
state = module.params['state']
stack_params['StackName'] = module.params['stack_name']
if module.params['template'] is not None:
stack_params['TemplateBody'] = open(module.params['template'], 'r').read()
elif module.params['template_url'] is not None:
stack_params['TemplateURL'] = module.params['template_url']
if module.params.get('notification_arns'):
stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
else:
stack_params['NotificationARNs'] = []
if module.params['stack_policy'] is not None:
stack_params['StackPolicyBody'] = open(module.params['stack_policy'], 'r').read()
if module.params['changeset_name'] is not None:
stack_params['ChangeSetName'] = module.params['changeset_name']
template_parameters = module.params['template_parameters']
stack_params['Parameters'] = [{'ParameterKey':k, 'ParameterValue':str(v)} for k, v in template_parameters.items()]
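    # Illustrative example (sketch): template_parameters={'InstanceType': 't2.micro'}
    # becomes [{'ParameterKey': 'InstanceType', 'ParameterValue': 't2.micro'}].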
if isinstance(module.params.get('tags'), dict):
stack_params['Tags'] = ansible.module_utils.ec2.ansible_dict_to_boto3_tag_list(module.params['tags'])
if module.params.get('role_arn'):
stack_params['RoleARN'] = module.params['role_arn']
result = {}
try:
region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(module, boto3=True)
cfn = ansible.module_utils.ec2.boto3_conn(module, conn_type='client', resource='cloudformation', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg=boto_exception(e))
# Wrap the cloudformation client methods that this module uses with
# automatic backoff / retry for throttling error codes
backoff_wrapper = AWSRetry.jittered_backoff(retries=10, delay=3, max_delay=30)
cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
cfn.create_stack = backoff_wrapper(cfn.create_stack)
cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
cfn.update_stack = backoff_wrapper(cfn.update_stack)
cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
stack_info = get_stack_facts(cfn, stack_params['StackName'])
if module.check_mode:
if state == 'absent' and stack_info:
module.exit_json(changed=True, msg='Stack would be deleted', meta=[])
elif state == 'absent' and not stack_info:
module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[])
elif state == 'present' and not stack_info:
module.exit_json(changed=True, msg='New stack would be created', meta=[])
else:
module.exit_json(**check_mode_changeset(module, stack_params, cfn))
if state == 'present':
if not stack_info:
result = create_stack(module, stack_params, cfn)
elif module.params.get('create_changeset'):
result = create_changeset(module, stack_params, cfn)
else:
result = update_stack(module, stack_params, cfn)
# format the stack output
stack = get_stack_facts(cfn, stack_params['StackName'])
if result.get('stack_outputs') is None:
# always define stack_outputs, but it may be empty
result['stack_outputs'] = {}
for output in stack.get('Outputs', []):
result['stack_outputs'][output['OutputKey']] = output['OutputValue']
stack_resources = []
reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
for res in reslist.get('StackResourceSummaries', []):
stack_resources.append({
"logical_resource_id": res['LogicalResourceId'],
"physical_resource_id": res.get('PhysicalResourceId', ''),
"resource_type": res['ResourceType'],
"last_updated_time": res['LastUpdatedTimestamp'],
"status": res['ResourceStatus'],
"status_reason": res.get('ResourceStatusReason') # can be blank, apparently
})
result['stack_resources'] = stack_resources
elif state == 'absent':
# absent state is different because of the way delete_stack works.
        # problem is it doesn't give an error if stack isn't found
# so must describe the stack first
try:
stack = get_stack_facts(cfn, stack_params['StackName'])
if not stack:
result = {'changed': False, 'output': 'Stack not found.'}
else:
cfn.delete_stack(StackName=stack_params['StackName'])
result = stack_operation(cfn, stack_params['StackName'], 'DELETE')
except Exception as err:
module.fail_json(msg=boto_exception(err), exception=traceback.format_exc())
if module.params['template_format'] is not None:
result['warnings'] = [('Argument `template_format` is deprecated '
'since Ansible 2.3, JSON and YAML templates are now passed '
'directly to the CloudFormation API.')]
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
waseem18/oh-mainline | vendor/packages/twisted/twisted/words/im/baseaccount.py | 80 | 1808 | # -*- Python -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
class AccountManager:
"""I am responsible for managing a user's accounts.
That is, remembering what accounts are available, their settings,
    addition and removal of accounts, etc.
@ivar accounts: A collection of available accounts.
@type accounts: mapping of strings to L{Account<interfaces.IAccount>}s.
"""
def __init__(self):
self.accounts = {}
def getSnapShot(self):
"""A snapshot of all the accounts and their status.
@returns: A list of tuples, each of the form
(string:accountName, boolean:isOnline,
boolean:autoLogin, string:gatewayType)
"""
data = []
for account in self.accounts.values():
data.append((account.accountName, account.isOnline(),
account.autoLogin, account.gatewayType))
return data
def isEmpty(self):
return len(self.accounts) == 0
def getConnectionInfo(self):
connectioninfo = []
for account in self.accounts.values():
connectioninfo.append(account.isOnline())
return connectioninfo
def addAccount(self, account):
self.accounts[account.accountName] = account
def delAccount(self, accountName):
del self.accounts[accountName]
def connect(self, accountName, chatui):
"""
@returntype: Deferred L{interfaces.IClient}
"""
return self.accounts[accountName].logOn(chatui)
def disconnect(self, accountName):
pass
#self.accounts[accountName].logOff() - not yet implemented
def quit(self):
pass
#for account in self.accounts.values():
# account.logOff() - not yet implemented
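# Example usage (illustrative sketch only; 'my_account' stands in for a concrete
# Account implementation provided by a gateway module):
#
#   manager = AccountManager()
#   manager.addAccount(my_account)
#   for name, online, auto_login, gateway in manager.getSnapShot():
#       print name, online, auto_login, gateway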
| agpl-3.0 |
SimplyKnownAsG/yaml-cpp | test/gtest-1.8.0/googletest/scripts/upload_gtest.py | 1963 | 2851 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
[email protected] to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = '[email protected]'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
| mit |
manashmndl/flexx | flexx/react/tests/test_both.py | 20 | 15553 | """
Tests that should run in both Python and JS.
This helps ensure that both implementations work in the same way.
Focus on use-cases rather than coverage.
These tests work a bit awkwardly, but it's very useful to be able to test
that the two systems work exactly the same way. You define a class with
signals, and then provide that class to a test function using a
decorator. The test function will then be run both in Python and in JS.
The test function should return an object that, when converted to a
string, matches the reference string given to the decorator. The
result string is made lowercase, and double quotes are converted to
single quotes.
"""
from pytest import raises
from flexx.util.testing import run_tests_if_main
from flexx.react import source, input, connect, lazy, HasSignals, undefined
from flexx.react.pyscript import create_js_signals_class, HasSignalsJS
from flexx.pyscript.functions import py2js, evaljs, evalpy, js_rename
def run_in_both(cls, reference, extra_classes=()):
if reference.lower() != reference:
raise ValueError('Test reference should be lowercase!')
def wrapper(func):
def runner():
# Run in JS
code = js_rename(HasSignalsJS.JSCODE, 'HasSignalsJS', 'HasSignals')
for c in cls.mro()[1:]:
if c is HasSignals:
break
code += create_js_signals_class(c, c.__name__, c.__bases__[0].__name__+'.prototype')
for c in extra_classes:
code += create_js_signals_class(c, c.__name__)
code += create_js_signals_class(cls, cls.__name__, cls.__bases__[0].__name__+'.prototype')
code += py2js(func, 'test')
code += 'test(%s);' % cls.__name__
jsresult = evaljs(code)
jsresult = jsresult.replace('[ ', '[').replace(' ]', ']')
jsresult = jsresult.replace('"', "'")
print('js:', jsresult)
# Run in Python
pyresult = str(func(cls))
pyresult = pyresult.replace('"', "'")
print('py:', pyresult)
#
assert pyresult.lower() == reference
assert jsresult.lower() == reference
return runner
return wrapper
class Name(HasSignals):
_foo = 3
_bar = 'bar'
spam = [1, 2, 3]
def __init__(self):
self.r = []
super().__init__()
@input
def first_name(v='john'):
return str(v)
@input
def last_name(v='doe'):
return str(v)
@lazy('first_name', 'last_name')
def full_name(self, n1, n2):
self.r.append('')
return n1 + ' ' + n2
@run_in_both(Name, "['', 'john doe', '', 'almar klein', '', 'jorik klein']")
def test_pull(Name):
name = Name()
name.r.append(name.full_name())
name.first_name('almar')
name.last_name('klein')
name.r.append(name.full_name())
name.first_name('jorik')
name.r.append(name.full_name())
return name.r
@run_in_both(Name, "['', 'john doe', '', 'jane doe']")
def test_disconnecting_signal(Name):
s = Name()
s.r.append(s.full_name())
    # Disconnect, but because it's a react signal, it re-connects at once
s.full_name.disconnect(False) # no destroy
s.first_name('almar')
s.first_name('jorik')
s.first_name('jane')
s.r.append(s.full_name()) # connects now
return s.r
@run_in_both(Name, "[true, true, '', true, true, true, true, '', true, true]")
def test_signal_attributes(Name):
s = Name()
s.r.append(s.full_name._timestamp == 0)
s.r.append(s.full_name._value is undefined)
s.full_name()
s.r.append(s.full_name._timestamp > 0)
s.r.append(s.full_name._last_timestamp == 0)
s.r.append(s.full_name._value == 'john doe')
s.r.append(s.full_name._last_value is undefined)
s.first_name('jane')
s.full_name()
s.r.append(s.full_name._last_timestamp > 0)
s.r.append(s.full_name._last_value == 'john doe')
return s.r
@run_in_both(Name, "[3, 'bar', [1, 2, 3], 2, 'err', 'err', 'john']")
def test_hassignal_attributes(Name):
s = Name()
# class attributes
s.r.append(s._foo)
s.r.append(s._bar)
s.r.append(s.spam)
# can set other attributes
s.eggs = 2
s.r.append(s.eggs)
# cannot overwrite signals
try:
s.first_name = 2
s.r.append(s.first_name)
except Exception:
s.r.append('err')
# cannot overwrite signal attributes
try:
s.first_name.value = 2
s.r.append(s.first_name.value)
except Exception:
s.r.append('err')
# cannot delete signals
try:
del s.first_name
except Exception:
pass # on Python it raises, on JS it ignores
s.r.append(s.first_name.value)
return s.r
@run_in_both(Name, "['first_name', 'full_name', 'last_name']")
def test_hassignal__signals__(Name):
s = Name()
return s.__signals__
@run_in_both(Name, "[2, 2]")
def test_reconnect_no_doubles(Name):
s = Name()
s.r.append(len(s.full_name._upstream))
s.full_name.connect()
s.r.append(len(s.full_name._upstream))
return s.r
class NoDefaults(HasSignals):
def __init__(self):
self.r = []
super().__init__()
@input
def in1(v):
return v
@connect('in1')
def s1a(v):
return v
@connect('s1a')
def s1b(v):
return v
# ---
@input
def in2(v):
return v
@connect('in2')
def s2a(self, v):
return v
@connect('s2a')
def s2b(self, v):
self.r.append(v)
#
@input
def in3(v):
return v
@connect('in3')
def aa_s3a(self, v): # name mangling to make these connect first
self.r.append(v)
return v
@connect('aa_s3a')
def aa_s3b(self, v):
self.r.append(v)
@run_in_both(NoDefaults, "['err', '', 'x', 'y', 'z', 'z']")
def test_pull_no_defaults(Cls):
s = Cls()
try:
s.s1b()
except Exception:
s.r.append('err')
s.r.append('')
s.in1('x')
s.r.append(s.s1b())
s.in2('y')
s.in3('z')
return s.r
class Title(HasSignals):
def __init__(self):
self.r = []
super().__init__()
@input
def title(v=''):
return v
@connect('title')
def title_len(v):
return len(v)
@connect('title_len')
def show_title(self, v):
self.r.append(v)
@run_in_both(Title, '[0, 2, 4, false]')
def test_push(Title):
foo = Title()
foo.title('xx')
foo.title('xxxx')
foo.r.append(foo.show_title.not_connected)
return foo.r
@run_in_both(Title, "[0]")
def test_disconnecting_react(Title):
s = Title()
    # Disconnect, but because it's a react signal, it re-connects at once
# No, this was the case earlier. Disconnect really disconnects
s.show_title.disconnect()
s.title('xx')
return s.r
class Unconnected(HasSignals):
@input
def s0(v=''):
return v
@connect('nope')
def s1(v):
return v
@connect('button.title')
def s2(v):
return v
@connect('s2')
def s3(v):
return v
@connect('s3')
def s4(v):
return v
@run_in_both(Unconnected, "[false, true, 'signal 'button.title' does not exist.']")
def test_unconnected1(Cls):
s = Cls()
r = []
r.append(bool(s.s0.not_connected))
r.append(bool(s.s1.not_connected))
r.append(s.s2.not_connected)
return r
@run_in_both(Unconnected, "[true, 'object 'nope' is not a signal.']")
def test_unconnected2(Cls):
s = Cls()
r = []
s.nope = 4
s.s1.connect(False)
r.append(bool(s.s1.not_connected))
r.append(s.s1.not_connected)
return r
@run_in_both(Unconnected, "[true, false, 'err2', 'err3', 'err4']")
def test_unconnected_handling(Cls):
s = Cls()
r = []
r.append(bool(s.s2.not_connected))
r.append(bool(s.s3.not_connected))
#
try:
s.s2()
except Exception:
r.append('err2') # error, because this signal is not connected
try:
s.s3()
except Exception:
r.append('err3') # error, because an upstream signal is not connected
try:
s.s4()
except Exception:
r.append('err4') # error, because an upstream signal is not connected
return r
@run_in_both(Unconnected, "['err4', 'ha', 'ho', 'err4']", extra_classes=(Title,))
def test_unconnected_connect(Cls):
s = Cls()
r = []
# We add an object named 'button' with signal 'title', exactly what s2 needs
button = Title()
s.button = button
button.title('ha')
# Now calling s4 will fail
try:
s.s4()
except Exception:
r.append('err4') # error, because an upstream signal is not connected
# We connect it
s.s2.connect()
r.append(s.s4())
# Now we remove 'button'
del s.button
# This should still work, since connections are in place
button.title('ho')
r.append(s.s4())
# And we break connections
s.s2.disconnect()
try:
s.s4()
except Exception:
r.append('err4') # error, because an upstream signal is not connected
return r
class SignalTypes(HasSignals):
@input
def s1(v=None):
return v
@source
def s2(v=None):
return v
@connect('s2')
def s3(v):
return v
@connect('s2')
def s4(v):
return v
@run_in_both(SignalTypes, "['s2', 's3', 's4', 's3', 's4']")
def test_setting_inputs(Cls):
s = Cls()
r = []
# These do not error
s.s1('foo')
s.s1._set('foo')
s.s2._set('foo')
# But these do
try:
s.s2('foo')
except Exception:
r.append('s2')
try:
s.s3('foo')
except Exception:
r.append('s3')
try:
s.s4('foo')
except Exception:
r.append('s4')
# And these too
try:
s.s3._set('foo')
except Exception:
r.append('s3')
try:
s.s4._set('foo')
except Exception:
r.append('s4')
return r
@run_in_both(SignalTypes, "[true, 'foo', 'bar']")
def test_setting_inputs2(Cls):
s = Cls()
r = []
r.append(s.s1() is None) # test no default value
s.s1('foo')
s.s2._set('bar')
r.append(s.s1())
r.append(s.s2())
return r
class UndefinedSignalValues(HasSignals):
def __init__(self):
self.r = []
super().__init__()
@input
def number1(v=1):
if v > 0:
return v
return undefined
@connect('number1')
def number2(v):
if v > 5:
return v
return undefined
@connect('number2')
def reg(self, v):
self.r.append(v)
@run_in_both(UndefinedSignalValues, "[9, 8, 7]")
def test_undefined_values(Cls):
s = Cls()
s.number1(9)
s.number1(-2)
s.number1(-3)
s.number1(8)
s.number1(3)
s.number1(4)
s.number1(7)
return s.r
class Circular(HasSignals):
@input('s3')
def s1(v1=10, v3=None):
if v3 is None:
return v1
else:
return v3 + 1
@lazy('s1')
def s2(v):
return v + 1
@lazy('s2')
def s3(v):
return v + 1
@run_in_both(Circular, "[10, 11, 12, '', 2, 3, 4]")
def test_circular(Cls):
s = Cls()
r = []
r.append(s.s1())
r.append(s.s2())
r.append(s.s3())
r.append('')
s.s1(2)
r.append(s.s1())
r.append(s.s2())
r.append(s.s3())
return r
# todo: this is not pretty. Do we need it? Can this be done differently?
class Temperature(HasSignals): # to avoid round errors, the relation is simplified
@input('f')
def c(v=0, f=None):
if f is None:
return int(v)
else:
return f - 32
@input('c')
def f(v=32, c=None):
if c is None:
return int(v)
else:
return c + 32
@run_in_both(Temperature, "[0, 32, '', 10, 42, '', -22, 10]")
def test_circular_temperature(Cls):
s = Cls()
r = []
r.append(s.c())
r.append(s.f())
r.append('')
s.c(10)
r.append(s.c())
r.append(s.f())
r.append('')
s.f(10)
r.append(s.c())
r.append(s.f())
return r
# todo: this does not work, but maybe it should? Although making this work would close the door to async, I think
class Temperature2(HasSignals): # to avoid round errors, the relation is simplified
@input
def c(v=32):
return int(v)
@input
def f(v=0):
return int(v)
@connect('f')
def _f(self, v):
self.c(v+32)
@connect('c')
def _c(self, v):
self.f(v-32)
class Name2(Name):
@connect('full_name')
def name_length(v):
return len(v)
@input
def aa():
return len(v)
@run_in_both(Name2, "['aa', 'first_name', 'full_name', 'last_name', 'name_length']")
def test_hassignal__signals__(Name2):
s = Name2()
return s.__signals__
@run_in_both(Name2, "[8, 3]")
def test_inheritance(Cls):
s = Cls()
r = []
r.append(s.name_length())
s.first_name('a')
s.last_name('b')
r.append(s.name_length())
return r
class Dynamism(HasSignals):
def __init__(self):
self.r = []
super().__init__()
@input
def current_person(v):
return v
@connect('current_person')
def current_person_proxy(v): # need this to cover more code
return v
@input
def current_persons(v):
return v
@connect('current_person.first_name')
def current_name1(v):
return v
@connect('current_person_proxy.first_name')
def current_name2(self, v):
self.r.append(v)
@connect('current_persons.*.first_name')
def current_name3(self, *names):
v = ''
for n in names:
v += n
self.r.append(v)
@connect('current_persons.*.bla')
def current_name4(self, *names):
pass
@run_in_both(Dynamism, "[3, 'err', 'john', 'john', 0, 3, 'john', 0, 'jane', 'jane']", extra_classes=(Name,))
def test_dynamism1(Cls):
d = Dynamism()
n = Name()
d.r.append(d.current_name2._status)
try:
d.r.append(d.current_name1())
except Exception:
d.r.append('err')
d.current_person(n)
d.r.append(d.current_name1())
d.r.append(d.current_name2._status) # 0
# Set to None, signal will not be updated
d.current_person(None)
d.r.append(d.current_name2._status) # 3
# Set back, but signal will update
d.current_person(n)
d.r.append(d.current_name2._status) # 0
# Normal update
n.first_name('jane')
d.r.append(d.current_name1())
return d.r
@run_in_both(Dynamism, "[3, 'err', 'john', 'johnjohn', 'janejohn', 'janejane', '', 3, '']", extra_classes=(Name,))
def test_dynamism2(Cls):
d = Dynamism()
n1, n2 = Name(), Name()
assert d.current_name4.not_connected
d.r.append(d.current_name3._status)
try:
d.r.append(d.current_name3())
except Exception:
d.r.append('err')
# Set persons
d.current_persons((n1, ))
d.current_persons((n1, n2))
n1.first_name('jane')
n2.first_name('jane')
d.current_persons(())
# Now set to something that has no first_name
d.current_persons(None)
d.r.append(d.current_name3._status) # 3
d.current_persons(())
return d.r
run_tests_if_main()
| bsd-2-clause |
zofuthan/edx-platform | scripts/runone.py | 182 | 3124 | #!/usr/bin/env python
import argparse
import os
import sys
# I want this:
# ERROR: test_update_and_fetch (edx-platform.cms.djangoapps.contentstore.tests.test_course_settings.CourseDetailsViewTest)
# to become:
# test --settings=cms.envs.test --pythonpath=. -s cms/djangoapps/contentstore/tests/test_course_settings.py:CourseDetailsViewTest.test_update_and_fetch
def find_full_path(path_to_file):
"""Find the full path where we only have a relative path from somewhere in the tree."""
for subdir, dirs, files in os.walk("."):
full = os.path.relpath(os.path.join(subdir, path_to_file))
if os.path.exists(full):
return full
def main(argv):
parser = argparse.ArgumentParser(description="Run just one test")
parser.add_argument('--nocapture', '-s', action='store_true', help="Don't capture stdout (any stdout output will be printed immediately)")
parser.add_argument('--pdb', action='store_true', help="Use pdb for test errors")
parser.add_argument('--pdb-fail', action='store_true', help="Use pdb for test failures")
parser.add_argument('words', metavar="WORDS", nargs='+', help="The description of a test failure, like 'ERROR: test_set_missing_field (courseware.tests.test_model_data.TestStudentModuleStorage)'")
args = parser.parse_args(argv)
words = []
# Collect all the words, ignoring what was quoted together, and get rid of parens.
for argword in args.words:
words.extend(w.strip("()") for w in argword.split())
# If it starts with "ERROR:" or "FAIL:", just ignore that.
if words[0].endswith(':'):
del words[0]
if len(words) == 1:
test_path, test_method = words[0].rsplit('.', 1)
test_path = test_path.split('.')
else:
test_method = words[0]
test_path = words[1].split('.')
if test_path[0] == 'edx-platform':
del test_path[0]
test_class = test_path[-1]
del test_path[-1]
test_py_path = "%s.py" % ("/".join(test_path))
test_py_path = find_full_path(test_py_path)
test_spec = "%s:%s.%s" % (test_py_path, test_class, test_method)
system = None
if test_py_path.startswith('cms'):
system = 'cms'
elif test_py_path.startswith('lms'):
system = 'lms'
if system:
# Run as a django test suite
from django.core import management
os.environ['DJANGO_SETTINGS_MODULE'] = system + '.envs.test'
django_args = ["./manage.py", "test"]
if args.nocapture:
django_args.append("-s")
if args.pdb:
django_args.append("--pdb")
if args.pdb_fail:
django_args.append("--pdb-fail")
django_args.append(test_spec)
print " ".join(django_args)
management.execute_from_command_line(django_args)
else:
# Run as a nose test suite
import nose.core
nose_args = ["nosetests"]
if args.nocapture:
nose_args.append("-s")
nose_args.append(test_spec)
print " ".join(nose_args)
nose.core.main(argv=nose_args)
if __name__ == "__main__":
main(sys.argv[1:])
| agpl-3.0 |
darjus-amzn/boto | tests/integration/ec2containerservice/test_ec2containerservice.py | 99 | 1749 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.ec2containerservice.exceptions import ClientException
from tests.compat import unittest
class TestEC2ContainerService(unittest.TestCase):
def setUp(self):
self.ecs = boto.connect_ec2containerservice()
def test_list_clusters(self):
response = self.ecs.list_clusters()
self.assertIn('clusterArns',
response['ListClustersResponse']['ListClustersResult'])
def test_handle_not_found_exception(self):
with self.assertRaises(ClientException):
# Try to stop a task with an invalid arn.
self.ecs.stop_task(task='foo')
| mit |
shanglt/youtube-dl | youtube_dl/extractor/stanfordoc.py | 173 | 3526 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
orderedSet,
unescapeHTML,
)
class StanfordOpenClassroomIE(InfoExtractor):
IE_NAME = 'stanfordoc'
IE_DESC = 'Stanford Open ClassRoom'
_VALID_URL = r'https?://openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
_TEST = {
'url': 'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100',
'md5': '544a9468546059d4e80d76265b0443b8',
'info_dict': {
'id': 'PracticalUnix_intro-environment',
'ext': 'mp4',
'title': 'Intro Environment',
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj.group('course') and mobj.group('video'): # A specific video
course = mobj.group('course')
video = mobj.group('video')
info = {
'id': course + '_' + video,
'uploader': None,
'upload_date': None,
}
baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
xmlUrl = baseUrl + video + '.xml'
mdoc = self._download_xml(xmlUrl, info['id'])
try:
info['title'] = mdoc.findall('./title')[0].text
info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
except IndexError:
raise ExtractorError('Invalid metadata XML file')
return info
elif mobj.group('course'): # A course page
course = mobj.group('course')
info = {
'id': course,
'_type': 'playlist',
'uploader': None,
'upload_date': None,
}
coursepage = self._download_webpage(
url, info['id'],
note='Downloading course info page',
errnote='Unable to download course info page')
info['title'] = self._html_search_regex(
r'<h1>([^<]+)</h1>', coursepage, 'title', default=info['id'])
info['description'] = self._html_search_regex(
r'(?s)<description>([^<]+)</description>',
coursepage, 'description', fatal=False)
links = orderedSet(re.findall('<a href="(VideoPage.php\?[^"]+)">', coursepage))
info['entries'] = [self.url_result(
'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l)
) for l in links]
return info
else: # Root page
info = {
'id': 'Stanford OpenClassroom',
'_type': 'playlist',
'uploader': None,
'upload_date': None,
}
info['title'] = info['id']
rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
rootpage = self._download_webpage(rootURL, info['id'],
errnote='Unable to download course info page')
links = orderedSet(re.findall('<a href="(CoursePage.php\?[^"]+)">', rootpage))
info['entries'] = [self.url_result(
'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l)
) for l in links]
return info
| unlicense |
toooooper/oppia | core/domain/rte_component_registry.py | 29 | 3279 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registry for custom rich-text components."""
__author__ = 'Sean Lip'
import pkgutil
import feconf
import utils
class Registry(object):
"""Registry of all custom rich-text components."""
_rte_components = {}
@classmethod
def _refresh(cls):
"""Repopulate the registry."""
cls._rte_components.clear()
# Assemble all paths to the RTE components.
EXTENSION_PATHS = [
component['dir'] for component in
feconf.ALLOWED_RTE_EXTENSIONS.values()]
# Crawl the directories and add new RTE component instances to the
# registry.
for loader, name, _ in pkgutil.iter_modules(path=EXTENSION_PATHS):
module = loader.find_module(name).load_module(name)
clazz = getattr(module, name)
ancestor_names = [
base_class.__name__ for base_class in clazz.__bases__]
if 'BaseRichTextComponent' in ancestor_names:
cls._rte_components[clazz.__name__] = clazz()
@classmethod
def get_all_rte_components(cls):
"""Get a list of instances of all custom RTE components."""
if len(cls._rte_components) == 0:
cls._refresh()
return cls._rte_components.values()
@classmethod
def get_tag_list_with_attrs(cls):
"""Returns a dict of HTML tag names and attributes for RTE components.
The keys are tag names starting with 'oppia-noninteractive-', followed
by the hyphenated version of the name of the RTE component. The values
are lists of allowed attributes of the form
[PARAM_NAME]-with-[CUSTOMIZATION_ARG_NAME].
"""
# TODO(sll): Cache this computation and update it on each refresh.
component_list = cls.get_all_rte_components()
component_tags = {}
for component in component_list:
tag_name = 'oppia-noninteractive-%s' % (
utils.camelcase_to_hyphenated(component.id))
component_tags[tag_name] = [
'%s-with-value' % ca_spec.name
for ca_spec in component.customization_arg_specs]
return component_tags
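    # Illustrative example (hypothetical component): a component with id 'Link'
    # and a single customization arg named 'url' would contribute
    #     {'oppia-noninteractive-link': ['url-with-value']}
    # to the dict returned above.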
@classmethod
def get_html_for_all_components(cls):
"""Returns the HTML bodies for all custom RTE components."""
return ' \n'.join([
component.html_body for component in cls.get_all_rte_components()])
@classmethod
def get_all_specs(cls):
"""Returns a dict containing the full specs of each RTE component."""
return {
component.id: component.to_dict()
for component in cls.get_all_rte_components()
}
| apache-2.0 |
int19h/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/setuptools/__init__.py | 29 | 7283 | """Extensions to the 'distutils' for large or complex distributions"""
import os
import sys
import functools
import distutils.core
import distutils.filelist
import re
from distutils.errors import DistutilsOptionError
from distutils.util import convert_path
from fnmatch import fnmatchcase
from ._deprecation_warning import SetuptoolsDeprecationWarning
from setuptools.extern.six import PY3, string_types
from setuptools.extern.six.moves import filter, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution, Feature
from setuptools.depends import Require
from . import monkey
__metaclass__ = type
__all__ = [
'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require',
'SetuptoolsDeprecationWarning',
'find_packages'
]
if PY3:
__all__.append('find_namespace_packages')
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder:
"""
Generate a list of all Python packages found within a directory
"""
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' is the root directory which will be searched for packages. It
should be supplied as a "cross-platform" (i.e. URL-style) path; it will
be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
"""
return list(cls._find_packages_iter(
convert_path(where),
cls._build_filter('ez_setup', '*__pycache__', *exclude),
cls._build_filter(*include)))
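    # Illustrative example (sketch): for a tree containing src/pkg/__init__.py and
    # src/pkg/tests/__init__.py, PackageFinder.find(where='src', exclude=('*.tests',))
    # would return ['pkg'], since the excluded subpackage is dropped while its
    # parent package is kept.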
@classmethod
def _find_packages_iter(cls, where, exclude, include):
"""
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
"""
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
# Skip directory trees that are not valid packages
if ('.' in dir or not cls._looks_like_package(full_path)):
continue
# Should this package be included?
if include(package) and not exclude(package):
yield package
# Keep searching subdirectories, as there may be more packages
# down there, even if the parent was excluded.
dirs.append(dir)
@staticmethod
def _looks_like_package(path):
"""Does a directory look like a package?"""
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
if PY3:
find_namespace_packages = PEP420PackageFinder.find
def _install_setup_requires(attrs):
# Note: do not use `setuptools.Distribution` directly, as
# our PEP 517 backend patch `distutils.core.Distribution`.
dist = distutils.core.Distribution(dict(
(k, v) for k, v in attrs.items()
if k in ('dependency_links', 'setup_requires')
))
# Honor setup.cfg's options.
dist.parse_config_files(ignore_option_errors=True)
if dist.setup_requires:
dist.fetch_build_eggs(dist.setup_requires)
def setup(**attrs):
# Make sure we have any requirements needed to interpret 'attrs'.
_install_setup_requires(attrs)
return distutils.core.setup(**attrs)
setup.__doc__ = distutils.core.setup.__doc__
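# Example setup.py built on this module (illustrative sketch):
#
#   from setuptools import setup, find_packages
#
#   setup(
#       name='example-dist',
#       version='0.1',
#       packages=find_packages(exclude=('tests',)),
#   )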
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
"""
Construct the command for dist, updating
vars(self) with any keyword parameters.
"""
_Command.__init__(self, dist)
vars(self).update(kw)
def _ensure_stringlike(self, option, what, default=None):
val = getattr(self, option)
if val is None:
setattr(self, option, default)
return default
elif not isinstance(val, string_types):
raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
% (option, what, val))
return val
def ensure_string_list(self, option):
r"""Ensure that 'option' is a list of strings. If 'option' is
currently a string, we split it either on /,\s*/ or /\s+/, so
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
["foo", "bar", "baz"].
"""
val = getattr(self, option)
if val is None:
return
elif isinstance(val, string_types):
setattr(self, option, re.split(r',\s*|\s+', val))
else:
if isinstance(val, list):
ok = all(isinstance(v, string_types) for v in val)
else:
ok = False
if not ok:
raise DistutilsOptionError(
"'%s' must be a list of strings (got %r)"
% (option, val))
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
vars(cmd).update(kw)
return cmd
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
# Apply monkey patches
monkey.patch_all()
| apache-2.0 |
nzlosh/st2 | st2tests/integration/orquesta/test_performance.py | 3 | 2222 | # -*- coding: utf-8 -*-
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import eventlet
import json
from integration.orquesta import base
from six.moves import range
from st2common.constants import action as ac_const
class WiringTest(base.TestWorkflowExecution):
def test_concurrent_load(self):
load_count = 3
delay_poll = load_count * 5
wf_name = "examples.orquesta-mock-create-vm"
wf_input = {"vm_name": "demo1", "meta": {"demo1.itests.org": "10.3.41.99"}}
exs = [self._execute_workflow(wf_name, wf_input) for i in range(load_count)]
eventlet.sleep(delay_poll)
for ex in exs:
e = self._wait_for_completion(ex)
self.assertEqual(
e.status, ac_const.LIVEACTION_STATUS_SUCCEEDED, json.dumps(e.result)
)
self.assertIn("output", e.result)
self.assertIn("vm_id", e.result["output"])
def test_with_items_load(self):
wf_name = "examples.orquesta-with-items-concurrency"
num_items = 10
concurrency = 10
members = [str(i).zfill(5) for i in range(0, num_items)]
wf_input = {"members": members, "concurrency": concurrency}
message = "%s, resistance is futile!"
expected_output = {"items": [message % i for i in members]}
expected_result = {"output": expected_output}
ex = self._execute_workflow(wf_name, wf_input)
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_SUCCEEDED)
self.assertDictEqual(ex.result, expected_result)
| apache-2.0 |
ghedsouza/django | django/core/management/commands/migrate.py | 16 | 14047 | import time
from collections import OrderedDict
from importlib import import_module
from django.apps import apps
from django.core.checks import Tags, run_checks
from django.core.management.base import BaseCommand, CommandError
from django.core.management.sql import (
emit_post_migrate_signal, emit_pre_migrate_signal,
)
from django.db import DEFAULT_DB_ALIAS, connections, router
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
from django.db.migrations.state import ModelState, ProjectState
from django.utils.module_loading import module_has_submodule
class Command(BaseCommand):
help = "Updates database schema. Manages both apps with migrations and those without."
def add_arguments(self, parser):
parser.add_argument(
'app_label', nargs='?',
help='App label of an application to synchronize the state.',
)
parser.add_argument(
'migration_name', nargs='?',
help='Database state will be brought to the state after that '
'migration. Use the name "zero" to unapply all migrations.',
)
parser.add_argument(
'--noinput', '--no-input', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Nominates a database to synchronize. Defaults to the "default" database.',
)
parser.add_argument(
'--fake', action='store_true', dest='fake',
help='Mark migrations as run without actually running them.',
)
parser.add_argument(
'--fake-initial', action='store_true', dest='fake_initial',
help='Detect if tables already exist and fake-apply initial migrations if so. Make sure '
'that the current database schema matches your initial migration before using this '
'flag. Django will only check for an existing table name.',
)
parser.add_argument(
'--run-syncdb', action='store_true', dest='run_syncdb',
help='Creates tables for apps without migrations.',
)
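        # Illustrative invocations covered by the options above (sketch):
        #   manage.py migrate                    -> apply all migrations on the default database
        #   manage.py migrate myapp              -> migrate only 'myapp' to its latest migration
        #   manage.py migrate myapp 0002 --fake  -> mark 'myapp' as being at migration 0002
        #   manage.py migrate myapp zero         -> unapply all migrations of 'myapp'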
def _run_checks(self, **kwargs):
issues = run_checks(tags=[Tags.database])
issues.extend(super()._run_checks(**kwargs))
return issues
def handle(self, *args, **options):
self.verbosity = options['verbosity']
self.interactive = options['interactive']
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
# Get the database we're operating from
db = options['database']
connection = connections[db]
# Hook for backends needing any database preparation
connection.prepare_database()
# Work out which apps have migrations and which do not
executor = MigrationExecutor(connection, self.migration_progress_callback)
# Raise an error if any migrations are applied before their dependencies.
executor.loader.check_consistent_history(connection)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any
conflicts = executor.loader.detect_conflicts()
if conflicts:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they supplied command line arguments, work out what they mean.
target_app_labels_only = True
if options['app_label'] and options['migration_name']:
app_label, migration_name = options['app_label'], options['migration_name']
if app_label not in executor.loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations." % app_label
)
if migration_name == "zero":
targets = [(app_label, None)]
else:
try:
migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. "
"Please be more specific." %
(migration_name, app_label)
)
except KeyError:
raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (
migration_name, app_label))
targets = [(app_label, migration.name)]
target_app_labels_only = False
elif options['app_label']:
app_label = options['app_label']
if app_label not in executor.loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations." % app_label
)
targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
else:
targets = executor.loader.graph.leaf_nodes()
plan = executor.migration_plan(targets)
run_syncdb = options['run_syncdb'] and executor.loader.unmigrated_apps
# Print some useful info
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
if run_syncdb:
self.stdout.write(
self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") +
(", ".join(sorted(executor.loader.unmigrated_apps)))
)
if target_app_labels_only:
self.stdout.write(
self.style.MIGRATE_LABEL(" Apply all migrations: ") +
(", ".join(sorted(set(a for a, n in targets))) or "(none)")
)
else:
if targets[0][1] is None:
self.stdout.write(self.style.MIGRATE_LABEL(
" Unapply all migrations: ") + "%s" % (targets[0][0], )
)
else:
self.stdout.write(self.style.MIGRATE_LABEL(
" Target specific migration: ") + "%s, from %s"
% (targets[0][1], targets[0][0])
)
pre_migrate_state = executor._create_project_state(with_applied_migrations=True)
pre_migrate_apps = pre_migrate_state.apps
emit_pre_migrate_signal(
self.verbosity, self.interactive, connection.alias, apps=pre_migrate_apps, plan=plan,
)
# Run the syncdb phase.
if run_syncdb:
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
self.sync_apps(connection, executor.loader.unmigrated_apps)
# Migrate!
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
if not plan:
if self.verbosity >= 1:
self.stdout.write(" No migrations to apply.")
# If there's changes that aren't in migrations yet, tell them how to fix it.
autodetector = MigrationAutodetector(
executor.loader.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
if changes:
self.stdout.write(self.style.NOTICE(
" Your models have changes that are not yet reflected "
"in a migration, and so won't be applied."
))
self.stdout.write(self.style.NOTICE(
" Run 'manage.py makemigrations' to make new "
"migrations, and then re-run 'manage.py migrate' to "
"apply them."
))
fake = False
fake_initial = False
else:
fake = options['fake']
fake_initial = options['fake_initial']
post_migrate_state = executor.migrate(
targets, plan=plan, state=pre_migrate_state.clone(), fake=fake,
fake_initial=fake_initial,
)
# post_migrate signals have access to all models. Ensure that all models
# are reloaded in case any are delayed.
post_migrate_state.clear_delayed_apps_cache()
post_migrate_apps = post_migrate_state.apps
# Re-render models of real apps to include relationships now that
# we've got a final state. This wouldn't be necessary if real apps
# models were rendered with relationships in the first place.
with post_migrate_apps.bulk_update():
model_keys = []
for model_state in post_migrate_apps.real_models:
model_key = model_state.app_label, model_state.name_lower
model_keys.append(model_key)
post_migrate_apps.unregister_model(*model_key)
post_migrate_apps.render_multiple([
ModelState.from_model(apps.get_model(*model)) for model in model_keys
])
# Send the post_migrate signal, so individual apps can do whatever they need
# to do at this point.
emit_post_migrate_signal(
self.verbosity, self.interactive, connection.alias, apps=post_migrate_apps, plan=plan,
)
def migration_progress_callback(self, action, migration=None, fake=False):
if self.verbosity >= 1:
compute_time = self.verbosity > 1
if action == "apply_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Applying %s..." % migration, ending="")
self.stdout.flush()
elif action == "apply_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "unapply_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Unapplying %s..." % migration, ending="")
self.stdout.flush()
elif action == "unapply_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "render_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Rendering model states...", ending="")
self.stdout.flush()
elif action == "render_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
self.stdout.write(self.style.SUCCESS(" DONE" + elapsed))
def sync_apps(self, connection, app_labels):
"""Run the old syncdb-style operation on a list of app_labels."""
with connection.cursor() as cursor:
tables = connection.introspection.table_names(cursor)
# Build the manifest of apps and models that are to be synchronized.
all_models = [
(
app_config.label,
router.get_migratable_models(app_config, connection.alias, include_auto_created=False),
)
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config.label in app_labels
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.table_name_converter
return not (
(converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables)
)
manifest = OrderedDict(
(app_name, list(filter(model_installed, model_list)))
for app_name, model_list in all_models
)
# Create the tables for each model
if self.verbosity >= 1:
self.stdout.write(" Creating tables...\n")
with connection.schema_editor() as editor:
for app_name, model_list in manifest.items():
for model in model_list:
# Never install unmanaged models, etc.
if not model._meta.can_migrate(connection):
continue
if self.verbosity >= 3:
self.stdout.write(
" Processing %s.%s model\n" % (app_name, model._meta.object_name)
)
if self.verbosity >= 1:
self.stdout.write(" Creating table %s\n" % model._meta.db_table)
editor.create_model(model)
# Deferred SQL is executed when exiting the editor's context.
if self.verbosity >= 1:
self.stdout.write(" Running deferred SQL...\n")
| bsd-3-clause |
llhe/tensorflow | tensorflow/contrib/keras/api/keras/preprocessing/sequence/__init__.py | 57 | 1172 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras data preprocessing utils for sequence data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras.preprocessing.sequence import make_sampling_table
from tensorflow.contrib.keras.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.contrib.keras.python.keras.preprocessing.sequence import skipgrams
del absolute_import
del division
del print_function
| apache-2.0 |
admetricks/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py | 114 | 50422 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi ([email protected]), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import json
import logging
import os
import platform
import Queue
import re
import StringIO
import sys
import thread
import time
import threading
import unittest2 as unittest
from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost
from webkitpy import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.port import Port
from webkitpy.port import test
from webkitpy.test.skip import skip_if
from webkitpy.tool.mocktool import MockOptions
def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
extra_args = extra_args or []
args = []
if not '--platform' in extra_args:
args.extend(['--platform', 'test'])
if not new_results:
args.append('--no-new-test-results')
if not '--child-processes' in extra_args:
args.extend(['--child-processes', 1])
args.extend(extra_args)
if not tests_included:
# We use the glob to test that globbing works.
args.extend(['passes',
'http/tests',
'websocket/tests',
'failures/expected/*'])
return run_webkit_tests.parse_args(args)
def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
options, parsed_args = parse_args(extra_args, tests_included)
if not port_obj:
host = host or MockHost()
port_obj = host.port_factory.get(port_name=options.platform, options=options)
if shared_port:
port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
logging_stream = StringIO.StringIO()
run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
return run_details.exit_code == 0
def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
options, parsed_args = parse_args(extra_args=extra_args,
tests_included=tests_included,
print_nothing=False, new_results=new_results)
host = host or MockHost()
if not port_obj:
port_obj = host.port_factory.get(port_name=options.platform, options=options)
run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
return (run_details, output, host.user)
def run_and_capture(port_obj, options, parsed_args, shared_port=True):
if shared_port:
port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
oc = outputcapture.OutputCapture()
try:
oc.capture_output()
logging_stream = StringIO.StringIO()
run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
finally:
oc.restore_output()
return (run_details, logging_stream)
def get_tests_run(args, host=None):
results = get_test_results(args, host)
return [result.test_name for result in results]
def get_test_batches(args, host=None):
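"""Run the tests and return the test names grouped into batches, splitting whenever the worker pid changes."""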
results = get_test_results(args, host)
batches = []
batch = []
current_pid = None
for result in results:
if batch and result.pid != current_pid:
batches.append(batch)
batch = []
batch.append(result.test_name)
if batch:
batches.append(batch)
return batches
def get_test_results(args, host=None):
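"""Run the given tests and return the combined list of initial and retry results."""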
options, parsed_args = parse_args(args, tests_included=True)
host = host or MockHost()
port_obj = host.port_factory.get(port_name=options.platform, options=options)
oc = outputcapture.OutputCapture()
oc.capture_output()
logging_stream = StringIO.StringIO()
try:
run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
finally:
oc.restore_output()
all_results = []
if run_details.initial_results:
all_results.extend(run_details.initial_results.all_results)
if run_details.retry_results:
all_results.extend(run_details.retry_results.all_results)
return all_results
def parse_full_results(full_results_text):
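"""Strip the ADD_RESULTS(...); JSONP wrapper and return the parsed results dictionary."""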
json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
compressed_results = json.loads(json_to_eval)
return compressed_results
class StreamTestingMixin(object):
def assertContains(self, stream, string):
self.assertTrue(string in stream.getvalue())
def assertEmpty(self, stream):
self.assertFalse(stream.getvalue())
def assertNotEmpty(self, stream):
self.assertTrue(stream.getvalue())
class RunTest(unittest.TestCase, StreamTestingMixin):
def setUp(self):
# A real PlatformInfo object is used here instead of a
# MockPlatformInfo because we need to actually check for
# Windows and Mac to skip some tests.
self._platform = SystemHost().platform
# FIXME: Remove this when we fix test-webkitpy to work
# properly on cygwin (bug 63846).
self.should_test_processes = not self._platform.is_win()
def test_basic(self):
options, args = parse_args(tests_included=True)
logging_stream = StringIO.StringIO()
host = MockHost()
port_obj = host.port_factory.get(options.platform, options)
details = run_webkit_tests.run(port_obj, options, args, logging_stream)
# These numbers will need to be updated whenever we add new tests.
self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
self.assertEqual(details.retry_results.total, test.TOTAL_RETRIES)
one_line_summary = "%d tests ran as expected, %d didn't:\n" % (
details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name),
len(details.initial_results.unexpected_results_by_name))
self.assertTrue(one_line_summary in logging_stream.buflist)
# Ensure the results were summarized properly.
self.assertEqual(details.summarized_results['num_regressions'], details.exit_code)
# Ensure the image diff percentage is in the results.
self.assertEqual(details.summarized_results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)
# Ensure the results were written out and displayed.
full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
self.assertEqual(json.loads(json_to_eval), details.summarized_results)
self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
def test_batch_size(self):
batch_tests_run = get_test_batches(['--batch-size', '2'])
for batch in batch_tests_run:
self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))
def test_max_locked_shards(self):
# Tests for the default of using one locked shard even in the case of more than one child process.
if not self.should_test_processes:
return
save_env_webkit_test_max_locked_shards = None
if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
_, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
try:
self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
finally:
if save_env_webkit_test_max_locked_shards:
os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards
def test_child_processes_2(self):
if self.should_test_processes:
_, regular_output, _ = logging_run(
['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))
def test_child_processes_min(self):
if self.should_test_processes:
_, regular_output, _ = logging_run(
['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
tests_included=True, shared_port=False)
self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))
def test_dryrun(self):
tests_run = get_tests_run(['--dry-run'])
self.assertEqual(tests_run, [])
tests_run = get_tests_run(['-n'])
self.assertEqual(tests_run, [])
def test_exception_raised(self):
# Exceptions raised by a worker are treated differently depending on
# whether they are in-process or out. Inline exceptions work as normal,
# which allows us to get the full stack trace and traceback from the
# worker. The downside to this is that it could be any error, but this
# is actually useful in testing.
#
# Exceptions raised in a separate process are re-packaged into
# WorkerExceptions (a subclass of BaseException), which carry a string capture of the stack that can
# be printed, but don't display properly in the unit test exception handlers.
self.assertRaises(BaseException, logging_run,
['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)
if self.should_test_processes:
self.assertRaises(BaseException, logging_run,
['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)
def test_full_results_html(self):
# FIXME: verify html?
details, _, _ = logging_run(['--full-results-html'])
self.assertEqual(details.exit_code, 0)
def test_hung_thread(self):
details, err, _ = logging_run(['--run-singly', '--time-out-ms=50', 'failures/expected/hang.html'], tests_included=True)
# Note that hang.html is marked as WontFix and all WontFix tests are
# expected to Pass, so that actually running them generates an "unexpected" error.
self.assertEqual(details.exit_code, 1)
self.assertNotEmpty(err)
def test_keyboard_interrupt(self):
# Note that this also tests running a test marked as SKIP if
# you specify it explicitly.
self.assertRaises(KeyboardInterrupt, logging_run, ['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)
if self.should_test_processes:
self.assertRaises(KeyboardInterrupt, logging_run,
['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True, shared_port=False)
def test_no_tests_found(self):
details, err, _ = logging_run(['resources'], tests_included=True)
self.assertEqual(details.exit_code, -1)
self.assertContains(err, 'No tests to run.\n')
def test_no_tests_found_2(self):
details, err, _ = logging_run(['foo'], tests_included=True)
self.assertEqual(details.exit_code, -1)
self.assertContains(err, 'No tests to run.\n')
def test_natural_order(self):
tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
tests_run = get_tests_run(['--order=natural'] + tests_to_run)
self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)
def test_natural_order_test_specified_multiple_times(self):
tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
tests_run = get_tests_run(['--order=natural'] + tests_to_run)
self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)
def test_random_order(self):
tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
tests_run = get_tests_run(['--order=random'] + tests_to_run)
self.assertEqual(sorted(tests_to_run), sorted(tests_run))
def test_random_order_test_specified_multiple_times(self):
tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
tests_run = get_tests_run(['--order=random'] + tests_to_run)
self.assertEqual(tests_run.count('passes/audio.html'), 2)
self.assertEqual(tests_run.count('passes/args.html'), 2)
def test_no_order(self):
tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
tests_run = get_tests_run(['--order=none'] + tests_to_run)
self.assertEqual(tests_to_run, tests_run)
def test_no_order_test_specified_multiple_times(self):
tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
tests_run = get_tests_run(['--order=none'] + tests_to_run)
self.assertEqual(tests_to_run, tests_run)
def test_no_order_with_directory_entries_in_natural_order(self):
tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
tests_run = get_tests_run(['--order=none'] + tests_to_run)
self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])
def test_gc_between_tests(self):
self.assertTrue(passing_run(['--gc-between-tests']))
def test_complex_text(self):
self.assertTrue(passing_run(['--complex-text']))
def test_threaded(self):
self.assertTrue(passing_run(['--threaded']))
def test_repeat_each(self):
tests_to_run = ['passes/image.html', 'passes/text.html']
tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])
def test_ignore_flag(self):
# Note that passes/image.html is expected to be run since we specified it directly.
tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
self.assertFalse('passes/text.html' in tests_run)
self.assertTrue('passes/image.html' in tests_run)
def test_skipped_flag(self):
tests_run = get_tests_run(['passes'])
self.assertFalse('passes/skipped/skip.html' in tests_run)
num_tests_run_by_default = len(tests_run)
# Check that nothing changes when we specify skipped=default.
self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
num_tests_run_by_default)
# Now check that we run one more test (the skipped one).
tests_run = get_tests_run(['--skipped=ignore', 'passes'])
self.assertTrue('passes/skipped/skip.html' in tests_run)
self.assertEqual(len(tests_run), num_tests_run_by_default + 1)
# Now check that we only run the skipped test.
self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])
# Now check that we don't run anything.
self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])
def test_iterations(self):
tests_to_run = ['passes/image.html', 'passes/text.html']
tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])
def test_repeat_each_iterations_num_tests(self):
# The total number of tests should be: number_of_tests *
# repeat_each * iterations
host = MockHost()
_, err, _ = logging_run(
['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
tests_included=True, host=host)
self.assertContains(err, "All 16 tests ran as expected.\n")
def test_run_chunk(self):
# Test that we actually select the right chunk
all_tests_run = get_tests_run(['passes', 'failures'])
chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
self.assertEqual(all_tests_run[4:8], chunk_tests_run)
# Test that we wrap around if the number of tests is not evenly divisible by the chunk size
tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)
def test_run_force(self):
# This raises an exception because we run
# failures/expected/exception.html, which is normally SKIPped.
self.assertRaises(ValueError, logging_run, ['--force'])
def test_run_part(self):
# Test that we actually select the right part
tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)
# Test that we wrap around if the number of tests is not evenly divisible by the chunk size
# (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
# last part repeats the first two tests).
chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)
def test_run_singly(self):
batch_tests_run = get_test_batches(['--run-singly'])
for batch in batch_tests_run:
self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))
def test_skip_failing_tests(self):
# This tests that we skip both known failing and known flaky tests. Because there are
# no known flaky tests in the default test_expectations, we add additional expectations.
host = MockHost()
host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')
batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
has_passes_text = False
for batch in batches:
self.assertFalse('failures/expected/text.html' in batch)
self.assertFalse('passes/image.html' in batch)
has_passes_text = has_passes_text or ('passes/text.html' in batch)
self.assertTrue(has_passes_text)
def test_run_singly_actually_runs_tests(self):
details, _, _ = logging_run(['--run-singly'], tests_included=True)
self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 1) # failures/expected/hang.html actually passes w/ --run-singly.
def test_single_file(self):
tests_run = get_tests_run(['passes/text.html'])
self.assertEqual(tests_run, ['passes/text.html'])
def test_single_file_with_prefix(self):
tests_run = get_tests_run(['LayoutTests/passes/text.html'])
self.assertEqual(['passes/text.html'], tests_run)
def test_single_skipped_file(self):
tests_run = get_tests_run(['failures/expected/keybaord.html'])
self.assertEqual([], tests_run)
def test_stderr_is_saved(self):
host = MockHost()
self.assertTrue(passing_run(host=host))
self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
'stuff going to stderr')
def test_test_list(self):
host = MockHost()
filename = '/tmp/foo.txt'
host.filesystem.write_text_file(filename, 'passes/text.html')
tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
self.assertEqual(['passes/text.html'], tests_run)
host.filesystem.remove(filename)
details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
self.assertEqual(details.exit_code, -1)
self.assertNotEmpty(err)
def test_test_list_with_prefix(self):
host = MockHost()
filename = '/tmp/foo.txt'
host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
self.assertEqual(['passes/text.html'], tests_run)
def test_missing_and_unexpected_results(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
details, err, _ = logging_run(['--no-show-results',
'failures/expected/missing_image.html',
'failures/unexpected/missing_text.html',
'failures/unexpected/text-image-checksum.html'],
tests_included=True, host=host)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 1)
expected_token = '"unexpected":{"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","image_diff_percent":1},"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
self.assertTrue(json_string.find(expected_token) != -1)
self.assertTrue(json_string.find('"num_regressions":1') != -1)
self.assertTrue(json_string.find('"num_flaky":0') != -1)
self.assertTrue(json_string.find('"num_missing":1') != -1)
def test_pixel_test_directories(self):
"""Both tests have failing checksums. We include only the first in pixel tests, so only that one should fail."""
host = MockHost()
args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
'failures/unexpected/pixeldir/image_in_pixeldir.html',
'failures/unexpected/image_not_in_pixeldir.html']
details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)
self.assertEqual(details.exit_code, 1)
expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE"'
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
self.assertTrue(json_string.find(expected_token) != -1)
def test_missing_and_unexpected_results_with_custom_exit_code(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
class CustomExitCodePort(test.TestPort):
def exit_code_from_summarized_results(self, unexpected_results):
return unexpected_results['num_regressions'] + unexpected_results['num_missing']
host = MockHost()
options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
test_port = CustomExitCodePort(host, options=options)
details, err, _ = logging_run(['--no-show-results',
'failures/expected/missing_image.html',
'failures/unexpected/missing_text.html',
'failures/unexpected/text-image-checksum.html'],
tests_included=True, host=host, port_obj=test_port)
self.assertEqual(details.exit_code, 2)
def test_crash_with_stderr(self):
host = MockHost()
_, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)
def test_no_image_failure_with_image_diff(self):
host = MockHost()
_, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)
def test_crash_log(self):
# FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
# Currently CrashLog uploading only works on Darwin.
if not self._platform.is_mac():
return
mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
host = MockHost()
host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
_, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
expected_crash_log = mock_crash_report
self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)
def test_web_process_crash_log(self):
# FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
# Currently CrashLog uploading only works on Darwin.
if not self._platform.is_mac():
return
mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
host = MockHost()
host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
logging_run(['failures/unexpected/web-process-crash-with-stderr.html'], tests_included=True, host=host)
self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)
def test_exit_after_n_failures_upload(self):
host = MockHost()
details, regular_output, user = logging_run(
['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
tests_included=True, host=host)
# By returning False, we know that the incremental results were generated and then deleted.
self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))
# This checks that we report only the number of tests that actually failed.
self.assertEqual(details.exit_code, 1)
# This checks that passes/text.html is considered SKIPped.
self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
# This checks that we told the user we bailed out.
self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())
# This checks that neither test ran as expected.
# FIXME: This log message is confusing; tests that were skipped should be called out separately.
self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())
def test_exit_after_n_failures(self):
# Unexpected failures should result in tests stopping.
tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)
# But we'll keep going for expected ones.
tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)
def test_exit_after_n_crashes(self):
# Unexpected crashes should result in tests stopping.
tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
self.assertEqual(['failures/unexpected/crash.html'], tests_run)
# Same with timeouts.
tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
self.assertEqual(['failures/unexpected/timeout.html'], tests_run)
# But we'll keep going for expected ones.
tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)
def test_results_directory_absolute(self):
# We run a configuration that should fail, to generate output, then
# look for what the output results url was.
host = MockHost()
with host.filesystem.mkdtemp() as tmpdir:
_, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])
def test_results_directory_default(self):
# We run a configuration that should fail, to generate output, then
# look for what the output results url was.
# This is the default location.
_, _, user = logging_run(tests_included=True)
self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
def test_results_directory_relative(self):
# We run a configuration that should fail, to generate output, then
# look for what the output results url was.
host = MockHost()
host.filesystem.maybe_make_directory('/tmp/cwd')
host.filesystem.chdir('/tmp/cwd')
_, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])
def test_retrying_and_flaky_tests(self):
host = MockHost()
details, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
self.assertEqual(details.exit_code, 0)
self.assertTrue('Retrying' in err.getvalue())
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))
# Now we test that --clobber-old-results does remove the old entries and the old retries,
# and that we don't retry again.
host = MockHost()
details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
self.assertEqual(details.exit_code, 1)
self.assertTrue('Clobbering old results' in err.getvalue())
self.assertTrue('flaky/text.html' in err.getvalue())
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
self.assertFalse(host.filesystem.exists('retries'))
def test_retrying_force_pixel_tests(self):
host = MockHost()
details, err, _ = logging_run(['--no-pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
self.assertEqual(details.exit_code, 1)
self.assertTrue('Retrying' in err.getvalue())
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
json = parse_full_results(json_string)
self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
{"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1})
self.assertFalse(json["pixel_tests_enabled"])
self.assertEqual(details.enabled_pixel_tests_in_retry, True)
def test_retrying_uses_retries_directory(self):
host = MockHost()
details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
self.assertEqual(details.exit_code, 1)
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
def test_run_order__inline(self):
# These next tests test that we run the tests in ascending alphabetical
# order per directory. HTTP tests are sharded separately from other tests,
# so we have to test both.
tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
self.assertEqual(tests_run, sorted(tests_run))
tests_run = get_tests_run(['http/tests/passes'])
self.assertEqual(tests_run, sorted(tests_run))
def test_tolerance(self):
class ImageDiffTestPort(test.TestPort):
def diff_image(self, expected_contents, actual_contents, tolerance=None):
self.tolerance_used_for_diff_image = self._options.tolerance
return (True, 1, None)
def get_port_for_run(args):
options, parsed_args = run_webkit_tests.parse_args(args)
host = MockHost()
test_port = ImageDiffTestPort(host, options=options)
res = passing_run(args, port_obj=test_port, tests_included=True)
self.assertTrue(res)
return test_port
base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']
# If we pass in an explicit tolerance argument, then that will be used.
test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
test_port = get_port_for_run(base_args + ['--tolerance', '0'])
self.assertEqual(0, test_port.tolerance_used_for_diff_image)
# Otherwise the port's default tolerance behavior (including ignoring it)
# should be used.
test_port = get_port_for_run(base_args)
self.assertEqual(None, test_port.tolerance_used_for_diff_image)
def test_virtual(self):
self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
'virtual/passes/text.html', 'virtual/passes/args.html']))
def test_reftest_run(self):
tests_run = get_tests_run(['passes/reftest.html'])
self.assertEqual(['passes/reftest.html'], tests_run)
def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
self.assertEqual(['passes/reftest.html'], tests_run)
def test_reftest_skip_reftests_if_no_ref_tests(self):
tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'])
self.assertEqual([], tests_run)
tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'])
self.assertEqual([], tests_run)
def test_reftest_expected_html_should_be_ignored(self):
tests_run = get_tests_run(['passes/reftest-expected.html'])
self.assertEqual([], tests_run)
def test_reftest_driver_should_run_expected_html(self):
tests_run = get_test_results(['passes/reftest.html'])
self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])
def test_reftest_driver_should_run_expected_mismatch_html(self):
tests_run = get_test_results(['passes/mismatch.html'])
self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])
def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
host = MockHost()
_, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
self.assertTrue(json_string.find('"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
self.assertTrue(json_string.find('"num_regressions":4') != -1)
self.assertTrue(json_string.find('"num_flaky":0') != -1)
self.assertTrue(json_string.find('"num_missing":1') != -1)
def test_additional_platform_directory(self):
self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))
def test_additional_expectations(self):
host = MockHost()
host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
tests_included=True, host=host))
def test_no_http_and_force(self):
# See test_run_force, using --force raises an exception.
# FIXME: We would like to check the warnings generated.
self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])
@staticmethod
def has_test_of_type(tests, type):
return [test for test in tests if type in test]
def test_no_http_tests(self):
batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'])
self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'http'))
self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'websocket'))
batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'])
self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'http'))
self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))
batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'])
self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'http'))
self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'websocket'))
def test_platform_tests_are_found(self):
tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)
def test_output_diffs(self):
# Test to ensure that we don't generate -wdiff.html or -pretty.html if wdiff and PrettyPatch
# aren't available.
host = MockHost()
_, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
written_files = host.filesystem.written_files
self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))
full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
self.assertEqual(full_results['has_wdiff'], False)
self.assertEqual(full_results['has_pretty_patch'], False)
def test_unsupported_platform(self):
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)
self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
self.assertEqual(stdout.getvalue(), '')
self.assertTrue('unsupported platform' in stderr.getvalue())
def test_verbose_in_child_processes(self):
# When we actually run multiple processes, we may have to reconfigure logging in the
# child process (e.g., on win32) and we need to make sure that works and we still
# see the verbose log output. However, we can't use logging_run() because using
# outputcapture to capture stdout and stderr later results in a nonpicklable host.
# Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
if not self.should_test_processes:
return
options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
host = MockHost()
port_obj = host.port_factory.get(port_name=options.platform, options=options)
logging_stream = StringIO.StringIO()
run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
self.assertTrue('text.html passed' in logging_stream.getvalue())
self.assertTrue('image.html passed' in logging_stream.getvalue())
class EndToEndTest(unittest.TestCase):
def test_reftest_with_two_notrefs(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
_, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
file_list = host.filesystem.written_files.keys()
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
json = parse_full_results(json_string)
self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
{"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "image_diff_percent": 1})
self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
{"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="]})
self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
{"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="]})
class RebaselineTest(unittest.TestCase, StreamTestingMixin):
def assertBaselines(self, file_list, file, extensions, err):
"assert that the file_list contains the baselines."""
for ext in extensions:
baseline = file + "-expected" + ext
baseline_msg = 'Writing new expected result "%s"\n' % baseline
self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
self.assertContains(err, baseline_msg)
# FIXME: Add tests to ensure that we're *not* writing baselines when we're not
# supposed to be.
def test_reset_results(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
details, err, _ = logging_run(
['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
tests_included=True, host=host, new_results=True)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
self.assertEqual(len(file_list), 8)
self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)
def test_missing_results(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
details, err, _ = logging_run(['--no-show-results',
'failures/unexpected/missing_text.html',
'failures/unexpected/missing_image.html',
'failures/unexpected/missing_audio.html',
'failures/unexpected/missing_render_tree_dump.html'],
tests_included=True, host=host, new_results=True)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
self.assertEqual(len(file_list), 10)
self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)
def test_new_baseline(self):
# Test that we update the platform expectations in the version-specific directories
# for both existing and new baselines.
host = MockHost()
details, err, _ = logging_run(
['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
tests_included=True, host=host, new_results=True)
file_list = host.filesystem.written_files.keys()
self.assertEqual(details.exit_code, 0)
self.assertEqual(len(file_list), 8)
self.assertBaselines(file_list,
"platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
self.assertBaselines(file_list,
"platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)
class PortTest(unittest.TestCase):
def assert_mock_port_works(self, port_name, args=[]):
self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))
def disabled_test_chromium_mac_lion(self):
self.assert_mock_port_works('chromium-mac-lion')
def disabled_test_chromium_mac_lion_in_test_shell_mode(self):
self.assert_mock_port_works('chromium-mac-lion', args=['--additional-drt-flag=--test-shell'])
def disabled_test_qt_linux(self):
self.assert_mock_port_works('qt-linux')
def disabled_test_mac_lion(self):
self.assert_mock_port_works('mac-lion')
class MainTest(unittest.TestCase):
def test_exception_handling(self):
orig_run_fn = run_webkit_tests.run
# unused args pylint: disable=W0613
def interrupting_run(port, options, args, stderr):
raise KeyboardInterrupt
def successful_run(port, options, args, stderr):
class FakeRunDetails(object):
exit_code = -1
return FakeRunDetails()
def exception_raising_run(port, options, args, stderr):
assert False
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
try:
run_webkit_tests.run = interrupting_run
res = run_webkit_tests.main([], stdout, stderr)
self.assertEqual(res, run_webkit_tests.INTERRUPTED_EXIT_STATUS)
run_webkit_tests.run = successful_run
res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
self.assertEqual(res, -1)
run_webkit_tests.run = exception_raising_run
res = run_webkit_tests.main([], stdout, stderr)
self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
finally:
run_webkit_tests.run = orig_run_fn
| bsd-3-clause |
nwjs/chromium.src | third_party/blink/tools/print_web_test_times.py | 9 | 1704 | #!/usr/bin/env vpython
#
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from blinkpy.common import host
from blinkpy.web_tests import print_web_test_times
print_web_test_times.main(host.Host(), sys.argv[1:])
| bsd-3-clause |
tudorvio/nova | nova/api/openstack/compute/plugins/v3/scheduler_hints.py | 43 | 1771 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute.schemas.v3 import scheduler_hints as schema
from nova.api.openstack import extensions
ALIAS = "os-scheduler-hints"
class SchedulerHints(extensions.V3APIExtensionBase):
"""Pass arbitrary key/value pairs to the scheduler."""
name = "SchedulerHints"
alias = ALIAS
version = 1
def get_controller_extensions(self):
return []
def get_resources(self):
return []
# NOTE(gmann): Accepting request body in this function to fetch "scheduler
# hint". This is a workaround to allow OS_SCH-HNT at the top level
# of the body request, but that it will be changed in the future to be a
# subset of the servers dict.
def server_create(self, server_dict, create_kwargs, req_body):
scheduler_hints = {}
if 'os:scheduler_hints' in req_body:
scheduler_hints = req_body['os:scheduler_hints']
elif 'OS-SCH-HNT:scheduler_hints' in req_body:
scheduler_hints = req_body['OS-SCH-HNT:scheduler_hints']
create_kwargs['scheduler_hints'] = scheduler_hints
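# Illustrative request body (hypothetical values, not taken from this module):
# either hint key above is copied verbatim into create_kwargs['scheduler_hints'], e.g.
# {"server": {...}, "os:scheduler_hints": {"group": "<server-group-uuid>"}}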
def get_server_create_schema(self):
return schema.server_create
| apache-2.0 |
jonashaag/jedi | test/completion/ordering.py | 13 | 2079 | # -----------------
# normal
# -----------------
a = ""
a = 1
#? int()
a
#? []
a.append
a = list
b = 1; b = ""
#? str()
b
# temp should not be accessible before definition
#? []
temp
a = 1
temp = b;
b = a
a = temp
#? int()
b
#? int()
b
#? str()
a
a = tuple
if 1:
a = list
#? ['append']
a.append
#? ['index']
a.index
# -----------------
# tuples exchanges
# -----------------
a, b = 1, ""
#? int()
a
#? str()
b
b, a = a, b
#? int()
b
#? str()
a
b, a = a, b
#? int()
a
#? str()
b
# -----------------
# function
# -----------------
def a(a=3):
#? int()
a
#? []
a.func
return a
#? int()
a(2)
#? []
a(2).func
a_param = 3
def func(a_param):
# should not be int
#? []
a_param.
from os import path
# should not return a function, because `a` is a function above
def f(b, a): return a
#? []
f(b=3)
# -----------------
# closure
# -----------------
def x():
a = 0
def x():
return a
a = 3.0
return x()
#? float()
x()
# -----------------
# class
# -----------------
class A(object):
a = ""
a = 3
#? int()
a
a = list()
def __init__(self):
self.b = ""
def before(self):
self.b = 3
# TODO should this be so? include entries after cursor?
#? int() str() list
self.b
self.b = list
self.a = 1
#? str() int()
self.a
#? ['after']
self.after
self.c = 3
#? int()
self.c
def after(self):
self.a = ''
c = set()
#? list()
A.a
a = A()
#? ['after']
a.after
#? []
a.upper
#? []
a.append
#? []
a.real
#? str() int()
a.a
a = 3
class a():
def __init__(self, a):
self.a = a
#? float()
a(1.0).a
#?
a().a
# -----------------
# imports
# -----------------
math = 3
import math
#? ['cosh']
math.cosh
#? []
math.real
math = 3
#? int()
math
#? []
math.cos
# do the same for star imports
cosh = 3
from math import *
# cosh doesn't work, but that's not a problem, star imports should be at the
# start of EVERY script!
cosh.real
cosh = 3
#? int()
cosh
| mit |
ricxsar/ardupilot | Tools/LogAnalyzer/tests/TestGPSGlitch.py | 273 | 2325 | from LogAnalyzer import Test,TestResult
import DataflashLog
class TestGPSGlitch(Test):
'''test for GPS glitch reporting or bad GPS data (satellite count, hdop)'''
def __init__(self):
Test.__init__(self)
self.name = "GPS"
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
if "GPS" not in logdata.channels:
self.result.status = TestResult.StatusType.UNKNOWN
self.result.statusMessage = "No GPS log data"
return
# glitch protection is currently copter-only, but might be added to other vehicle types later and there's no harm in leaving the test in for all
gpsGlitchCount = 0
if "ERR" in logdata.channels:
assert(len(logdata.channels["ERR"]["Subsys"].listData) == len(logdata.channels["ERR"]["ECode"].listData))
for i in range(len(logdata.channels["ERR"]["Subsys"].listData)):
subSys = logdata.channels["ERR"]["Subsys"].listData[i][1]
eCode = logdata.channels["ERR"]["ECode"].listData[i][1]
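# An ERR record with Subsys 11 and ECode 2 is counted as a GPS glitch event.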
if subSys == 11 and (eCode == 2):
gpsGlitchCount += 1
if gpsGlitchCount:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "GPS glitch errors found (%d)" % gpsGlitchCount
# define and check different thresholds for WARN level and FAIL level
# TODO: for plane, only check after first instance of throttle > 0, or after takeoff if we can reliably detect it
minSatsWARN = 6
minSatsFAIL = 5
maxHDopWARN = 3.0
maxHDopFAIL = 10.0
foundBadSatsWarn = logdata.channels["GPS"]["NSats"].min() < minSatsWARN
foundBadHDopWarn = logdata.channels["GPS"]["HDop"].max() > maxHDopWARN
foundBadSatsFail = logdata.channels["GPS"]["NSats"].min() < minSatsFAIL
foundBadHDopFail = logdata.channels["GPS"]["HDop"].max() > maxHDopFAIL
satsMsg = "Min satellites: %s, Max HDop: %s" % (logdata.channels["GPS"]["NSats"].min(), logdata.channels["GPS"]["HDop"].max())
if gpsGlitchCount:
self.result.statusMessage = self.result.statusMessage + "\n" + satsMsg
if foundBadSatsFail or foundBadHDopFail:
if not gpsGlitchCount:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = satsMsg
elif foundBadSatsWarn or foundBadHDopWarn:
if not gpsGlitchCount:
self.result.status = TestResult.StatusType.WARN
self.result.statusMessage = satsMsg
| gpl-3.0 |
TalShafir/ansible | test/units/modules/network/f5/test_bigip_sys_global.py | 5 | 4423 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_sys_global import ApiParameters
from library.modules.bigip_sys_global import ModuleParameters
from library.modules.bigip_sys_global import ModuleManager
from library.modules.bigip_sys_global import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_sys_global import ApiParameters
from ansible.modules.network.f5.bigip_sys_global import ModuleParameters
from ansible.modules.network.f5.bigip_sys_global import ModuleManager
from ansible.modules.network.f5.bigip_sys_global import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
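"""Read a fixture file, parse it as JSON when possible, and cache the result."""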
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
banner_text='this is a banner',
console_timeout=100,
gui_setup='yes',
lcd_display='yes',
mgmt_dhcp='yes',
net_reboot='yes',
quiet_boot='yes',
security_banner='yes',
)
p = ModuleParameters(params=args)
assert p.banner_text == 'this is a banner'
assert p.console_timeout == 100
assert p.gui_setup == 'enabled'
assert p.lcd_display == 'enabled'
assert p.mgmt_dhcp == 'enabled'
assert p.net_reboot == 'enabled'
assert p.quiet_boot == 'enabled'
assert p.security_banner == 'enabled'
def test_api_parameters(self):
args = load_fixture('load_sys_global_settings.json')
p = ApiParameters(params=args)
assert 'Welcome to the BIG-IP Configuration Utility' in p.banner_text
assert p.console_timeout == 0
assert p.gui_setup == 'disabled'
assert p.lcd_display == 'enabled'
assert p.mgmt_dhcp == 'enabled'
assert p.net_reboot == 'disabled'
assert p.quiet_boot == 'enabled'
assert p.security_banner == 'enabled'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_update(self, *args):
set_module_args(dict(
banner_text='this is a banner',
console_timeout=100,
password='admin',
server='localhost',
user='admin',
state='present'
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(params=load_fixture('load_sys_global_settings.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
| gpl-3.0 |
hdinsight/hue | desktop/core/ext-py/Paste-2.0.1/tests/test_exceptions/test_formatter.py | 47 | 4985 | from paste.exceptions import formatter
from paste.exceptions import collector
import sys
import os
import difflib
class Mock(object):
def __init__(self, **kw):
for name, value in kw.items():
setattr(self, name, value)
class Supplement(Mock):
object = 'test_object'
source_url = 'http://whatever.com'
info = 'This is some supplemental information'
args = ()
def getInfo(self):
return self.info
def __call__(self, *args):
self.args = args
return self
class BadSupplement(Supplement):
def getInfo(self):
raise ValueError("This supplemental info is buggy")
def call_error(sup):
1 + 2
__traceback_supplement__ = (sup, ())
assert 0, "I am an error"
def raise_error(sup='default'):
if sup == 'default':
sup = Supplement()
for i in range(10):
__traceback_info__ = i
if i == 5:
call_error(sup=sup)
def hide(t, inner, *args, **kw):
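"""Call inner(*args, **kw) from a frame that sets __traceback_hide__ to t, so that frame can be hidden from the formatted report."""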
__traceback_hide__ = t
return inner(*args, **kw)
def pass_through(info, inner, *args, **kw):
"""
To add another frame to the call; detectable because
__tracback_info__ is set to `info`
"""
__traceback_info__ = info
return inner(*args, **kw)
def format(type='html', **ops):
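"""Collect the currently handled exception and render it with formatter.format_<type>(data, **ops)."""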
data = collector.collect_exception(*sys.exc_info())
report = getattr(formatter, 'format_' + type)(data, **ops)
return report
formats = ('text', 'html')
def test_excersize():
for f in formats:
try:
raise_error()
except:
format(f)
def test_content():
for f in formats:
try:
raise_error()
except:
result = format(f)
print(result)
assert 'test_object' in result
assert 'http://whatever.com' in result
assert 'This is some supplemental information' in result
assert 'raise_error' in result
assert 'call_error' in result
assert '5' in result
assert 'test_content' in result
else:
assert 0
def test_trim():
current = os.path.abspath(os.getcwd())
for f in formats:
try:
raise_error()
except:
result = format(f, trim_source_paths=[(current, '.')])
assert current not in result
assert ('%stest_formatter.py' % os.sep) in result, ValueError(repr(result))
else:
assert 0
def test_hide():
for f in formats:
try:
hide(True, raise_error)
except:
result = format(f)
print(result)
assert 'in hide_inner' not in result
assert 'inner(*args, **kw)' not in result
else:
assert 0
def print_diff(s1, s2):
differ = difflib.Differ()
result = list(differ.compare(s1.splitlines(), s2.splitlines()))
print('\n'.join(result))
def test_hide_supppressed():
"""
When an error occurs and __traceback_stop__ is true for the
erroneous frame, then that setting should be ignored.
"""
for f in ['html']: #formats:
results = []
for hide_value in (False, 'after'):
try:
pass_through(
'a',
hide,
hide_value,
pass_through,
'b',
raise_error)
except:
results.append(format(f))
else:
assert 0
if results[0] != results[1]:
print_diff(results[0], results[1])
assert 0
def test_hide_after():
for f in formats:
try:
pass_through(
'AABB',
hide, 'after',
pass_through, 'CCDD',
# A little whitespace to keep this line out of the
# content part of the report
hide, 'reset',
raise_error)
except:
result = format(f)
assert 'AABB' in result
assert 'CCDD' not in result
assert 'raise_error' in result
else:
assert 0
def test_hide_before():
for f in formats:
try:
pass_through(
'AABB',
hide, 'before',
raise_error)
except:
result = format(f)
print(result)
assert 'AABB' not in result
assert 'raise_error' in result
else:
assert 0
def test_make_wrappable():
assert '<wbr>' in formatter.make_wrappable('x'*1000)
# I'm just going to test that this doesn't excede the stack limit:
formatter.make_wrappable(';'*2000)
assert (formatter.make_wrappable('this that the other')
== 'this that the other')
assert (formatter.make_wrappable('this that ' + ('x'*50) + ';' + ('y'*50) + ' and the other')
== 'this that '+('x'*50) + ';<wbr>' + ('y'*50) + ' and the other')
| apache-2.0 |
tectronics/ambhas | examples/run_richards.py | 3 | 1615 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 13 10:47:16 2012
@author: Sat Kumar Tomer
@email: [email protected]
@website: www.ambhas.com
"""
from ambhas.richards import RICHARDS_1D
import matplotlib.pyplot as plt
from scipy.io import netcdf as nc
params = {'axes.labelsize': 15,
'text.fontsize': 15,
'legend.fontsize': 15,
'xtick.labelsize': 15,
'ytick.labelsize': 15,
'text.usetex': False}
plt.rcParams.update(params)
maddur = RICHARDS_1D('/home/tomer/svn/ambhas/examples/maddur.xls')
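# Assumption: constructing RICHARDS_1D from maddur.xls runs the 1-D Richards-equation simulation and writes its output to the netCDF file named by ofile_name, which is read back below for plotting.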
output_file = nc.NetCDFFile(maddur.ofile_name, 'r')
theta = output_file.variables['sm'][:]
doy = range(1,367)
rain = output_file.variables['rain'][:]
# main plot
plt.close()
fig = plt.figure(figsize=(6, 4.5))
ax = plt.axes([0.15, 0.15, 0.7, 0.7])
ax.plot(doy,theta[0,:],'b')
ax.plot(doy,theta[20,:],'g')
ax.plot(doy,theta[39,:],'c')
ax.set_ylabel('Soil Moisture (v/v)')
ax.set_ylim(ymax=0.4)
ax.set_xlim(xmax=366)
ax.set_xlabel('DOY')
fig.canvas.draw()
# precipitation plot
ax2 = plt.twinx()
ax2.bar(doy,rain*86400*1000, label='Precipitation', color='m', edgecolor='m')
ax2.set_ylabel('Precipitation (mm)')
ax2.set_ylim(ymax=100)
ax2.set_xlim(xmax=366)
ax2.invert_yaxis()
p1 = plt.Rectangle((0, 0), 1, 1, fc="m")
p2 = plt.Rectangle((0, 0), 1, 1, fc="b")
p3 = plt.Rectangle((0, 0), 1, 1, fc="g")
p4 = plt.Rectangle((0, 0), 1, 1, fc="c")
leg = plt.legend([p1,p2,p3,p4], ["Precipitation","SM at surface", "SM at 1m", "SM at 2m"], loc=(0.01,0.4))
frame = leg.get_frame()
frame.set_alpha(0.5)
plt.savefig('/home/tomer/svn/ambhas-wiki/images/run_richards.png')
| lgpl-2.1 |
bnoordhuis/suv | deps/gyp/test/mac/gyptest-postbuild-copy-bundle.py | 172 | 2697 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that a postbuild copying a dependent framework into an app bundle is
rerun if the resources in the framework change.
"""
import TestGyp
import os.path
import sys
if sys.platform == 'darwin':
# TODO(thakis): Make this pass with the make generator, http://crbug.com/95529
test = TestGyp.TestGyp(formats=['ninja', 'xcode'])
CHDIR = 'postbuild-copy-bundle'
test.run_gyp('test.gyp', chdir=CHDIR)
app_bundle_dir = test.built_file_path('Test app.app', chdir=CHDIR)
bundled_framework_dir = os.path.join(
app_bundle_dir, 'Contents', 'My Framework.framework', 'Resources')
final_plist_path = os.path.join(bundled_framework_dir, 'Info.plist')
final_resource_path = os.path.join(bundled_framework_dir, 'resource_file.sb')
final_copies_path = os.path.join(
app_bundle_dir, 'Contents', 'My Framework.framework', 'Versions', 'A',
'Libraries', 'copied.txt')
# Check that the dependency was built and copied into the app bundle:
test.build('test.gyp', 'test_app', chdir=CHDIR)
test.must_exist(final_resource_path)
test.must_match(final_resource_path,
'This is included in the framework bundle.\n')
test.must_exist(final_plist_path)
test.must_contain(final_plist_path, '''\
\t<key>RandomKey</key>
\t<string>RandomValue</string>''')
# Touch the dependency's bundle resource, and check that the modification
# makes it all the way into the app bundle:
test.sleep()
test.write('postbuild-copy-bundle/resource_file.sb', 'New text\n')
test.build('test.gyp', 'test_app', chdir=CHDIR)
test.must_exist(final_resource_path)
test.must_match(final_resource_path, 'New text\n')
# Check the same for the plist file.
test.sleep()
contents = test.read('postbuild-copy-bundle/Framework-Info.plist')
contents = contents.replace('RandomValue', 'NewRandomValue')
test.write('postbuild-copy-bundle/Framework-Info.plist', contents)
test.build('test.gyp', 'test_app', chdir=CHDIR)
test.must_exist(final_plist_path)
test.must_contain(final_plist_path, '''\
\t<key>RandomKey</key>
\t<string>NewRandomValue</string>''')
# Check the same for the copies section, test for http://crbug.com/157077
test.sleep()
contents = test.read('postbuild-copy-bundle/copied.txt')
contents = contents.replace('old', 'new')
test.write('postbuild-copy-bundle/copied.txt', contents)
test.build('test.gyp', 'test_app', chdir=CHDIR)
test.must_exist(final_copies_path)
test.must_contain(final_copies_path, 'new copied file')
test.pass_test()
| isc |
hulifox008/openembedded | contrib/qa/checksum/checksum.py | 45 | 2326 | #
# Helper utility to verify checksums of SRC_URI's
#
# To ease parsing I will use INI files to contain the
# checksums; at least they will force some kind of structure. This makes
# it easy to add and replace sums.
#
#
# Example:
# [PN-PV-filename]
# md5=THESUM
# sha256=OTHERSUM
#
# [PN-filename]
# md5=THESUM
# sha256=OTHERSUM
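#
# A purely illustrative sample.conf entry for the test call at the bottom of
# this file might look like the following -- the checksum values are
# placeholders, not the real sums of that tarball:
#
# [qtopia-core-4.3.0-ftp://ftp.trolltech.com/qt/source/qtopia-core-opensource-4.2.3.tar.gz]
# md5=00000000000000000000000000000000
# sha256=0000000000000000000000000000000000000000000000000000000000000000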
def verify_file(config_path, pn, pv, src_uri, localpath):
"""
Verify using the INI file at config_path and check that
the localpath matches the one specified by the PN-PV-SRCURI
inside the ini file
"""
import ConfigParser, os
parser = ConfigParser.ConfigParser()
if not len(parser.read(config_path)) == 1:
raise Exception("Can not open the '%s'" % config_path)
# Try PN-PV-SRC_URI first and then try PN-SRC_URI
# we rely on the get method to create errors
pn_pv_src = "%s-%s-%s" % (pn,pv,src_uri)
pn_src = "%s-%s" % (pn,src_uri)
if parser.has_section(pn_pv_src):
md5 = parser.get(pn_pv_src, "md5")
sha256 = parser.get(pn_pv_src, "sha256")
elif parser.has_section(pn_src):
md5 = parser.get(pn_src, "md5")
sha256 = parser.get(pn_src, "sha256")
else:
raise Exception("Can not find a section for '%s' '%s' and '%s'" % (pn,pv,src_uri))
# md5 and sha256 should be valid now
if not os.path.exists(localpath):
raise Exception("The path does not exist '%s'" % localpath)
# call md5(sum) and shasum
try:
md5pipe = os.popen('md5sum ' + localpath)
md5data = (md5pipe.readline().split() or [ "" ])[0]
md5pipe.close()
except OSError:
raise Exception("Executing md5sum failed")
try:
shapipe = os.popen('shasum -a256 -p ' + localpath)
shadata = (shapipe.readline().split() or [ "" ])[0]
shapipe.close()
except OSError:
raise Exception("Executing shasum failed")
if not md5 == md5data:
raise Exception("MD5 Sums do not match. Wanted: '%s' Got: '%s'" % (md5, md5data))
if not sha256 == shadata:
raise Exception("SHA256 Sums do not match. Wanted: '%s' Got: '%s'" % (sha256, shadata))
return True
# Test it
verify_file("sample.conf", "qtopia-core", "4.3.0", "ftp://ftp.trolltech.com/qt/source/qtopia-core-opensource-4.2.3.tar.gz", "test.file")
| mit |
simartin/servo | tests/wpt/web-platform-tests/common/security-features/scope/document.py | 18 | 1179 | import os, sys, json
from wptserve.utils import isomorphic_decode, isomorphic_encode
import importlib
util = importlib.import_module("common.security-features.scope.util")
def main(request, response):
policyDeliveries = json.loads(request.GET.first(b"policyDeliveries", b"[]"))
maybe_additional_headers = {}
meta = u''
error = u''
for delivery in policyDeliveries:
if delivery[u'deliveryType'] == u'meta':
if delivery[u'key'] == u'referrerPolicy':
meta += u'<meta name="referrer" content="%s">' % delivery[u'value']
else:
error = u'invalid delivery key'
elif delivery[u'deliveryType'] == u'http-rp':
if delivery[u'key'] == u'referrerPolicy':
maybe_additional_headers[b'Referrer-Policy'] = isomorphic_encode(delivery[u'value'])
else:
error = u'invalid delivery key'
else:
error = u'invalid deliveryType'
handler = lambda: util.get_template(u"document.html.template") % ({
u"meta": meta,
u"error": error
})
util.respond(
request,
response,
payload_generator=handler,
content_type=b"text/html",
maybe_additional_headers=maybe_additional_headers)
| mpl-2.0 |
spacewalkproject/spacewalk | client/tools/rhncfg/config_management/rhncfg_remove.py | 10 | 2929 | #
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import sys
from config_common import handler_base, utils
from config_common.rhn_log import log_debug, die
try: # python2
import xmlrpclib
except ImportError: # python3
import xmlrpc.client as xmlrpclib
class Handler(handler_base.HandlerBase):
_usage_options = "[options] file [ file ... ]"
_options_table = handler_base.HandlerBase._options_table + [
handler_base.HandlerBase._option_class(
'-c', '--channel', action="store",
help="Remove files from this config channel",
),
handler_base.HandlerBase._option_class(
'-t', '--topdir', action="store",
help="Make all files relative to this string",
),
]
def run(self):
log_debug(2)
r = self.repository
if len(self.args) == 0:
die(0, "No files supplied (use --help for help)")
channel = self.options.channel
if not channel:
die(6, "Config channel not specified")
r = self.repository
if not r.config_channel_exists(channel):
die(6, "Error: config channel %s does not exist" % channel)
files = [utils.normalize_path(x) for x in self.args]
files_to_remove = []
if self.options.topdir:
if not os.path.isdir(self.options.topdir):
die(8, "--topdir specified, but `%s' not a directory" %
self.options.topdir)
for f in files:
if not f.startswith(self.options.topdir):
die(8, "--topdir %s specified, but file `%s' doesn't comply"
% (self.options.topdir, f))
files_to_remove.append((f, f[len(self.options.topdir):]))
else:
for f in files:
files_to_remove.append((f, f))
print("Removing from config channel %s" % channel)
for (local_file, remote_file) in files_to_remove:
try:
r.remove_file(channel, remote_file)
except xmlrpclib.Fault:
e = sys.exc_info()[1]
if e.faultCode == -4011:
print("%s does not exist" % remote_file)
continue
raise
else:
print("%s removed" % remote_file)
| gpl-2.0 |
YzPaul3/h2o-3 | scripts/send_to_mysql.py | 7 | 3583 | import sys, os
import csv
import mysql.connector
from mysql.connector.constants import ClientFlag
import traceback
class SendDataToMysql:
def __init__(self):
        pass  # no per-instance state to set up
def add_test_cases_to_h2o(self):
#Connect to mysql database
h2o = mysql.connector.connect(client_flags=[ClientFlag.LOCAL_FILES],user='root', password='0xdata', host='172.16.2.178', database='h2o')
cursor = h2o.cursor()
#Send data to mysql database
try:
#Sending accuracyTestCases.csv
cursor.execute("LOAD DATA LOCAL INFILE '../h2o-test-accuracy/src/test/resources/accuracyTestCases.csv' INTO "
"TABLE TestCases COLUMNS TERMINATED BY ',' LINES TERMINATED BY '\n' IGNORE 1 LINES;")
#Commit query
h2o.commit()
except:
traceback.print_exc()
h2o.rollback()
assert False, "Failed to add accuracy test cases to h2o database!"
def add_accuracy_data(self):
#Connect to mysql database
h2o = mysql.connector.connect(client_flags=[ClientFlag.LOCAL_FILES],user='root', password='0xdata', host='172.16.2.178', database='h2o')
cursor = h2o.cursor()
#Send data to mysql database
try:
#Sending accuracyDatasets
cursor.execute("LOAD DATA LOCAL INFILE '../h2o-test-accuracy/src/test/resources/accuracyDataSets.csv' INTO "
"TABLE AccuracyDatasets COLUMNS TERMINATED BY ',' LINES TERMINATED BY '\n' IGNORE 1 LINES;")
#Commit query
h2o.commit()
except:
traceback.print_exc()
h2o.rollback()
assert False, "Failed to add accuracy test cases to h2o database!"
def drop_join_test_cases_tables(self):
#Connect to mysql database
h2o = mysql.connector.connect(user='root', password='0xdata', host='172.16.2.178', database='h2o')
cursor = h2o.cursor()
try:
drop_join_test_cases_query = """
DROP TABLES IF EXISTS TestCasesResults;
"""
cursor.execute(drop_join_test_cases_query)
except:
traceback.print_exc()
h2o.rollback()
assert False, "Failed to drop TestCasesResults table!"
def join_test_cases_results(self):
#Connect to mysql database
h2o = mysql.connector.connect(client_flags=[ClientFlag.LOCAL_FILES],user='root', password='0xdata', host='172.16.2.178', database='h2o')
cursor = h2o.cursor()
#Drop table if exists before re creating
self.drop_join_test_cases_tables()
try:
join_query = """
CREATE TABLE TestCasesResults AS(
SELECT *
FROM AccuracyTestCaseResults
LEFT JOIN TestCases
ON AccuracyTestCaseResults.testcase_id = TestCases.test_case_id
LEFT JOIN AccuracyDatasets
ON TestCases.training_data_set_id = AccuracyDatasets.data_set_id);
"""
cursor.execute(join_query)
except:
traceback.print_exc()
h2o.rollback()
assert False, "Failed to join AccuracyTestCaseResults, TestCases, and AccuracyDatasets!"
cursor.close()
h2o.close()
if __name__ == '__main__':
#SendDataToMysql().add_test_cases_to_h2o()
#SendDataToMysql().add_accuracy_data()
SendDataToMysql().join_test_cases_results() | apache-2.0 |
qinyushuang/robotframework-selenium2library | test/resources/testserver/testserver.py | 61 | 3546 | # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/336012
import SimpleHTTPServer
import BaseHTTPServer
import httplib
import os
class StoppableHttpRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""http request handler with QUIT stopping the server"""
def do_QUIT(self):
"""send 200 OK response, and set server.stop to True"""
self.send_response(200)
self.end_headers()
self.server.stop = True
def do_POST(self):
        # We could also process parameters here using something like below.
# length = self.headers['Content-Length']
# print self.rfile.read(int(length))
self.do_GET()
def send_head(self):
# This is ripped directly from SimpleHTTPRequestHandler,
# only the cookie part is added.
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
if ctype.startswith('text/'):
mode = 'r'
else:
mode = 'rb'
try:
f = open(path, mode)
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.send_header("Set-Cookie", "test=seleniumlibrary;")
self.send_header("Set-Cookie", "another=value;")
self.end_headers()
return f
class StoppableHttpServer(BaseHTTPServer.HTTPServer):
"""http server that reacts to self.stop flag"""
def serve_forever(self):
"""Handle one request at a time until stopped."""
self.stop = False
while not self.stop:
self.handle_request()
def stop_server(port=7000):
"""send QUIT request to http server running on localhost:<port>"""
conn = httplib.HTTPConnection("localhost:%d" % port)
conn.request("QUIT", "/")
conn.getresponse()
def start_server(port=7000):
    import os, sys
os.chdir(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), '..'))
server = StoppableHttpServer(('', port), StoppableHttpRequestHandler)
server.serve_forever()
if __name__ == '__main__':
import sys
if len(sys.argv) != 2 or sys.argv[1] not in [ 'start', 'stop' ]:
print 'usage: %s start|stop' % sys.argv[0]
sys.exit(1)
if sys.argv[1] == 'start':
start_server()
else:
stop_server()
| apache-2.0 |
nwchandler/ansible | test/units/module_utils/aws/test_aws_module.py | 88 | 6242 | # -*- coding: utf-8 -*-
# (c) 2017, Michael De La Rue
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
from pytest import importorskip
import unittest
from ansible.module_utils import basic
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils._text import to_bytes
from ansible.compat.tests.mock import Mock, patch
import json
importorskip("boto3")
botocore = importorskip("botocore")
class AWSModuleTestCase(unittest.TestCase):
basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}}))
def test_create_aws_module_should_set_up_params(self):
m = AnsibleAWSModule(argument_spec=dict(
win_string_arg=dict(type='list', default=['win'])
))
m_noretry_no_customargs = AnsibleAWSModule(
auto_retry=False, default_args=False,
argument_spec=dict(
success_string_arg=dict(type='list', default=['success'])
)
)
assert m, "module wasn't true!!"
assert m_noretry_no_customargs, "module wasn't true!!"
m_params = m.params
m_no_defs_params = m_noretry_no_customargs.params
assert 'region' in m_params
assert 'win' in m_params["win_string_arg"]
assert 'success' in m_no_defs_params["success_string_arg"]
assert 'aws_secret_key' not in m_no_defs_params
class ErrorReportingTestcase(unittest.TestCase):
def test_botocore_exception_reports_nicely_via_fail_json_aws(self):
basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}}))
module = AnsibleAWSModule(argument_spec=dict(
fail_mode=dict(type='list', default=['success'])
))
fail_json_double = Mock()
err_msg = {'Error': {'Code': 'FakeClass.FakeError'}}
with patch.object(basic.AnsibleModule, 'fail_json', fail_json_double):
try:
raise botocore.exceptions.ClientError(err_msg, 'Could not find you')
except Exception as e:
print("exception is " + str(e))
module.fail_json_aws(e, msg="Fake failure for testing boto exception messages")
assert(len(fail_json_double.mock_calls) >
0), "failed to call fail_json when should have"
assert(len(fail_json_double.mock_calls) <
2), "called fail_json multiple times when once would do"
assert("test_botocore_exception_reports_nicely"
in fail_json_double.mock_calls[0][2]["exception"]), \
"exception traceback doesn't include correct function, fail call was actually: " \
+ str(fail_json_double.mock_calls[0])
assert("Fake failure for testing boto exception messages:"
in fail_json_double.mock_calls[0][2]["msg"]), \
"error message doesn't include the local message; was: " \
+ str(fail_json_double.mock_calls[0])
assert("Could not find you" in fail_json_double.mock_calls[0][2]["msg"]), \
"error message doesn't include the botocore exception message; was: " \
+ str(fail_json_double.mock_calls[0])
try:
fail_json_double.mock_calls[0][2]["error"]
except KeyError:
raise Exception("error was missing; call was: " + str(fail_json_double.mock_calls[0]))
assert("FakeClass.FakeError" == fail_json_double.mock_calls[0][2]["error"]["code"]), \
"Failed to find error/code; was: " + str(fail_json_double.mock_calls[0])
def test_botocore_exception_without_response_reports_nicely_via_fail_json_aws(self):
basic._ANSIBLE_ARGS = to_bytes(json.dumps({'ANSIBLE_MODULE_ARGS': {}}))
module = AnsibleAWSModule(argument_spec=dict(
fail_mode=dict(type='list', default=['success'])
))
fail_json_double = Mock()
err_msg = None
with patch.object(basic.AnsibleModule, 'fail_json', fail_json_double):
try:
raise botocore.exceptions.ClientError(err_msg, 'Could not find you')
except Exception as e:
print("exception is " + str(e))
module.fail_json_aws(e, msg="Fake failure for testing boto exception messages")
assert(len(fail_json_double.mock_calls) > 0), "failed to call fail_json when should have"
assert(len(fail_json_double.mock_calls) < 2), "called fail_json multiple times"
assert("test_botocore_exception_without_response_reports_nicely_via_fail_json_aws"
in fail_json_double.mock_calls[0][2]["exception"]), \
"exception traceback doesn't include correct function, fail call was actually: " \
+ str(fail_json_double.mock_calls[0])
assert("Fake failure for testing boto exception messages"
in fail_json_double.mock_calls[0][2]["msg"]), \
"error message doesn't include the local message; was: " \
+ str(fail_json_double.mock_calls[0])
# I would have thought this should work, however the botocore exception comes back with
# "argument of type 'NoneType' is not iterable" so it's probably not really designed
# to handle "None" as an error response.
#
# assert("Could not find you" in fail_json_double.mock_calls[0][2]["msg"]), \
# "error message doesn't include the botocore exception message; was: " \
# + str(fail_json_double.mock_calls[0])
# TODO:
# - an exception without a message
# - plain boto exception
# - socket errors and other standard things.
| gpl-3.0 |
Ikusaba-san/Chiaki-Nanami | cogs/bot/halp.py | 1 | 7051 | import json
import random
from datetime import datetime
import discord
from discord.ext import commands
from ..utils.converter import Category
from ..utils.examples import wrap_example
from ..utils.formats import multi_replace
from ..utils.help import CogPages, help_command
from ..utils.misc import emoji_url
from ..utils.paginator import Paginator
CHIAKI_TIP_EPOCH = datetime(2017, 8, 24)
TIP_EMOJI = emoji_url('\N{ELECTRIC LIGHT BULB}')
DEFAULT_TIP = {
'title': 'You have reached the end of the tips!',
'description': 'Wait until the next update for more tips!'
}
TOO_FAR_TIP = {
'title': "You're going a bit too far here!",
'description': 'Wait until tomorrow or something!'
}
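# data/tips.json (loaded in Help.__init__ below) is assumed to be a plain JSON
# list of objects shaped like the fallback dicts above, e.g. (illustrative only):
#   [{"title": "Tip #1", "description": "..."}, {"title": "Tip #2", "description": "..."}]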
def _get_tip_index():
return (datetime.utcnow() - CHIAKI_TIP_EPOCH).days
def positive_index(s):
num = int(s)
if num <= 0:
raise commands.BadArgument('Value must be positive.')
return num
@wrap_example(positive_index)
def _positive_index_example(ctx):
return random.randint(1, len(ctx.bot.get_cog('Help').tips_list))
class TipPaginator(Paginator):
def __init__(self, *args, **kwargs):
super().__init__(*args, per_page=1, **kwargs)
def create_embed(self, page):
# page returns a tuple (because it returns a slice of entries)
p = page[0]
return (discord.Embed(colour=self.colour, description=p['description'])
.set_author(name=f"#{self._index + 1}: {p['title']}", icon_url=TIP_EMOJI)
)
_bracket_repls = {
'(': ')', ')': '(',
'[': ']', ']': '[',
'<': '>', '>': '<',
}
class Help:
def __init__(self, bot):
self.bot = bot
self.bot.remove_command('help')
self.bot.remove_command('h')
try:
with open('data/tips.json') as f:
self.tips_list = json.load(f)
except FileNotFoundError:
self.tips_list = []
help = help_command(name='help', aliases=['h'])
halp = help_command(str.upper, name='halp', hidden=True)
pleh = help_command((lambda s: multi_replace(s[::-1], _bracket_repls)), name='pleh', hidden=True)
    plah = help_command((lambda s: multi_replace(s[::-1].upper(), _bracket_repls)),
                        name='plah', hidden=True)
async def _invite_embed(self, ctx):
# TODO: Move this somewhere else as this is also duplicated in meta.py
source_url = f'https://github.com/Ikusaba-san/Chiaki-Nanami'
if ctx.bot.version_info.releaselevel != 'alpha':
source_url = f'{source_url}/tree/v{ctx.bot.__version__}'
invite = (discord.Embed(description=self.bot.description, title=str(self.bot.user), colour=self.bot.colour)
.set_thumbnail(url=self.bot.user.avatar_url_as(format=None))
.add_field(name="Want me in your server?",
value=f'[Invite me here!]({self.bot.invite_url})', inline=False)
.add_field(name="If you just to be simple...",
value=f'[Invite me with minimal permissions!]({self.bot.minimal_invite_url})', inline=False)
.add_field(name="Need help with using me?",
value=f"[Here's the official server!]({self.bot.support_invite})", inline=False)
.add_field(name="If you're curious about how I work...",
value=f"[Check out the source code!]({source_url})", inline=False)
)
await ctx.send(embed=invite)
@commands.command()
async def invite(self, ctx):
"""...it's an invite"""
if ctx.bot_has_embed_links():
await self._invite_embed(ctx)
else:
content = (
                'Okay~ Here you go... I think. ^.^\n'
                f'Full Permissions: <{self.bot.invite_url}>\n'
                f'Minimal Permissions: <{self.bot.minimal_invite_url}>'
)
await ctx.send(content)
@commands.command(name='commands', aliases=['cmds'])
async def commands_(self, ctx, category: Category = None):
"""Shows all the commands in a given category.
If no category is given, all commands are shown.
"""
if category is None:
return await ctx.invoke(self.help)
paginator = await CogPages.create(ctx, category)
await paginator.interact()
async def _show_tip(self, ctx, number):
if number > _get_tip_index() + 1:
tip, success = TOO_FAR_TIP, False
else:
try:
tip, success = self.tips_list[number - 1], True
except IndexError:
tip, success = DEFAULT_TIP, False
tip_embed = discord.Embed.from_data(tip)
tip_embed.colour = ctx.bot.colour
if success:
tip_embed.set_author(name=f'Tip of the Day #{number}', icon_url=TIP_EMOJI)
await ctx.send(embed=tip_embed)
@commands.command()
@commands.bot_has_permissions(embed_links=True)
async def tip(self, ctx, number: positive_index = None):
"""Shows a Chiaki Tip via number.
If no number is specified, it shows the daily tip.
"""
if number is None:
number = _get_tip_index() + 1
await self._show_tip(ctx, number)
@commands.command()
async def tips(self, ctx):
"""Shows all tips *up to today*"""
current_index = _get_tip_index() + 1
await TipPaginator(ctx, self.tips_list[:current_index]).interact()
@commands.command()
@commands.bot_has_permissions(embed_links=True)
async def randomtip(self, ctx):
"""Shows a random tip.
The tip range is from the first one to today's one.
"""
number = _get_tip_index() + 1
await self._show_tip(ctx, random.randint(1, number))
@commands.command()
@commands.cooldown(rate=1, per=60, type=commands.BucketType.user)
async def feedback(self, ctx, *, message):
"""Gives feedback about the bot.
This is a quick and easy way to either request features
or bug fixes without being in the support server.
You can only send feedback once every minute.
"""
dest = self.bot.feedback_destination
if not dest:
return
# Create the feedback embed
embed = (discord.Embed(colour=ctx.bot.colour, description=message, title='Feedback')
.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)
.set_footer(text=f'Author ID: {ctx.author.id}')
)
if ctx.guild:
embed.add_field(name='From', value=f'#{ctx.channel}\n(ID: {ctx.channel.id})', inline=False)
embed.add_field(name='In', value=f'{ctx.guild}\n(ID: {ctx.guild.id})', inline=False)
else:
embed.add_field(name='From', value=f'{ctx.channel}', inline=False)
embed.timestamp = ctx.message.created_at
await dest.send(embed=embed)
await ctx.send(':ok_hand:')
def setup(bot):
bot.add_cog(Help(bot))
| mit |
GeekTrainer/Flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/flask/testsuite/regression.py | 563 | 3198 | # -*- coding: utf-8 -*-
"""
flask.testsuite.regression
~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests regressions.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import gc
import sys
import flask
import threading
import unittest
from werkzeug.exceptions import NotFound
from flask.testsuite import FlaskTestCase
_gc_lock = threading.Lock()
class _NoLeakAsserter(object):
def __init__(self, testcase):
self.testcase = testcase
def __enter__(self):
gc.disable()
_gc_lock.acquire()
loc = flask._request_ctx_stack._local
# Force Python to track this dictionary at all times.
# This is necessary since Python only starts tracking
# dicts if they contain mutable objects. It's a horrible,
# horrible hack but makes this kinda testable.
loc.__storage__['FOOO'] = [1, 2, 3]
gc.collect()
self.old_objects = len(gc.get_objects())
def __exit__(self, exc_type, exc_value, tb):
if not hasattr(sys, 'getrefcount'):
gc.collect()
new_objects = len(gc.get_objects())
if new_objects > self.old_objects:
self.testcase.fail('Example code leaked')
_gc_lock.release()
gc.enable()
class MemoryTestCase(FlaskTestCase):
def assert_no_leak(self):
return _NoLeakAsserter(self)
def test_memory_consumption(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('simple_template.html', whiskey=42)
def fire():
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, b'<h1>42</h1>')
# Trigger caches
fire()
# This test only works on CPython 2.7.
if sys.version_info >= (2, 7) and \
not hasattr(sys, 'pypy_translation_info'):
with self.assert_no_leak():
for x in range(10):
fire()
def test_safe_join_toplevel_pardir(self):
from flask.helpers import safe_join
with self.assert_raises(NotFound):
safe_join('/foo', '..')
class ExceptionTestCase(FlaskTestCase):
def test_aborting(self):
class Foo(Exception):
whatever = 42
app = flask.Flask(__name__)
app.testing = True
@app.errorhandler(Foo)
def handle_foo(e):
return str(e.whatever)
@app.route('/')
def index():
raise flask.abort(flask.redirect(flask.url_for('test')))
@app.route('/test')
def test():
raise Foo()
with app.test_client() as c:
rv = c.get('/')
self.assertEqual(rv.headers['Location'], 'http://localhost/test')
rv = c.get('/test')
self.assertEqual(rv.data, b'42')
def suite():
suite = unittest.TestSuite()
if os.environ.get('RUN_FLASK_MEMORY_TESTS') == '1':
suite.addTest(unittest.makeSuite(MemoryTestCase))
suite.addTest(unittest.makeSuite(ExceptionTestCase))
return suite
| apache-2.0 |
ceb8/astroquery | astroquery/utils/tap/conn/tests/DummyConnHandler.py | 2 | 5631 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
=============
TAP plus
=============
@author: Juan Carlos Segovia
@contact: [email protected]
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
Created on 30 jun. 2016
"""
from astroquery.utils.tap import taputils
from six.moves.urllib.parse import urlencode
import requests
class DummyConnHandler:
def __init__(self):
self.request = None
self.data = None
self.fileExt = ".ext"
self.defaultResponse = None
self.responses = {}
self.errorFileOutput = None
self.errorReceivedResponse = None
self.contentType = None
self.verbose = None
self.query = None
self.fileOutput = None
def set_default_response(self, defaultResponse):
self.defaultResponse = defaultResponse
def get_default_response(self):
return self.defaultResponse
def get_last_request(self):
return self.request
def get_last_data(self):
return self.data
def get_last_query(self):
return self.query
def get_error_file_output(self):
return self.errorFileOutput
def get_error_received_response(self):
return self.errorReceivedResponse
def set_response(self, request, response):
self.responses[str(request)] = response
def execute_tapget(self, request=None, verbose=False):
return self.__execute_get(request, verbose)
def execute_dataget(self, query, verbose=False):
return self.__execute_get(query)
def execute_datalinkget(self, subcontext, query, verbose=False):
self.query = query
return self.__execute_get(subcontext, verbose)
def __execute_get(self, request, verbose):
self.request = request
self.verbose = verbose
return self.__get_response(request)
def execute_tappost(self, subcontext=None, data=None,
content_type=None, verbose=False):
return self.__execute_post(subcontext, data, content_type, verbose)
def execute_datapost(self, data=None, content_type=None, verbose=False):
return self.__execute_post("", data, content_type, verbose)
def execute_datalinkpost(self, subcontext=None, data=None,
content_type=None, verbose=False):
return self.__execute_post(subcontext, data, content_type, verbose)
def __execute_post(self, subcontext=None, data=None,
content_type=None, verbose=False):
self.data = data
self.contentType = content_type
self.verbose = verbose
sortedKey = self.__create_sorted_dict_key(data)
if subcontext.find('?') == -1:
self.request = f"{subcontext}?{sortedKey}"
else:
if subcontext.endswith('?'):
self.request = f"{subcontext}{sortedKey}"
else:
self.request = f"{subcontext}&{sortedKey}"
return self.__get_response(self.request)
def dump_to_file(self, fileOutput, response):
self.errorFileOutput = fileOutput
self.errorReceivedResponse = response
print(f"DummyConnHandler - dump to file: file: '{fileOutput}', \
response status: {response.status}, response msg: {response.reason}")
def __get_response(self, responseid):
try:
return self.responses[str(responseid)]
except KeyError as e:
if self.defaultResponse is not None:
return self.defaultResponse
else:
print(f"\nNot found response for key\n\t'{responseid}'")
print("Available keys: ")
if self.responses is None:
print("\tNone available")
else:
for k in self.responses.keys():
print(f"\t'{k}'")
raise (e)
def __create_sorted_dict_key(self, data):
dictTmp = {}
items = data.split('&')
for i in (items):
subItems = i.split('=')
dictTmp[subItems[0]] = subItems[1]
# sort dict
return taputils.taputil_create_sorted_dict_key(dictTmp)
def check_launch_response_status(self, response, debug,
expected_response_status,
raise_exception=True):
isError = False
if response.status != expected_response_status:
if debug:
print(f"ERROR: {response.status}: {response.reason}")
isError = True
if isError and raise_exception:
errMsg = taputils.get_http_response_error(response)
print(response.status, errMsg)
raise requests.exceptions.HTTPError(errMsg)
else:
return isError
def url_encode(self, data):
return urlencode(data)
def get_suitable_extension(self, headers):
return self.fileExt
def set_suitable_extension(self, ext):
self.fileExt = ext
def get_suitable_extension_by_format(self, output_format):
return self.fileExt
def get_file_from_header(self, headers):
return self.fileOutput
def find_header(self, headers, key):
return taputils.taputil_find_header(headers, key)
def execute_table_edit(self, data,
content_type="application/x-www-form-urlencoded",
verbose=False):
return self.__execute_post(subcontext="tableEdit", data=data,
content_type=content_type, verbose=verbose)
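# A minimal sketch of how a test might drive this handler (the request string
# and response object below are illustrative, not taken from the real suite):
#
#   conn = DummyConnHandler()
#   conn.set_response("sync?REQUEST=doQuery", dummy_response)
#   result = conn.execute_tapget("sync?REQUEST=doQuery")  # returns dummy_response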
| bsd-3-clause |
thadeaus/wesnoth | scons/python_devel.py | 49 | 1381 | # vi: syntax=python:et:ts=4
import sys, os
from config_check_utils import backup_env, restore_env
import distutils.sysconfig
def exists():
return True
def PythonExtension(env, target, source, **kv):
return env.SharedLibrary(target, source, SHLIBPREFIX='', SHLIBSUFFIX=distutils.sysconfig.get_config_var("SO"), **kv)
def generate(env):
env.AddMethod(PythonExtension)
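# A rough sketch of how an SConscript could use the method registered above
# (target and source names here are hypothetical):
#
#   if env.get("HAVE_PYTHON"):
#       env.PythonExtension("wesnoth_py", ["src/python/wesnoth_py.c"])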
def CheckPython(context):
env = context.env
backup = backup_env(env, ["CPPPATH", "LIBPATH", "LIBS"])
context.Message("Checking for Python... ")
env.AppendUnique(CPPPATH = distutils.sysconfig.get_python_inc())
version = distutils.sysconfig.get_config_var("VERSION")
if not version:
version = sys.version[:3]
if env["PLATFORM"] == "win32":
version = version.replace('.', '')
env.AppendUnique(LIBPATH = distutils.sysconfig.get_config_var("LIBDIR") or \
os.path.join(distutils.sysconfig.get_config_var("prefix"), "libs") )
env.AppendUnique(LIBS = "python" + version)
test_program = """
#include <Python.h>
int main()
{
Py_Initialize();
}
\n"""
if context.TryLink(test_program, ".c"):
context.Result("yes")
return True
else:
context.Result("no")
restore_env(context.env, backup)
return False
config_checks = { "CheckPython" : CheckPython }
| gpl-2.0 |
msabramo/ansible | lib/ansible/modules/packaging/os/pkgng.py | 9 | 12668 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, bleader
# Written by bleader <[email protected]>
# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
# that was based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkgng
short_description: Package manager for FreeBSD >= 9.0
description:
- Manage binary packages for FreeBSD using 'pkgng' which
is available in versions after 9.0.
version_added: "1.2"
options:
name:
description:
- Name of package to install/remove.
required: true
state:
description:
- State of the package.
choices: [ 'present', 'absent' ]
required: false
default: present
cached:
description:
- Use local package base instead of fetching an updated one.
choices: [ 'yes', 'no' ]
required: false
default: no
annotation:
description:
- A comma-separated list of keyvalue-pairs of the form
C(<+/-/:><key>[=<value>]). A C(+) denotes adding an annotation, a
C(-) denotes removing an annotation, and C(:) denotes modifying an
annotation.
If setting or modifying annotations, a value must be provided.
required: false
version_added: "1.6"
pkgsite:
description:
- For pkgng versions before 1.1.4, specify packagesite to use
for downloading packages. If not specified, use settings from
C(/usr/local/etc/pkg.conf).
      - For newer pkgng versions, specify the name of a repository
configured in C(/usr/local/etc/pkg/repos).
required: false
rootdir:
description:
- For pkgng versions 1.5 and later, pkg will install all packages
within the specified root directory.
- Can not be used together with I(chroot) option.
required: false
chroot:
version_added: "2.1"
description:
- Pkg will chroot in the specified environment.
- Can not be used together with I(rootdir) option.
required: false
autoremove:
version_added: "2.2"
description:
- Remove automatically installed packages which are no longer needed.
required: false
choices: [ "yes", "no" ]
default: no
author: "bleader (@bleader)"
notes:
    - When using pkgsite, be careful that packages already in the cache won't be downloaded again.
'''
EXAMPLES = '''
# Install package foo
- pkgng:
name: foo
state: present
# Annotate package foo and bar
- pkgng:
name: foo,bar
annotation: '+test1=baz,-test2,:test3=foobar'
# Remove packages foo and bar
- pkgng:
name: foo,bar
state: absent
'''
import re
from ansible.module_utils.basic import AnsibleModule
def query_package(module, pkgng_path, name, dir_arg):
rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, dir_arg, name))
if rc == 0:
return True
return False
def pkgng_older_than(module, pkgng_path, compare_version):
rc, out, err = module.run_command("%s -v" % pkgng_path)
version = [int(x) for x in re.split(r'[\._]', out)]
i = 0
new_pkgng = True
while compare_version[i] == version[i]:
i += 1
if i == min(len(compare_version), len(version)):
break
else:
if compare_version[i] > version[i]:
new_pkgng = False
return not new_pkgng
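# Illustrative example (not output from a real system): if "pkg -v" reports
# 1.0.12, pkgng_older_than(module, pkgng_path, [1, 1, 4]) returns True, and
# install_packages() below falls back to the old PACKAGESITE= behaviour.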
def remove_packages(module, pkgng_path, packages, dir_arg):
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, pkgng_path, package, dir_arg):
continue
if not module.check_mode:
rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, dir_arg, package))
if not module.check_mode and query_package(module, pkgng_path, package, dir_arg):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
return (True, "removed %s package(s)" % remove_c)
return (False, "package(s) already absent")
def install_packages(module, pkgng_path, packages, cached, pkgsite, dir_arg):
install_c = 0
# as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
# in /usr/local/etc/pkg/repos
old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
if pkgsite != "":
if old_pkgng:
pkgsite = "PACKAGESITE=%s" % (pkgsite)
else:
pkgsite = "-r %s" % (pkgsite)
# This environment variable skips mid-install prompts,
# setting them to their default values.
batch_var = 'env BATCH=yes'
if not module.check_mode and not cached:
if old_pkgng:
rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
else:
rc, out, err = module.run_command("%s %s update" % (pkgng_path, dir_arg))
if rc != 0:
module.fail_json(msg="Could not update catalogue")
for package in packages:
if query_package(module, pkgng_path, package, dir_arg):
continue
if not module.check_mode:
if old_pkgng:
rc, out, err = module.run_command("%s %s %s install -g -U -y %s" % (batch_var, pkgsite, pkgng_path, package))
else:
rc, out, err = module.run_command("%s %s %s install %s -g -U -y %s" % (batch_var, pkgng_path, dir_arg, pkgsite, package))
if not module.check_mode and not query_package(module, pkgng_path, package, dir_arg):
module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err)
install_c += 1
if install_c > 0:
return (True, "added %s package(s)" % (install_c))
return (False, "package(s) already present")
def annotation_query(module, pkgng_path, package, tag, dir_arg):
rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, dir_arg, package))
match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
if match:
return match.group('value')
return False
def annotation_add(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
if not _value:
# Annotation does not exist, add it.
rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
% (pkgng_path, dir_arg, package, tag, value))
if rc != 0:
module.fail_json(msg="could not annotate %s: %s"
% (package, out), stderr=err)
return True
elif _value != value:
# Annotation exists, but value differs
module.fail_json(
mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s"
% (package, tag, _value, value))
return False
else:
# Annotation exists, nothing to do
return False
def annotation_delete(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
if _value:
rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
% (pkgng_path, dir_arg, package, tag))
if rc != 0:
module.fail_json(msg="could not delete annotation to %s: %s"
% (package, out), stderr=err)
return True
return False
def annotation_modify(module, pkgng_path, package, tag, value, dir_arg):
_value = annotation_query(module, pkgng_path, package, tag, dir_arg)
    if not _value:
# No such tag
module.fail_json(msg="could not change annotation to %s: tag %s does not exist"
% (package, tag))
elif _value == value:
# No change in value
return False
else:
rc,out,err = module.run_command('%s %s annotate -y -M %s %s "%s"'
% (pkgng_path, dir_arg, package, tag, value))
if rc != 0:
module.fail_json(msg="could not change annotation annotation to %s: %s"
% (package, out), stderr=err)
return True
def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
annotate_c = 0
annotations = map(lambda _annotation:
re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?',
_annotation).groupdict(),
re.split(r',', annotation))
operation = {
'+': annotation_add,
'-': annotation_delete,
':': annotation_modify
}
for package in packages:
for _annotation in annotations:
if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value']):
annotate_c += 1
if annotate_c > 0:
return (True, "added %s annotations." % annotate_c)
return (False, "changed no annotations")
def autoremove_packages(module, pkgng_path, dir_arg):
rc, out, err = module.run_command("%s %s autoremove -n" % (pkgng_path, dir_arg))
autoremove_c = 0
match = re.search('^Deinstallation has been requested for the following ([0-9]+) packages', out, re.MULTILINE)
if match:
autoremove_c = int(match.group(1))
if autoremove_c == 0:
return False, "no package(s) to autoremove"
if not module.check_mode:
rc, out, err = module.run_command("%s %s autoremove -y" % (pkgng_path, dir_arg))
return True, "autoremoved %d package(s)" % (autoremove_c)
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default="present", choices=["present","absent"], required=False),
name = dict(aliases=["pkg"], required=True, type='list'),
cached = dict(default=False, type='bool'),
annotation = dict(default="", required=False),
pkgsite = dict(default="", required=False),
rootdir = dict(default="", required=False, type='path'),
chroot = dict(default="", required=False, type='path'),
autoremove = dict(default=False, type='bool')),
supports_check_mode = True,
mutually_exclusive =[["rootdir", "chroot"]])
pkgng_path = module.get_bin_path('pkg', True)
p = module.params
pkgs = p["name"]
changed = False
msgs = []
dir_arg = ""
if p["rootdir"] != "":
old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
if old_pkgng:
module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
else:
dir_arg = "--rootdir %s" % (p["rootdir"])
if p["chroot"] != "":
dir_arg = '--chroot %s' % (p["chroot"])
if p["state"] == "present":
_changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], dir_arg)
changed = changed or _changed
msgs.append(_msg)
elif p["state"] == "absent":
_changed, _msg = remove_packages(module, pkgng_path, pkgs, dir_arg)
changed = changed or _changed
msgs.append(_msg)
if p["autoremove"]:
_changed, _msg = autoremove_packages(module, pkgng_path, dir_arg)
changed = changed or _changed
msgs.append(_msg)
if p["annotation"]:
_changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], dir_arg)
changed = changed or _changed
msgs.append(_msg)
module.exit_json(changed=changed, msg=", ".join(msgs))
if __name__ == '__main__':
main()
| gpl-3.0 |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/indicator/_legendgrouptitle.py | 1 | 4710 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Legendgrouptitle(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "indicator"
_path_str = "indicator.legendgrouptitle"
_valid_props = {"font", "text"}
# font
# ----
@property
def font(self):
"""
Sets this legend group's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.indicator.legendgrouptitle.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.indicator.legendgrouptitle.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# text
# ----
@property
def text(self):
"""
Sets the title of the legend group.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Sets this legend group's title font.
text
Sets the title of the legend group.
"""
def __init__(self, arg=None, font=None, text=None, **kwargs):
"""
Construct a new Legendgrouptitle object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.indicator.Legendgrouptitle`
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
Legendgrouptitle
"""
super(Legendgrouptitle, self).__init__("legendgrouptitle")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.indicator.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.Legendgrouptitle`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| mit |
superdump/cerbero | cerbero/commands/genxcconfig.py | 29 | 2437 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.commands import Command, register_command
from cerbero.errors import UsageError
from cerbero.ide.xcode.xcconfig import XCConfig
from cerbero.utils import _, N_, ArgparseArgument
from cerbero.utils import messages as m
class GenXCodeConfig(Command):
doc = N_('Generate XCode config files to use the SDK from VS')
name = 'genxcconfig'
def __init__(self):
Command.__init__(self,
[ArgparseArgument('-o', '--output_dir', default='.',
help=_('output directory where .xcconfig files will be saved')),
ArgparseArgument('-f', '--filename', default=None,
help=_('filename of the .xcconfig file')),
ArgparseArgument('libraries', nargs='*',
help=_('List of libraries to include')),
])
def run(self, config, args):
self.runargs(config, args.output_dir, args.filename, args.libraries)
def runargs(self, config, output_dir, filename, libraries):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if len(libraries) == 0:
raise UsageError("You need to specify at least one library name")
filename = filename or libraries[0]
filepath = os.path.join(output_dir, '%s.xcconfig' % filename)
xcconfig = XCConfig(libraries)
xcconfig.create(filepath)
m.action('Created %s.xcconfig' % filename)
m.message('XCode config file were sucessfully created in %s' %
os.path.abspath(filepath))
register_command(GenXCodeConfig)
| lgpl-2.1 |
HyperBaton/ansible | lib/ansible/modules/cloud/google/gce_snapshot.py | 29 | 6899 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_snapshot
version_added: "2.3"
short_description: Create or destroy snapshots for GCE storage volumes
description:
- Manages snapshots for GCE instances. This module manages snapshots for
the storage volumes of a GCE compute instance. If there are multiple
volumes, each snapshot will be prepended with the disk name
options:
instance_name:
description:
- The GCE instance to snapshot
required: True
snapshot_name:
description:
- The name of the snapshot to manage
disks:
description:
- A list of disks to create snapshots for. If none is provided,
all of the volumes will be snapshotted
default: all
required: False
state:
description:
- Whether a snapshot should be C(present) or C(absent)
required: false
default: present
choices: [present, absent]
service_account_email:
description:
- GCP service account email for the project where the instance resides
required: true
credentials_file:
description:
- The path to the credentials file associated with the service account
required: true
project_id:
description:
- The GCP project ID to use
required: true
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.19.0"
author: Rob Wagner (@robwagner33)
'''
EXAMPLES = '''
- name: Create gce snapshot
gce_snapshot:
instance_name: example-instance
snapshot_name: example-snapshot
state: present
service_account_email: [email protected]
credentials_file: /path/to/credentials
project_id: project_name
delegate_to: localhost
- name: Delete gce snapshot
gce_snapshot:
instance_name: example-instance
snapshot_name: example-snapshot
state: absent
service_account_email: [email protected]
credentials_file: /path/to/credentials
project_id: project_name
delegate_to: localhost
# This example creates snapshots for only two of the available disks as
# disk0-example-snapshot and disk1-example-snapshot
- name: Create snapshots of specific disks
gce_snapshot:
instance_name: example-instance
snapshot_name: example-snapshot
state: present
disks:
- disk0
- disk1
service_account_email: [email protected]
credentials_file: /path/to/credentials
project_id: project_name
delegate_to: localhost
'''
RETURN = '''
snapshots_created:
description: List of newly created snapshots
returned: When snapshots are created
type: list
sample: "[disk0-example-snapshot, disk1-example-snapshot]"
snapshots_deleted:
description: List of destroyed snapshots
returned: When snapshots are deleted
type: list
sample: "[disk0-example-snapshot, disk1-example-snapshot]"
snapshots_existing:
description: List of snapshots that already existed (no-op)
returned: When snapshots were already present
type: list
sample: "[disk0-example-snapshot, disk1-example-snapshot]"
snapshots_absent:
description: List of snapshots that were already absent (no-op)
returned: When snapshots were already absent
type: list
sample: "[disk0-example-snapshot, disk1-example-snapshot]"
'''
try:
from libcloud.compute.types import Provider
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect
def find_snapshot(volume, name):
'''
Check if there is a snapshot already created with the given name for
the passed in volume.
Args:
volume: A gce StorageVolume object to manage
name: The name of the snapshot to look for
Returns:
The VolumeSnapshot object if one is found
'''
found_snapshot = None
snapshots = volume.list_snapshots()
for snapshot in snapshots:
if name == snapshot.name:
found_snapshot = snapshot
return found_snapshot
def main():
module = AnsibleModule(
argument_spec=dict(
instance_name=dict(required=True),
snapshot_name=dict(required=True),
state=dict(choices=['present', 'absent'], default='present'),
disks=dict(default=None, type='list'),
service_account_email=dict(type='str'),
credentials_file=dict(type='path'),
project_id=dict(type='str')
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.19.0+) is required for this module')
gce = gce_connect(module)
instance_name = module.params.get('instance_name')
snapshot_name = module.params.get('snapshot_name')
disks = module.params.get('disks')
state = module.params.get('state')
json_output = dict(
changed=False,
snapshots_created=[],
snapshots_deleted=[],
snapshots_existing=[],
snapshots_absent=[]
)
snapshot = None
instance = gce.ex_get_node(instance_name, 'all')
instance_disks = instance.extra['disks']
for instance_disk in instance_disks:
disk_snapshot_name = snapshot_name
disk_info = gce._get_components_from_path(instance_disk['source'])
device_name = disk_info['name']
device_zone = disk_info['zone']
if disks is None or device_name in disks:
volume_obj = gce.ex_get_volume(device_name, device_zone)
# If we have more than one disk to snapshot, prepend the disk name
if len(instance_disks) > 1:
disk_snapshot_name = device_name + "-" + disk_snapshot_name
snapshot = find_snapshot(volume_obj, disk_snapshot_name)
if snapshot and state == 'present':
json_output['snapshots_existing'].append(disk_snapshot_name)
elif snapshot and state == 'absent':
snapshot.destroy()
json_output['changed'] = True
json_output['snapshots_deleted'].append(disk_snapshot_name)
elif not snapshot and state == 'present':
volume_obj.snapshot(disk_snapshot_name)
json_output['changed'] = True
json_output['snapshots_created'].append(disk_snapshot_name)
elif not snapshot and state == 'absent':
json_output['snapshots_absent'].append(disk_snapshot_name)
module.exit_json(**json_output)
if __name__ == '__main__':
main()
| gpl-3.0 |
tonybaloney/st2contrib | packs/aws/actions/lib/action.py | 3 | 4279 | import re
import eventlet
import importlib
import boto.ec2
import boto.route53
import boto.vpc
from st2actions.runners.pythonrunner import Action
from ec2parsers import ResultSets
class BaseAction(Action):
def __init__(self, config):
super(BaseAction, self).__init__(config)
self.credentials = {
'region': None,
'aws_access_key_id': None,
'aws_secret_access_key': None
}
if config['st2_user_data']:
with open(config['st2_user_data'], 'r') as fp:
self.userdata = fp.read()
else:
self.userdata = None
# Note: In the old static config, credentials and region live under the "setup" key;
# with the new dynamic config, the values are top-level
access_key_id = config.get('aws_access_key_id', None)
secret_access_key = config.get('aws_secret_access_key', None)
region = config.get('region', None)
if access_key_id and secret_access_key:
self.credentials['aws_access_key_id'] = access_key_id
self.credentials['aws_secret_access_key'] = secret_access_key
self.credentials['region'] = region
else:
# Assume old-style config
self.credentials = config['setup']
self.resultsets = ResultSets()
def ec2_connect(self):
region = self.credentials['region']
del self.credentials['region']
return boto.ec2.connect_to_region(region, **self.credentials)
def vpc_connect(self):
region = self.credentials['region']
del self.credentials['region']
return boto.vpc.connect_to_region(region, **self.credentials)
def r53_connect(self):
del self.credentials['region']
return boto.route53.connection.Route53Connection(**self.credentials)
def get_r53zone(self, zone):
conn = self.r53_connect()
return conn.get_zone(zone)
def st2_user_data(self):
return self.userdata
def split_tags(self, tags):
tag_dict = {}
split_tags = tags.split(',')
for tag in split_tags:
if re.search('=', tag):
k, v = tag.split('=', 1)
tag_dict[k] = v
return tag_dict
def wait_for_state(self, instance_id, state, timeout=10, retries=3):
state_list = {}
obj = self.ec2_connect()
eventlet.sleep(timeout)
instance_list = []
for _ in range(retries + 1):
try:
instance_list = obj.get_only_instances([instance_id, ])
except Exception:
self.logger.info("Waiting for instance to become available")
eventlet.sleep(timeout)
for instance in instance_list:
try:
current_state = instance.update()
except Exception, e:
self.logger.info("Instance (%s) not listed. Error: %s" %
(instance_id, e))
eventlet.sleep(timeout)
while current_state != state:
current_state = instance.update()
state_list[instance_id] = current_state
return state_list
def do_method(self, module_path, cls, action, **kwargs):
module = importlib.import_module(module_path)
# hack to connect to correct region
if cls == 'EC2Connection':
obj = self.ec2_connect()
elif cls == 'VPCConnection':
obj = self.vpc_connect()
elif module_path == 'boto.route53.zone' and cls == 'Zone':
zone = kwargs['zone']
del kwargs['zone']
obj = self.get_r53zone(zone)
else:
del self.credentials['region']
obj = getattr(module, cls)(**self.credentials)
if not obj:
raise ValueError('Invalid or missing credentials (aws_access_key_id,'
'aws_secret_access_key) or region')
resultset = getattr(obj, action)(**kwargs)
formatted = self.resultsets.formatter(resultset)
return formatted if isinstance(formatted, list) else [formatted]
def do_function(self, module_path, action, **kwargs):
module = __import__(module_path)
return getattr(module, action)(**kwargs)
| apache-2.0 |
rectory-school/rectory-apps | seating_charts/migrations/0001_initial.py | 1 | 12937 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('academics', '0022_auto_20160203_1038'),
]
operations = [
migrations.CreateModel(
name='Ethnicity',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('ethnicity', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='HistoricalEthnicity',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
('ethnicity', models.CharField(max_length=200)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical ethnicity',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
),
migrations.CreateModel(
name='HistoricalMealTime',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
('name', models.CharField(max_length=200)),
('include_boarding_students', models.BooleanField(default=False)),
('include_day_students', models.BooleanField(default=False)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical meal time',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
),
migrations.CreateModel(
name='HistoricalPinnedStudent',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical pinned student',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
),
migrations.CreateModel(
name='HistoricalSeatFiller',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
('description', models.CharField(blank=True, max_length=200)),
('seats', models.IntegerField()),
('display', models.BooleanField(default=False)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical seat filler',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
),
migrations.CreateModel(
name='HistoricalTable',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
('description', models.CharField(max_length=200)),
('capacity', models.IntegerField()),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical table',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
),
migrations.CreateModel(
name='HistoricalTableAssignment',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
('waitor', models.BooleanField(default=False)),
('history_id', models.AutoField(primary_key=True, serialize=False)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'historical table assignment',
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
},
),
migrations.CreateModel(
name='Layout',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('name', models.CharField(max_length=25)),
],
),
migrations.CreateModel(
name='MealTime',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('name', models.CharField(max_length=200)),
('include_boarding_students', models.BooleanField(default=False)),
('include_day_students', models.BooleanField(default=False)),
('include_grades', models.ManyToManyField(to='academics.Grade')),
],
),
migrations.CreateModel(
name='PinnedStudent',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('meal_time', models.ForeignKey(to='seating_charts.MealTime')),
],
),
migrations.CreateModel(
name='SeatFiller',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('description', models.CharField(blank=True, max_length=200)),
('seats', models.IntegerField()),
('display', models.BooleanField(default=False)),
('meal_time', models.ManyToManyField(to='seating_charts.MealTime')),
],
),
migrations.CreateModel(
name='SeatingStudent',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('enrollment', models.ForeignKey(to='academics.Enrollment')),
('ethnicity', models.ForeignKey(null=True, to='seating_charts.Ethnicity')),
],
),
migrations.CreateModel(
name='Table',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('description', models.CharField(max_length=200)),
('capacity', models.IntegerField()),
('for_meals', models.ManyToManyField(to='seating_charts.MealTime')),
],
),
migrations.CreateModel(
name='TableAssignment',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('waitor', models.BooleanField(default=False)),
('meal_time', models.ForeignKey(to='seating_charts.MealTime')),
('student', models.ForeignKey(to='seating_charts.SeatingStudent')),
('table', models.ForeignKey(to='seating_charts.Table')),
],
options={
'permissions': (('view', 'Can view table assignments'), ('edit', 'Can edit table assignments')),
},
),
migrations.AddField(
model_name='seatfiller',
name='table',
field=models.ForeignKey(to='seating_charts.Table'),
),
migrations.AddField(
model_name='pinnedstudent',
name='student',
field=models.ForeignKey(to='seating_charts.SeatingStudent'),
),
migrations.AddField(
model_name='pinnedstudent',
name='table',
field=models.ForeignKey(to='seating_charts.Table'),
),
migrations.AddField(
model_name='layout',
name='left_print',
field=models.ForeignKey(related_name='+', to='seating_charts.MealTime'),
),
migrations.AddField(
model_name='layout',
name='right_print',
field=models.ForeignKey(null=True, related_name='+', blank=True, to='seating_charts.MealTime'),
),
migrations.AddField(
model_name='historicaltableassignment',
name='meal_time',
field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.MealTime'),
),
migrations.AddField(
model_name='historicaltableassignment',
name='student',
field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.SeatingStudent'),
),
migrations.AddField(
model_name='historicaltableassignment',
name='table',
field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.Table'),
),
migrations.AddField(
model_name='historicalseatfiller',
name='table',
field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.Table'),
),
migrations.AddField(
model_name='historicalpinnedstudent',
name='meal_time',
field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.MealTime'),
),
migrations.AddField(
model_name='historicalpinnedstudent',
name='student',
field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.SeatingStudent'),
),
migrations.AddField(
model_name='historicalpinnedstudent',
name='table',
field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.Table'),
),
migrations.AlterUniqueTogether(
name='tableassignment',
unique_together=set([('meal_time', 'student')]),
),
migrations.AlterUniqueTogether(
name='pinnedstudent',
unique_together=set([('student', 'meal_time')]),
),
]
| mit |
odooindia/odoo | addons/survey/__openerp__.py | 64 | 2413 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Survey',
'version': '2.0',
'category': 'Marketing',
'description': """
Create beautiful web surveys and visualize answers
==================================================
A survey is built from the answers or reviews that different users give to its
questions. A survey may have multiple pages, each page may contain multiple
questions, and each question may have multiple answers. Different users may
give different answers to the questions, and the survey results are compiled
from those answers. Partners are also sent invitation emails containing a
personal token for the survey.
""",
'summary': 'Create surveys, collect answers and print statistics',
'author': 'OpenERP SA',
'website': 'https://www.openerp.com/apps/survey/',
'depends': ['email_template', 'mail', 'website', 'marketing'],
'data': [
'security/survey_security.xml',
'security/ir.model.access.csv',
'views/survey_views.xml',
'views/survey_templates.xml',
'views/survey_result.xml',
'wizard/survey_email_compose_message.xml',
'data/survey_stages.xml',
'data/survey_cron.xml'
],
'demo': ['data/survey_demo_user.xml',
'data/survey_demo_feedback.xml',
'data/survey.user_input.csv',
'data/survey.user_input_line.csv'],
'installable': True,
'auto_install': False,
'application': True,
'sequence': 10,
'images': [],
}
| agpl-3.0 |
techdragon/django | django/contrib/messages/storage/fallback.py | 704 | 2172 | from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import CookieStorage
from django.contrib.messages.storage.session import SessionStorage
class FallbackStorage(BaseStorage):
"""
Tries to store all messages in the first backend, storing any unstored
messages in each subsequent backend.
"""
storage_classes = (CookieStorage, SessionStorage)
def __init__(self, *args, **kwargs):
super(FallbackStorage, self).__init__(*args, **kwargs)
self.storages = [storage_class(*args, **kwargs)
for storage_class in self.storage_classes]
self._used_storages = set()
def _get(self, *args, **kwargs):
"""
Gets a single list of messages from all storage backends.
"""
all_messages = []
for storage in self.storages:
messages, all_retrieved = storage._get()
# If the backend hasn't been used, no more retrieval is necessary.
if messages is None:
break
if messages:
self._used_storages.add(storage)
all_messages.extend(messages)
# If this storage class contained all the messages, no further
# retrieval is necessary
if all_retrieved:
break
return all_messages, all_retrieved
def _store(self, messages, response, *args, **kwargs):
"""
Stores the messages, returning any unstored messages after trying all
backends.
For each storage backend, any messages not stored are passed on to the
next backend.
"""
for storage in self.storages:
if messages:
messages = storage._store(messages, response,
remove_oldest=False)
# Even if there are no more messages, continue iterating to ensure
# storages which contained messages are flushed.
elif storage in self._used_storages:
storage._store([], response)
self._used_storages.remove(storage)
return messages
| bsd-3-clause |
yoziru-desu/airflow | airflow/operators/http_operator.py | 7 | 2551 | import logging
from airflow.hooks import HttpHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults, AirflowException
class SimpleHttpOperator(BaseOperator):
"""
Calls an endpoint on an HTTP system to execute an action
:param http_conn_id: The connection to run the sensor against
:type http_conn_id: string
:param endpoint: The relative part of the full url
:type endpoint: string
:param method: The HTTP method to use, default = "POST"
:type method: string
:param data: The data to pass. POST-data in POST/PUT and params
in the URL for a GET request.
:type data: For POST/PUT, depends on the content-type parameter,
for GET a dictionary of key/value string pairs
:param headers: The HTTP headers to be added to the request
:type headers: a dictionary of string key/value pairs
:param response_check: A check against the 'requests' response object.
Returns True for 'pass' and False otherwise.
:type response_check: A lambda or defined function.
:param extra_options: Extra options for the 'requests' library, see the
'requests' documentation (options to modify timeout, ssl, etc.)
:type extra_options: A dictionary of options, where key is string and value
depends on the option that's being modified.
"""
template_fields = ('endpoint','data',)
template_ext = ()
ui_color = '#f4a460'
@apply_defaults
def __init__(self,
endpoint,
method='POST',
data=None,
headers=None,
response_check=None,
extra_options=None,
http_conn_id='http_default', *args, **kwargs):
super(SimpleHttpOperator, self).__init__(*args, **kwargs)
self.http_conn_id = http_conn_id
self.method = method
self.endpoint = endpoint
self.headers = headers or {}
self.data = data or {}
self.response_check = response_check
self.extra_options = extra_options or {}
def execute(self, context):
http = HttpHook(self.method, http_conn_id=self.http_conn_id)
logging.info("Calling HTTP method")
response = http.run(self.endpoint,
self.data,
self.headers,
self.extra_options)
if self.response_check:
if not self.response_check(response):
raise AirflowException("Response check returned False.")
| apache-2.0 |
facaiy/spark | resource-managers/kubernetes/integration-tests/tests/worker_memory_check.py | 35 | 1624 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import resource
import sys
from pyspark.sql import SparkSession
if __name__ == "__main__":
"""
Usage: worker_memory_check [Memory_in_Mi]
"""
spark = SparkSession \
.builder \
.appName("PyMemoryTest") \
.getOrCreate()
sc = spark.sparkContext
if len(sys.argv) < 2:
print("Usage: worker_memory_check [Memory_in_Mi]", file=sys.stderr)
sys.exit(-1)
def f(x):
rLimit = resource.getrlimit(resource.RLIMIT_AS)
print("RLimit is " + str(rLimit))
return rLimit
resourceValue = sc.parallelize([1]).map(f).collect()[0][0]
print("Resource Value is " + str(resourceValue))
truthCheck = (resourceValue == int(sys.argv[1]))
print("PySpark Worker Memory Check is: " + str(truthCheck))
spark.stop()
| apache-2.0 |
georgewhewell/CouchPotatoServer | libs/sqlalchemy/dialects/firebird/kinterbasdb.py | 18 | 6572 | # firebird/kinterbasdb.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
The most common way to connect to a Firebird engine is implemented by
kinterbasdb__, currently maintained__ directly by the Firebird people.
The connection URL is of the form
``firebird[+kinterbasdb]://user:password@host:port/path/to/db[?key=value&key=value...]``.
Kinterbasedb backend specific keyword arguments are:
* type_conv - select the kind of mapping done on the types: by default
SQLAlchemy uses 200 with Unicode, datetime and decimal support (see
details__).
* concurrency_level - set the backend policy with regards to threading
issues: by default SQLAlchemy uses policy 1 (see details__).
* enable_rowcount - True by default, setting this to False disables
the usage of "cursor.rowcount" with the
Kinterbasdb dialect, which SQLAlchemy ordinarily calls upon automatically
after any UPDATE or DELETE statement. When disabled, SQLAlchemy's
ResultProxy will return -1 for result.rowcount. The rationale here is
that Kinterbasdb requires a second round trip to the database when
.rowcount is called - since SQLA's resultproxy automatically closes
the cursor after a non-result-returning statement, rowcount must be
called, if at all, before the result object is returned. Additionally,
cursor.rowcount may not return correct results with older versions
of Firebird, and setting this flag to False will also cause the
SQLAlchemy ORM to ignore its usage. The behavior can also be controlled on a
per-execution basis using the `enable_rowcount` option with
:meth:`execution_options()`::
conn = engine.connect().execution_options(enable_rowcount=True)
r = conn.execute(stmt)
print r.rowcount
__ http://sourceforge.net/projects/kinterbasdb
__ http://firebirdsql.org/index.php?op=devel&sub=python
__ http://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation
__ http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency
"""
from sqlalchemy.dialects.firebird.base import FBDialect, \
FBCompiler, FBExecutionContext
from sqlalchemy import util, types as sqltypes
from sqlalchemy.util.compat import decimal
from re import match
class _FBNumeric_kinterbasdb(sqltypes.Numeric):
def bind_processor(self, dialect):
def process(value):
if isinstance(value, decimal.Decimal):
return str(value)
else:
return value
return process
class FBExecutionContext_kinterbasdb(FBExecutionContext):
@property
def rowcount(self):
if self.execution_options.get('enable_rowcount',
self.dialect.enable_rowcount):
return self.cursor.rowcount
else:
return -1
class FBDialect_kinterbasdb(FBDialect):
driver = 'kinterbasdb'
supports_sane_rowcount = False
supports_sane_multi_rowcount = False
execution_ctx_cls = FBExecutionContext_kinterbasdb
supports_native_decimal = True
colspecs = util.update_copy(
FBDialect.colspecs,
{
sqltypes.Numeric:_FBNumeric_kinterbasdb,
}
)
def __init__(self, type_conv=200, concurrency_level=1,
enable_rowcount=True, **kwargs):
super(FBDialect_kinterbasdb, self).__init__(**kwargs)
self.enable_rowcount = enable_rowcount
self.type_conv = type_conv
self.concurrency_level = concurrency_level
if enable_rowcount:
self.supports_sane_rowcount = True
@classmethod
def dbapi(cls):
k = __import__('kinterbasdb')
return k
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if opts.get('port'):
opts['host'] = "%s/%s" % (opts['host'], opts['port'])
del opts['port']
opts.update(url.query)
util.coerce_kw_type(opts, 'type_conv', int)
type_conv = opts.pop('type_conv', self.type_conv)
concurrency_level = opts.pop('concurrency_level',
self.concurrency_level)
if self.dbapi is not None:
initialized = getattr(self.dbapi, 'initialized', None)
if initialized is None:
# CVS rev 1.96 changed the name of the attribute:
# http://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96
initialized = getattr(self.dbapi, '_initialized', False)
if not initialized:
self.dbapi.init(type_conv=type_conv,
concurrency_level=concurrency_level)
return ([], opts)
def _get_server_version_info(self, connection):
"""Get the version of the Firebird server used by a connection.
Returns a tuple of (`major`, `minor`, `build`), three integers
representing the version of the attached server.
"""
# This is the simpler approach (the other uses the services api),
# that for backward compatibility reasons returns a string like
# LI-V6.3.3.12981 Firebird 2.0
# where the first version is a fake one resembling the old
# Interbase signature.
fbconn = connection.connection
version = fbconn.server_version
return self._parse_version_info(version)
def _parse_version_info(self, version):
m = match('\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?', version)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % version)
if m.group(5) != None:
return tuple([int(x) for x in m.group(6, 7, 4)] + ['firebird'])
else:
return tuple([int(x) for x in m.group(1, 2, 3)] + ['interbase'])
def is_disconnect(self, e, connection, cursor):
if isinstance(e, (self.dbapi.OperationalError,
self.dbapi.ProgrammingError)):
msg = str(e)
return ('Unable to complete network request to host' in msg or
'Invalid connection state' in msg or
'Invalid cursor state' in msg or
'connection shutdown' in msg)
else:
return False
dialect = FBDialect_kinterbasdb
| gpl-3.0 |
alexandrucoman/vbox-neutron-agent | neutron/plugins/ml2/config.py | 5 | 2670 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
ml2_opts = [
cfg.ListOpt('type_drivers',
default=['local', 'flat', 'vlan', 'gre', 'vxlan'],
help=_("List of network type driver entrypoints to be loaded "
"from the neutron.ml2.type_drivers namespace.")),
cfg.ListOpt('tenant_network_types',
default=['local'],
help=_("Ordered list of network_types to allocate as tenant "
"networks.")),
cfg.ListOpt('mechanism_drivers',
default=[],
help=_("An ordered list of networking mechanism driver "
"entrypoints to be loaded from the "
"neutron.ml2.mechanism_drivers namespace.")),
cfg.ListOpt('extension_drivers',
default=[],
help=_("An ordered list of extension driver "
"entrypoints to be loaded from the "
"neutron.ml2.extension_drivers namespace.")),
cfg.IntOpt('path_mtu', default=0,
help=_('The maximum permissible size of an unfragmented '
'packet travelling from and to addresses where '
'encapsulated Neutron traffic is sent. If <= 0, '
'the path MTU is indeterminate.')),
cfg.IntOpt('segment_mtu', default=0,
help=_('The maximum permissible size of an unfragmented '
'packet travelling a L2 network segment. If <= 0, the '
'segment MTU is indeterminate.')),
cfg.ListOpt('physical_network_mtus',
default=[],
help=_("A list of mappings of physical networks to MTU "
"values. The format of the mapping is "
"<physnet>:<mtu val>. This mapping allows "
"specifying a physical network MTU value that "
"differs from the default segment_mtu value.")),
]
cfg.CONF.register_opts(ml2_opts, "ml2")
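# Illustrative ml2_conf.ini fragment matching the options above (a sketch only;
# the driver names are examples, not defaults):
#
#   [ml2]
#   type_drivers = flat,vlan,vxlan
#   tenant_network_types = vxlan
#   mechanism_drivers = openvswitch,l2population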
| apache-2.0 |
sheshas/HGSHM | qemu-2.3.0-rc3/roms/u-boot/tools/reformat.py | 31 | 4371 | #! /usr/bin/python
########################################################################
#
# reorder and reformat a file in columns
#
# this utility takes lines from its standard input and reproduces them,
# partially reordered and reformatted, on its standard output.
#
# It has the same effect as a 'sort | column -t', with the exception
# that empty lines, as well as lines which start with a '#' sign, are
# not affected, i.e. they keep their position and formatting, and act
# as separators, i.e. the parts before and after them are each sorted
# separately (but overall field widths are computed across the whole
# input).
#
# Options:
# -i:
# --ignore-case:
# Do not consider case when sorting.
# -d:
# --default:
# What to change empty fields to.
# -s <N>:
# --split=<N>:
# Treat only the first N whitespace sequences as separators.
# line content after the Nth separator will count as only one
# field even if it contains whitespace.
# Example : '-s 2' causes input 'a b c d e' to be split into
# three fields, 'a', 'b', and 'c d e'.
#
# boards.cfg requires -ids 6.
#
########################################################################
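# Example invocation (a sketch; the file names are hypothetical). As noted
# above, boards.cfg needs '-ids 6':
#
#   tools/reformat.py -i -d '-' -s 6 < boards.cfg > boards.cfg.sorted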
import sys, getopt, locale
# ensure we sort using the C locale.
locale.setlocale(locale.LC_ALL, 'C')
# check options
maxsplit = 0
ignore_case = 0
default_field =''
try:
opts, args = getopt.getopt(sys.argv[1:], "id:s:",
["ignore-case","default","split="])
except getopt.GetoptError as err:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
for o, a in opts:
if o in ("-s", "--split"):
maxsplit = eval(a)
elif o in ("-i", "--ignore-case"):
ignore_case = 1
elif o in ("-d", "--default"):
default_field = a
else:
assert False, "unhandled option"
# collect all lines from standard input and, for the ones which must be
# reformatted and sorted, count their fields and compute each field's
# maximum size
input_lines = []
field_width = []
for line in sys.stdin:
# remove final end of line
input_line = line.strip('\n')
if (len(input_line)>0) and (input_line[0] != '#'):
# sortable line: split into fields
fields = input_line.split(None,maxsplit)
# if there are new fields, top up field_widths
for f in range(len(field_width), len(fields)):
field_width.append(0)
# compute the maximum width of each field
for f in range(len(fields)):
field_width[f] = max(field_width[f],len(fields[f]))
# collect the line for next stage
input_lines.append(input_line)
# run through collected input lines, collect the ones which must be
# reformatted and sorted, and whenever a non-reformattable, non-sortable
# line is met, sort the collected lines before it and append them to the
# output lines, then add the non-sortable line too.
output_lines = []
sortable_lines = []
for input_line in input_lines:
if (len(input_line)>0) and (input_line[0] != '#'):
# this line should be reformatted and sorted
input_fields = input_line.split(None,maxsplit)
output_fields = [];
# reformat each field to this field's column width
for f in range(len(input_fields)):
output_field = input_fields[f];
output_fields.append(output_field.ljust(field_width[f]))
# any missing field is set to default if it exists
if default_field != '':
for f in range(len(input_fields),len(field_width)):
output_fields.append(default_field.ljust(field_width[f]))
# join fields using two spaces, like column -t would
output_line = ' '.join(output_fields);
# collect line for later
sortable_lines.append(output_line)
else:
# this line is non-sortable
# sort collected sortable lines
if ignore_case!=0:
sortable_lines.sort(key=lambda x: str.lower(locale.strxfrm(x)))
else:
sortable_lines.sort(key=lambda x: locale.strxfrm(x))
# append sortable lines to the final output
output_lines.extend(sortable_lines)
sortable_lines = []
# append non-sortable line to the final output
output_lines.append(input_line)
# maybe we had sortable lines pending, so append them to the final output
if ignore_case!=0:
sortable_lines.sort(key=lambda x: str.lower(locale.strxfrm(x)))
else:
sortable_lines.sort(key=lambda x: locale.strxfrm(x))
output_lines.extend(sortable_lines)
# run through output lines and print them, except rightmost whitespace
for output_line in output_lines:
print output_line.rstrip()
| gpl-2.0 |
smilecoin/smilecoin | contrib/testgen/base58.py | 2139 | 2818 | '''
Bitcoin base58 encoding and decoding.
Based on https://bitcointalk.org/index.php?topic=1026.0 (public domain)
'''
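# Round-trip sketch, mirroring the self-test at the bottom of this file:
#
#   encoded = b58encode('o hai'.encode('ascii'))   # -> 'DYB3oMS'
#   assert b58decode(encoded, 5) == 'o hai'.encode('ascii')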
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0': nPad += 1
else: break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]: nPad += 1
else: break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
h3 = checksum(result[:-4])
if result[-4:] == checksum(result[:-4]):
return result[:-4]
else:
return None
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21: return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/bitcoin/python-base58.git)
assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') is 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
| mit |
ConnorGBrewster/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/wptmanifest/tests/test_serializer.py | 3 | 4547 | import sys
import unittest
import pytest
from .. import parser, serializer
class TokenizerTest(unittest.TestCase):
def setUp(self):
self.serializer = serializer.ManifestSerializer()
self.parser = parser.Parser()
def serialize(self, input_str):
return self.serializer.serialize(self.parser.parse(input_str))
def compare(self, input_str, expected=None):
if expected is None:
expected = input_str
expected = expected.encode("utf8")
actual = self.serialize(input_str)
self.assertEquals(actual, expected)
def test_0(self):
self.compare("""key: value
[Heading 1]
other_key: other_value
""")
def test_1(self):
self.compare("""key: value
[Heading 1]
other_key:
if a or b: other_value
""")
def test_2(self):
self.compare("""key: value
[Heading 1]
other_key:
if a or b: other_value
fallback_value
""")
def test_3(self):
self.compare("""key: value
[Heading 1]
other_key:
if a == 1: other_value
fallback_value
""")
def test_4(self):
self.compare("""key: value
[Heading 1]
other_key:
if a == "1": other_value
fallback_value
""")
def test_5(self):
self.compare("""key: value
[Heading 1]
other_key:
if a == "abc"[1]: other_value
fallback_value
""")
def test_6(self):
self.compare("""key: value
[Heading 1]
other_key:
if a == "abc"[c]: other_value
fallback_value
""")
def test_7(self):
self.compare("""key: value
[Heading 1]
other_key:
if (a or b) and c: other_value
fallback_value
""",
"""key: value
[Heading 1]
other_key:
if a or b and c: other_value
fallback_value
""")
def test_8(self):
self.compare("""key: value
[Heading 1]
other_key:
if a or (b and c): other_value
fallback_value
""")
def test_9(self):
self.compare("""key: value
[Heading 1]
other_key:
if not (a and b): other_value
fallback_value
""")
def test_10(self):
self.compare("""key: value
[Heading 1]
some_key: some_value
[Heading 2]
other_key: other_value
""")
def test_11(self):
self.compare("""key:
if not a and b and c and d: true
""")
def test_12(self):
self.compare("""[Heading 1]
key: [a:1, b:2]
""")
def test_13(self):
self.compare("""key: [a:1, "b:#"]
""")
def test_14(self):
self.compare("""key: [","]
""")
def test_15(self):
self.compare("""key: ,
""")
def test_16(self):
self.compare("""key: ["]", b]
""")
def test_17(self):
self.compare("""key: ]
""")
def test_18(self):
self.compare("""key: \]
""", """key: ]
""")
def test_escape_0(self):
self.compare(r"""k\t\:y: \a\b\f\n\r\t\v""",
r"""k\t\:y: \x07\x08\x0c\n\r\t\x0b
""")
def test_escape_1(self):
self.compare(r"""k\x00: \x12A\x45""",
r"""k\x00: \x12AE
""")
def test_escape_2(self):
self.compare(r"""k\u0045y: \u1234A\uABc6""",
u"""kEy: \u1234A\uabc6
""")
def test_escape_3(self):
self.compare(r"""k\u0045y: \u1234A\uABc6""",
u"""kEy: \u1234A\uabc6
""")
def test_escape_4(self):
self.compare(r"""key: '\u1234A\uABc6'""",
u"""key: \u1234A\uabc6
""")
def test_escape_5(self):
self.compare(r"""key: [\u1234A\uABc6]""",
u"""key: [\u1234A\uabc6]
""")
def test_escape_6(self):
self.compare(r"""key: [\u1234A\uABc6\,]""",
u"""key: ["\u1234A\uabc6,"]
""")
def test_escape_7(self):
self.compare(r"""key: [\,\]\#]""",
r"""key: [",]#"]
""")
def test_escape_8(self):
self.compare(r"""key: \#""",
r"""key: "#"
""")
@pytest.mark.xfail(sys.maxunicode == 0xFFFF, reason="narrow unicode")
def test_escape_9(self):
self.compare(r"""key: \U10FFFFabc""",
u"""key: \U0010FFFFabc
""")
def test_escape_10(self):
self.compare(r"""key: \u10FFab""",
u"""key: \u10FFab
""")
def test_escape_11(self):
self.compare(r"""key: \\ab
""")
def test_atom_1(self):
self.compare(r"""key: @True
""")
def test_atom_2(self):
self.compare(r"""key: @False
""")
def test_atom_3(self):
self.compare(r"""key: @Reset
""")
def test_atom_4(self):
self.compare(r"""key: [a, @Reset, b]
""")
| mpl-2.0 |
svenkreiss/databench | databench/cli.py | 1 | 5320 | #!/usr/bin/env python
"""Databench command line tool. See http://databench.trivial.io for
more info."""
from __future__ import absolute_import, print_function
from . import __version__ as DATABENCH_VERSION
import argparse
import logging
import os
import ssl
import sys
import tornado
def main(**kwargs):
"""Entry point to run databench."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version',
version='%(prog)s {}'.format(DATABENCH_VERSION))
parser.add_argument('--log', dest='loglevel', default="INFO",
type=str.upper,
help=('log level (info, warning, error, critical or '
'debug, default info)'))
parser.add_argument('--no-watch', dest='watch', default=True,
action='store_false',
help='do not watch and restart when files change')
parser.add_argument('--host', dest='host',
default=os.environ.get('HOST', '127.0.0.1'),
help='host address for webserver (default 127.0.0.1)')
parser.add_argument('--port', dest='port',
type=int, default=int(os.environ.get('PORT', 5000)),
help='port for webserver')
if not kwargs:
parser.add_argument('--analyses', default=None,
help='import path for analyses')
parser.add_argument('--build', default=False, action='store_true',
help='run the build command and exit')
parser.add_argument('--coverage', default=False,
help=argparse.SUPPRESS)
ssl_args = parser.add_argument_group('SSL')
ssl_args.add_argument('--ssl-certfile', dest='ssl_certfile',
default=os.environ.get('SSLCERTFILE'),
help='SSL certificate file')
ssl_args.add_argument('--ssl-keyfile', dest='ssl_keyfile',
default=os.environ.get('SSLKEYFILE'),
help='SSL key file')
ssl_args.add_argument('--ssl-port', dest='ssl_port', type=int,
default=int(os.environ.get('SSLPORT', 0)),
help='SSL port for webserver')
args, analyses_args = parser.parse_known_args()
# coverage
cov = None
if args.coverage:
import coverage
cov = coverage.Coverage(data_file=args.coverage, data_suffix=True)
cov.start()
# this is included here so that is included in coverage
from .app import App, SingleApp
# log
logging.basicConfig(level=getattr(logging, args.loglevel))
if args.loglevel != 'INFO':
logging.info('Set loglevel to {}.'.format(args.loglevel))
# show versions and setup
logging.info('Databench {}'.format(DATABENCH_VERSION))
if args.host in ('localhost', '127.0.0.1'):
logging.info('Open http://{}:{} in a web browser.'
''.format(args.host, args.port))
logging.debug('host={}, port={}'.format(args.host, args.port))
logging.debug('Python {}'.format(sys.version))
if analyses_args:
logging.debug('Arguments passed to analyses: {}'.format(analyses_args))
if not kwargs:
app = App(args.analyses, cli_args=analyses_args, debug=args.watch)
else:
app = SingleApp(cli_args=analyses_args, debug=args.watch, **kwargs)
# check whether this is just a quick build
if args.build:
logging.info('Build mode: only run build command and exit.')
app.build()
if cov:
cov.stop()
cov.save()
return
# HTTP server
tornado_app = app.tornado_app()
tornado_app.listen(args.port, args.host)
# HTTPS server
if args.ssl_port:
if args.ssl_certfile and args.ssl_keyfile:
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(args.ssl_certfile, args.ssl_keyfile)
else:
# use Tornado's self signed certificates
module_dir = os.path.dirname(tornado.__file__)
ssl_ctx = {
'certfile': os.path.join(module_dir, 'test', 'test.crt'),
'keyfile': os.path.join(module_dir, 'test', 'test.key'),
}
logging.info('Open https://{}:{} in a web browser.'
''.format(args.host, args.ssl_port))
tornado_app.listen(args.ssl_port, ssl_options=ssl_ctx)
try:
tornado.ioloop.IOLoop.current().start()
except KeyboardInterrupt:
tornado.ioloop.IOLoop.current().stop()
if cov:
cov.stop()
cov.save()
def run(analysis, path=None, name=None, info=None, **kwargs):
"""Run a single analysis.
:param Analysis analysis: Analysis class to run.
:param str path: Path of analysis. Can be `__file__`.
:param str name: Name of the analysis.
:param dict info: Optional entries are ``version``, ``title``,
``readme``, ...
:param dict static: Map[url regex, root-folder] to serve static content.
"""
kwargs.update({
'analysis': analysis,
'path': path,
'name': name,
'info': info,
})
main(**kwargs)
if __name__ == '__main__':
main()
| mit |
mangaki/mangaki | mangaki/mangaki/management/commands/retrieveposters.py | 1 | 2428 | from django.core.management.base import BaseCommand
from mangaki.models import Work
import requests
import time
class Command(BaseCommand):
help = 'Downloads posters'
def add_arguments(self, parser):
parser.add_argument('work_id', nargs='*', type=int)
parser.add_argument('--check-exists', action='store_true')
parser.add_argument('--ratelimit', type=int, default=10)
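# Invocation sketch (the command name follows from this file's name):
#
#   python manage.py retrieveposters --check-exists --ratelimit 5
#   python manage.py retrieveposters 42 1337    # restrict to these work ids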
def handle(self, *args, **options):
qs = Work.objects.exclude(ext_poster='')
if options['work_id']:
qs = qs.filter(pk__in=options['work_id'])
nb_success = 0
failed = []
recent = []
with requests.Session() as s: # We use a session to use connection pooling
num_remaining = len(qs)
for work in qs:
if not (num_remaining % 10):
self.stdout.write('Remaining: {:d}'.format(num_remaining))
num_remaining -= 1
if work.int_poster:
if not options['check_exists']:
continue
if work.int_poster.storage.exists(work.int_poster.name):
continue
now = time.time()
while len(recent) >= options['ratelimit']:
now = time.time()
recent = [t for t in recent if now - t < 1.]
if len(recent) >= options['ratelimit']:
# We want to sleep:
# - No less than 0 seconds (as a sanity check)
# - No more than 1 second
# - Enough time so that when we wake up, the oldest
# time in recent was more than 1 second ago.
time.sleep(min(max(1.1 - now + recent[0], 0.), 1.))
recent.append(now)
if work.retrieve_poster(session=s):
nb_success += 1
else:
failed.append(work)
if nb_success:
self.stdout.write(self.style.SUCCESS(
'{:d} poster(s) successfully downloaded.'.format(nb_success)))
if failed:
self.stdout.write(self.style.ERROR('Some posters failed to download:'))
for work in failed:
self.stdout.write(self.style.ERROR(
' - {:s} ({:s})'.format(work.title, work.ext_poster)))
| agpl-3.0 |
fpy171/django | django/contrib/admin/checks.py | 186 | 38800 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from itertools import chain
from django.contrib.admin.utils import (
NotRelationField, flatten, get_fields_from_path,
)
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.forms.models import (
BaseModelForm, BaseModelFormSet, _get_foreign_key,
)
def check_admin_app(**kwargs):
from django.contrib.admin.sites import system_check_errors
return system_check_errors
class BaseModelAdminChecks(object):
def check(self, admin_obj, **kwargs):
errors = []
errors.extend(self._check_raw_id_fields(admin_obj))
errors.extend(self._check_fields(admin_obj))
errors.extend(self._check_fieldsets(admin_obj))
errors.extend(self._check_exclude(admin_obj))
errors.extend(self._check_form(admin_obj))
errors.extend(self._check_filter_vertical(admin_obj))
errors.extend(self._check_filter_horizontal(admin_obj))
errors.extend(self._check_radio_fields(admin_obj))
errors.extend(self._check_prepopulated_fields(admin_obj))
errors.extend(self._check_view_on_site_url(admin_obj))
errors.extend(self._check_ordering(admin_obj))
errors.extend(self._check_readonly_fields(admin_obj))
return errors
def _check_raw_id_fields(self, obj):
""" Check that `raw_id_fields` only contains field names that are listed
on the model. """
if not isinstance(obj.raw_id_fields, (list, tuple)):
return must_be('a list or tuple', option='raw_id_fields', obj=obj, id='admin.E001')
else:
return list(chain(*[
self._check_raw_id_fields_item(obj, obj.model, field_name, 'raw_id_fields[%d]' % index)
for index, field_name in enumerate(obj.raw_id_fields)
]))
def _check_raw_id_fields_item(self, obj, model, field_name, label):
""" Check an item of `raw_id_fields`, i.e. check that field named
`field_name` exists in model `model` and is a ForeignKey or a
ManyToManyField. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E002')
else:
if not isinstance(field, (models.ForeignKey, models.ManyToManyField)):
return must_be('a ForeignKey or ManyToManyField',
option=label, obj=obj, id='admin.E003')
else:
return []
def _check_fields(self, obj):
""" Check that `fields` only refer to existing fields, doesn't contain
duplicates. Check if at most one of `fields` and `fieldsets` is defined.
"""
if obj.fields is None:
return []
elif not isinstance(obj.fields, (list, tuple)):
return must_be('a list or tuple', option='fields', obj=obj, id='admin.E004')
elif obj.fieldsets:
return [
checks.Error(
"Both 'fieldsets' and 'fields' are specified.",
hint=None,
obj=obj.__class__,
id='admin.E005',
)
]
fields = flatten(obj.fields)
if len(fields) != len(set(fields)):
return [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
hint=None,
obj=obj.__class__,
id='admin.E006',
)
]
return list(chain(*[
self._check_field_spec(obj, obj.model, field_name, 'fields')
for field_name in obj.fields
]))
def _check_fieldsets(self, obj):
""" Check that fieldsets is properly formatted and doesn't contain
duplicates. """
if obj.fieldsets is None:
return []
elif not isinstance(obj.fieldsets, (list, tuple)):
return must_be('a list or tuple', option='fieldsets', obj=obj, id='admin.E007')
else:
return list(chain(*[
self._check_fieldsets_item(obj, obj.model, fieldset, 'fieldsets[%d]' % index)
for index, fieldset in enumerate(obj.fieldsets)
]))
def _check_fieldsets_item(self, obj, model, fieldset, label):
""" Check an item of `fieldsets`, i.e. check that this is a pair of a
set name and a dictionary containing "fields" key. """
if not isinstance(fieldset, (list, tuple)):
return must_be('a list or tuple', option=label, obj=obj, id='admin.E008')
elif len(fieldset) != 2:
return must_be('of length 2', option=label, obj=obj, id='admin.E009')
elif not isinstance(fieldset[1], dict):
return must_be('a dictionary', option='%s[1]' % label, obj=obj, id='admin.E010')
elif 'fields' not in fieldset[1]:
return [
checks.Error(
"The value of '%s[1]' must contain the key 'fields'." % label,
hint=None,
obj=obj.__class__,
id='admin.E011',
)
]
elif not isinstance(fieldset[1]['fields'], (list, tuple)):
return must_be('a list or tuple', option="%s[1]['fields']" % label, obj=obj, id='admin.E008')
fields = flatten(fieldset[1]['fields'])
if len(fields) != len(set(fields)):
return [
checks.Error(
"There are duplicate field(s) in '%s[1]'." % label,
hint=None,
obj=obj.__class__,
id='admin.E012',
)
]
return list(chain(*[
self._check_field_spec(obj, model, fieldset_fields, '%s[1]["fields"]' % label)
for fieldset_fields in fieldset[1]['fields']
]))
def _check_field_spec(self, obj, model, fields, label):
""" `fields` should be an item of `fields` or an item of
fieldset[1]['fields'] for any `fieldset` in `fieldsets`. It should be a
field name or a tuple of field names. """
if isinstance(fields, tuple):
return list(chain(*[
self._check_field_spec_item(obj, model, field_name, "%s[%d]" % (label, index))
for index, field_name in enumerate(fields)
]))
else:
return self._check_field_spec_item(obj, model, fields, label)
def _check_field_spec_item(self, obj, model, field_name, label):
if field_name in obj.readonly_fields:
# Stuff can be put in fields that isn't actually a model field if
# it's in readonly_fields; readonly_fields will handle the
# validation of such things.
return []
else:
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
# If we can't find a field on the model that matches, it could
# be an extra field on the form.
return []
else:
if (isinstance(field, models.ManyToManyField) and
not field.remote_field.through._meta.auto_created):
return [
checks.Error(
("The value of '%s' cannot include the ManyToManyField '%s', "
"because that field manually specifies a relationship model.")
% (label, field_name),
hint=None,
obj=obj.__class__,
id='admin.E013',
)
]
else:
return []
def _check_exclude(self, obj):
""" Check that exclude is a sequence without duplicates. """
if obj.exclude is None: # default value is None
return []
elif not isinstance(obj.exclude, (list, tuple)):
return must_be('a list or tuple', option='exclude', obj=obj, id='admin.E014')
elif len(obj.exclude) > len(set(obj.exclude)):
return [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
hint=None,
obj=obj.__class__,
id='admin.E015',
)
]
else:
return []
def _check_form(self, obj):
""" Check that form subclasses BaseModelForm. """
if hasattr(obj, 'form') and not issubclass(obj.form, BaseModelForm):
return must_inherit_from(parent='BaseModelForm', option='form',
obj=obj, id='admin.E016')
else:
return []
def _check_filter_vertical(self, obj):
""" Check that filter_vertical is a sequence of field names. """
if not hasattr(obj, 'filter_vertical'):
return []
elif not isinstance(obj.filter_vertical, (list, tuple)):
return must_be('a list or tuple', option='filter_vertical', obj=obj, id='admin.E017')
else:
return list(chain(*[
self._check_filter_item(obj, obj.model, field_name, "filter_vertical[%d]" % index)
for index, field_name in enumerate(obj.filter_vertical)
]))
def _check_filter_horizontal(self, obj):
""" Check that filter_horizontal is a sequence of field names. """
if not hasattr(obj, 'filter_horizontal'):
return []
elif not isinstance(obj.filter_horizontal, (list, tuple)):
return must_be('a list or tuple', option='filter_horizontal', obj=obj, id='admin.E018')
else:
return list(chain(*[
self._check_filter_item(obj, obj.model, field_name, "filter_horizontal[%d]" % index)
for index, field_name in enumerate(obj.filter_horizontal)
]))
def _check_filter_item(self, obj, model, field_name, label):
""" Check one item of `filter_vertical` or `filter_horizontal`, i.e.
check that given field exists and is a ManyToManyField. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E019')
else:
if not isinstance(field, models.ManyToManyField):
return must_be('a ManyToManyField', option=label, obj=obj, id='admin.E020')
else:
return []
def _check_radio_fields(self, obj):
""" Check that `radio_fields` is a dictionary. """
if not hasattr(obj, 'radio_fields'):
return []
elif not isinstance(obj.radio_fields, dict):
return must_be('a dictionary', option='radio_fields', obj=obj, id='admin.E021')
else:
return list(chain(*[
self._check_radio_fields_key(obj, obj.model, field_name, 'radio_fields') +
self._check_radio_fields_value(obj, val, 'radio_fields["%s"]' % field_name)
for field_name, val in obj.radio_fields.items()
]))
def _check_radio_fields_key(self, obj, model, field_name, label):
""" Check that a key of `radio_fields` dictionary is name of existing
field and that the field is a ForeignKey or has `choices` defined. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E022')
else:
if not (isinstance(field, models.ForeignKey) or field.choices):
return [
checks.Error(
"The value of '%s' refers to '%s', which is not an "
"instance of ForeignKey, and does not have a 'choices' definition." % (
label, field_name
),
hint=None,
obj=obj.__class__,
id='admin.E023',
)
]
else:
return []
def _check_radio_fields_value(self, obj, val, label):
""" Check type of a value of `radio_fields` dictionary. """
from django.contrib.admin.options import HORIZONTAL, VERTICAL
if val not in (HORIZONTAL, VERTICAL):
return [
checks.Error(
"The value of '%s' must be either admin.HORIZONTAL or admin.VERTICAL." % label,
hint=None,
obj=obj.__class__,
id='admin.E024',
)
]
else:
return []
def _check_view_on_site_url(self, obj):
if hasattr(obj, 'view_on_site'):
if not callable(obj.view_on_site) and not isinstance(obj.view_on_site, bool):
return [
checks.Error(
"The value of 'view_on_site' must be a callable or a boolean value.",
hint=None,
obj=obj.__class__,
id='admin.E025',
)
]
else:
return []
else:
return []
def _check_prepopulated_fields(self, obj):
""" Check that `prepopulated_fields` is a dictionary containing allowed
field types. """
if not hasattr(obj, 'prepopulated_fields'):
return []
elif not isinstance(obj.prepopulated_fields, dict):
return must_be('a dictionary', option='prepopulated_fields', obj=obj, id='admin.E026')
else:
return list(chain(*[
self._check_prepopulated_fields_key(obj, obj.model, field_name, 'prepopulated_fields') +
self._check_prepopulated_fields_value(obj, obj.model, val, 'prepopulated_fields["%s"]' % field_name)
for field_name, val in obj.prepopulated_fields.items()
]))
def _check_prepopulated_fields_key(self, obj, model, field_name, label):
""" Check a key of `prepopulated_fields` dictionary, i.e. check that it
is a name of existing field and the field is one of the allowed types.
"""
forbidden_field_types = (
models.DateTimeField,
models.ForeignKey,
models.ManyToManyField
)
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E027')
else:
if isinstance(field, forbidden_field_types):
return [
checks.Error(
"The value of '%s' refers to '%s', which must not be a DateTimeField, "
"ForeignKey or ManyToManyField." % (
label, field_name
),
hint=None,
obj=obj.__class__,
id='admin.E028',
)
]
else:
return []
def _check_prepopulated_fields_value(self, obj, model, val, label):
""" Check a value of `prepopulated_fields` dictionary, i.e. it's an
iterable of existing fields. """
if not isinstance(val, (list, tuple)):
return must_be('a list or tuple', option=label, obj=obj, id='admin.E029')
else:
return list(chain(*[
self._check_prepopulated_fields_value_item(obj, model, subfield_name, "%s[%r]" % (label, index))
for index, subfield_name in enumerate(val)
]))
def _check_prepopulated_fields_value_item(self, obj, model, field_name, label):
""" For `prepopulated_fields` equal to {"slug": ("title",)},
`field_name` is "title". """
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E030')
else:
return []
def _check_ordering(self, obj):
""" Check that ordering refers to existing fields or is random. """
# ordering = None
if obj.ordering is None: # The default value is None
return []
elif not isinstance(obj.ordering, (list, tuple)):
return must_be('a list or tuple', option='ordering', obj=obj, id='admin.E031')
else:
return list(chain(*[
self._check_ordering_item(obj, obj.model, field_name, 'ordering[%d]' % index)
for index, field_name in enumerate(obj.ordering)
]))
def _check_ordering_item(self, obj, model, field_name, label):
""" Check that `ordering` refers to existing fields. """
if field_name == '?' and len(obj.ordering) != 1:
return [
checks.Error(
("The value of 'ordering' has the random ordering marker '?', "
"but contains other fields as well."),
hint='Either remove the "?", or remove the other fields.',
obj=obj.__class__,
id='admin.E032',
)
]
elif field_name == '?':
return []
elif '__' in field_name:
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
return []
else:
if field_name.startswith('-'):
field_name = field_name[1:]
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E033')
else:
return []
def _check_readonly_fields(self, obj):
""" Check that readonly_fields refers to proper attribute or field. """
if obj.readonly_fields == ():
return []
elif not isinstance(obj.readonly_fields, (list, tuple)):
return must_be('a list or tuple', option='readonly_fields', obj=obj, id='admin.E034')
else:
return list(chain(*[
self._check_readonly_fields_item(obj, obj.model, field_name, "readonly_fields[%d]" % index)
for index, field_name in enumerate(obj.readonly_fields)
]))
def _check_readonly_fields_item(self, obj, model, field_name, label):
if callable(field_name):
return []
elif hasattr(obj, field_name):
return []
elif hasattr(model, field_name):
return []
else:
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return [
checks.Error(
"The value of '%s' is not a callable, an attribute of '%s', or an attribute of '%s.%s'." % (
label, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E035',
)
]
else:
return []
class ModelAdminChecks(BaseModelAdminChecks):
def check(self, admin_obj, **kwargs):
errors = super(ModelAdminChecks, self).check(admin_obj)
errors.extend(self._check_save_as(admin_obj))
errors.extend(self._check_save_on_top(admin_obj))
errors.extend(self._check_inlines(admin_obj))
errors.extend(self._check_list_display(admin_obj))
errors.extend(self._check_list_display_links(admin_obj))
errors.extend(self._check_list_filter(admin_obj))
errors.extend(self._check_list_select_related(admin_obj))
errors.extend(self._check_list_per_page(admin_obj))
errors.extend(self._check_list_max_show_all(admin_obj))
errors.extend(self._check_list_editable(admin_obj))
errors.extend(self._check_search_fields(admin_obj))
errors.extend(self._check_date_hierarchy(admin_obj))
return errors
def _check_save_as(self, obj):
""" Check save_as is a boolean. """
if not isinstance(obj.save_as, bool):
return must_be('a boolean', option='save_as',
obj=obj, id='admin.E101')
else:
return []
def _check_save_on_top(self, obj):
""" Check save_on_top is a boolean. """
if not isinstance(obj.save_on_top, bool):
return must_be('a boolean', option='save_on_top',
obj=obj, id='admin.E102')
else:
return []
def _check_inlines(self, obj):
""" Check all inline model admin classes. """
if not isinstance(obj.inlines, (list, tuple)):
return must_be('a list or tuple', option='inlines', obj=obj, id='admin.E103')
else:
return list(chain(*[
self._check_inlines_item(obj, obj.model, item, "inlines[%d]" % index)
for index, item in enumerate(obj.inlines)
]))
def _check_inlines_item(self, obj, model, inline, label):
""" Check one inline model admin. """
inline_label = '.'.join([inline.__module__, inline.__name__])
from django.contrib.admin.options import BaseModelAdmin
if not issubclass(inline, BaseModelAdmin):
return [
checks.Error(
"'%s' must inherit from 'BaseModelAdmin'." % inline_label,
hint=None,
obj=obj.__class__,
id='admin.E104',
)
]
elif not inline.model:
return [
checks.Error(
"'%s' must have a 'model' attribute." % inline_label,
hint=None,
obj=obj.__class__,
id='admin.E105',
)
]
elif not issubclass(inline.model, models.Model):
return must_be('a Model', option='%s.model' % inline_label,
obj=obj, id='admin.E106')
else:
return inline(model, obj.admin_site).check()
def _check_list_display(self, obj):
""" Check that list_display only contains fields or usable attributes.
"""
if not isinstance(obj.list_display, (list, tuple)):
return must_be('a list or tuple', option='list_display', obj=obj, id='admin.E107')
else:
return list(chain(*[
self._check_list_display_item(obj, obj.model, item, "list_display[%d]" % index)
for index, item in enumerate(obj.list_display)
]))
def _check_list_display_item(self, obj, model, item, label):
if callable(item):
return []
elif hasattr(obj, item):
return []
elif hasattr(model, item):
# getattr(model, item) could be an X_RelatedObjectsDescriptor
try:
field = model._meta.get_field(item)
except FieldDoesNotExist:
try:
field = getattr(model, item)
except AttributeError:
field = None
if field is None:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not a "
"callable, an attribute of '%s', or an attribute or method on '%s.%s'." % (
label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E108',
)
]
elif isinstance(field, models.ManyToManyField):
return [
checks.Error(
"The value of '%s' must not be a ManyToManyField." % label,
hint=None,
obj=obj.__class__,
id='admin.E109',
)
]
else:
return []
else:
try:
model._meta.get_field(item)
except FieldDoesNotExist:
return [
# This is a deliberate repeat of E108; there's more than one path
# required to test this condition.
checks.Error(
"The value of '%s' refers to '%s', which is not a callable, "
"an attribute of '%s', or an attribute or method on '%s.%s'." % (
label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E108',
)
]
else:
return []
def _check_list_display_links(self, obj):
""" Check that list_display_links is a unique subset of list_display.
"""
if obj.list_display_links is None:
return []
elif not isinstance(obj.list_display_links, (list, tuple)):
return must_be('a list, a tuple, or None', option='list_display_links', obj=obj, id='admin.E110')
else:
return list(chain(*[
self._check_list_display_links_item(obj, field_name, "list_display_links[%d]" % index)
for index, field_name in enumerate(obj.list_display_links)
]))
def _check_list_display_links_item(self, obj, field_name, label):
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not defined in 'list_display'." % (
label, field_name
),
hint=None,
obj=obj.__class__,
id='admin.E111',
)
]
else:
return []
def _check_list_filter(self, obj):
if not isinstance(obj.list_filter, (list, tuple)):
return must_be('a list or tuple', option='list_filter', obj=obj, id='admin.E112')
else:
return list(chain(*[
self._check_list_filter_item(obj, obj.model, item, "list_filter[%d]" % index)
for index, item in enumerate(obj.list_filter)
]))
def _check_list_filter_item(self, obj, model, item, label):
"""
Check one item of `list_filter`, i.e. check if it is one of three options:
1. 'field' -- a basic field filter, possibly w/ relationships (e.g.
'field__rel')
2. ('field', SomeFieldListFilter) - a field-based list filter class
3. SomeListFilter - a non-field list filter class
"""
from django.contrib.admin import ListFilter, FieldListFilter
if callable(item) and not isinstance(item, models.Field):
# If item is option 3, it should be a ListFilter...
if not issubclass(item, ListFilter):
return must_inherit_from(parent='ListFilter', option=label,
obj=obj, id='admin.E113')
# ... but not a FieldListFilter.
elif issubclass(item, FieldListFilter):
return [
checks.Error(
"The value of '%s' must not inherit from 'FieldListFilter'." % label,
hint=None,
obj=obj.__class__,
id='admin.E114',
)
]
else:
return []
elif isinstance(item, (tuple, list)):
# item is option #2
field, list_filter_class = item
if not issubclass(list_filter_class, FieldListFilter):
return must_inherit_from(parent='FieldListFilter', option='%s[1]' % label,
obj=obj, id='admin.E115')
else:
return []
else:
# item is option #1
field = item
# Validate the field string
try:
get_fields_from_path(model, field)
except (NotRelationField, FieldDoesNotExist):
return [
checks.Error(
"The value of '%s' refers to '%s', which does not refer to a Field." % (label, field),
hint=None,
obj=obj.__class__,
id='admin.E116',
)
]
else:
return []
def _check_list_select_related(self, obj):
""" Check that list_select_related is a boolean, a list or a tuple. """
if not isinstance(obj.list_select_related, (bool, list, tuple)):
return must_be('a boolean, tuple or list', option='list_select_related',
obj=obj, id='admin.E117')
else:
return []
def _check_list_per_page(self, obj):
""" Check that list_per_page is an integer. """
if not isinstance(obj.list_per_page, int):
return must_be('an integer', option='list_per_page', obj=obj, id='admin.E118')
else:
return []
def _check_list_max_show_all(self, obj):
""" Check that list_max_show_all is an integer. """
if not isinstance(obj.list_max_show_all, int):
return must_be('an integer', option='list_max_show_all', obj=obj, id='admin.E119')
else:
return []
def _check_list_editable(self, obj):
""" Check that list_editable is a sequence of editable fields from
list_display without first element. """
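        # Illustrative valid configuration (not part of the original code; field names are
        # hypothetical):
        #   list_display = ('id', 'name', 'status'); list_display_links = ('id',)
        #   list_editable = ('status',)
        # 'status' is in list_display, is not a display link, and is not the first column.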
if not isinstance(obj.list_editable, (list, tuple)):
return must_be('a list or tuple', option='list_editable', obj=obj, id='admin.E120')
else:
return list(chain(*[
self._check_list_editable_item(obj, obj.model, item, "list_editable[%d]" % index)
for index, item in enumerate(obj.list_editable)
]))
def _check_list_editable_item(self, obj, model, field_name, label):
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E121')
else:
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not "
"contained in 'list_display'." % (label, field_name),
hint=None,
obj=obj.__class__,
id='admin.E122',
)
]
elif obj.list_display_links and field_name in obj.list_display_links:
return [
checks.Error(
"The value of '%s' cannot be in both 'list_editable' and 'list_display_links'." % field_name,
hint=None,
obj=obj.__class__,
id='admin.E123',
)
]
# Check that list_display_links is set, and that the first values of list_editable and list_display are
# not the same. See ticket #22792 for the use case relating to this.
elif (obj.list_display[0] in obj.list_editable and obj.list_display[0] != obj.list_editable[0] and
obj.list_display_links is not None):
return [
checks.Error(
"The value of '%s' refers to the first field in 'list_display' ('%s'), "
"which cannot be used unless 'list_display_links' is set." % (
label, obj.list_display[0]
),
hint=None,
obj=obj.__class__,
id='admin.E124',
)
]
elif not field.editable:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not editable through the admin." % (
label, field_name
),
hint=None,
obj=obj.__class__,
id='admin.E125',
)
]
else:
return []
def _check_search_fields(self, obj):
""" Check search_fields is a sequence. """
if not isinstance(obj.search_fields, (list, tuple)):
return must_be('a list or tuple', option='search_fields', obj=obj, id='admin.E126')
else:
return []
def _check_date_hierarchy(self, obj):
""" Check that date_hierarchy refers to DateField or DateTimeField. """
if obj.date_hierarchy is None:
return []
else:
try:
field = obj.model._meta.get_field(obj.date_hierarchy)
except FieldDoesNotExist:
return refer_to_missing_field(option='date_hierarchy',
field=obj.date_hierarchy,
model=obj.model, obj=obj, id='admin.E127')
else:
if not isinstance(field, (models.DateField, models.DateTimeField)):
return must_be('a DateField or DateTimeField', option='date_hierarchy',
obj=obj, id='admin.E128')
else:
return []
class InlineModelAdminChecks(BaseModelAdminChecks):
def check(self, inline_obj, **kwargs):
errors = super(InlineModelAdminChecks, self).check(inline_obj)
parent_model = inline_obj.parent_model
errors.extend(self._check_relation(inline_obj, parent_model))
errors.extend(self._check_exclude_of_parent_model(inline_obj, parent_model))
errors.extend(self._check_extra(inline_obj))
errors.extend(self._check_max_num(inline_obj))
errors.extend(self._check_min_num(inline_obj))
errors.extend(self._check_formset(inline_obj))
return errors
def _check_exclude_of_parent_model(self, obj, parent_model):
# Do not perform more specific checks if the base checks result in an
# error.
errors = super(InlineModelAdminChecks, self)._check_exclude(obj)
if errors:
return []
# Skip if `fk_name` is invalid.
if self._check_relation(obj, parent_model):
return []
if obj.exclude is None:
return []
fk = _get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name)
if fk.name in obj.exclude:
return [
checks.Error(
"Cannot exclude the field '%s', because it is the foreign key "
"to the parent model '%s.%s'." % (
fk.name, parent_model._meta.app_label, parent_model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E201',
)
]
else:
return []
def _check_relation(self, obj, parent_model):
try:
_get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name)
except ValueError as e:
return [checks.Error(e.args[0], hint=None, obj=obj.__class__, id='admin.E202')]
else:
return []
def _check_extra(self, obj):
""" Check that extra is an integer. """
if not isinstance(obj.extra, int):
return must_be('an integer', option='extra', obj=obj, id='admin.E203')
else:
return []
def _check_max_num(self, obj):
""" Check that max_num is an integer. """
if obj.max_num is None:
return []
elif not isinstance(obj.max_num, int):
return must_be('an integer', option='max_num', obj=obj, id='admin.E204')
else:
return []
def _check_min_num(self, obj):
""" Check that min_num is an integer. """
if obj.min_num is None:
return []
elif not isinstance(obj.min_num, int):
return must_be('an integer', option='min_num', obj=obj, id='admin.E205')
else:
return []
def _check_formset(self, obj):
""" Check formset is a subclass of BaseModelFormSet. """
if not issubclass(obj.formset, BaseModelFormSet):
return must_inherit_from(parent='BaseModelFormSet', option='formset',
obj=obj, id='admin.E206')
else:
return []
def must_be(type, option, obj, id):
return [
checks.Error(
"The value of '%s' must be %s." % (option, type),
hint=None,
obj=obj.__class__,
id=id,
),
]
def must_inherit_from(parent, option, obj, id):
return [
checks.Error(
"The value of '%s' must inherit from '%s'." % (option, parent),
hint=None,
obj=obj.__class__,
id=id,
),
]
def refer_to_missing_field(field, option, model, obj, id):
return [
checks.Error(
"The value of '%s' refers to '%s', which is not an attribute of '%s.%s'." % (
option, field, model._meta.app_label, model._meta.object_name
),
hint=None,
obj=obj.__class__,
id=id,
),
]
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/numpy/distutils/fcompiler/vast.py | 184 | 1775 | from __future__ import division, absolute_import, print_function
import os
from numpy.distutils.fcompiler.gnu import GnuFCompiler
compilers = ['VastFCompiler']
class VastFCompiler(GnuFCompiler):
compiler_type = 'vast'
compiler_aliases = ()
description = 'Pacific-Sierra Research Fortran 90 Compiler'
version_pattern = r'\s*Pacific-Sierra Research vf90 '\
                      r'(Personal|Professional)\s+(?P<version>[^\s]*)'
# VAST f90 does not support -o with -c. So, object files are created
    # in the current directory and then moved to the build directory
object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile '
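    # Rough illustration of the effect (assumed behaviour, not from the numpy sources): the
    # switch above turns "vf90 -c foo.f90 <object_switch> build/.../foo.o" into a shell
    # command that compiles in place and then moves the object file to the build directory.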
executables = {
'version_cmd' : ["vf90", "-v"],
'compiler_f77' : ["g77"],
'compiler_fix' : ["f90", "-Wv,-ya"],
'compiler_f90' : ["f90"],
'linker_so' : ["<F90>"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = None #XXX Fix me
module_include_switch = None #XXX Fix me
def find_executables(self):
pass
def get_version_cmd(self):
f90 = self.compiler_f90[0]
d, b = os.path.split(f90)
vf90 = os.path.join(d, 'v'+b)
return vf90
def get_flags_arch(self):
vast_version = self.get_version()
gnu = GnuFCompiler()
gnu.customize(None)
self.version = gnu.get_version()
opt = GnuFCompiler.get_flags_arch(self)
self.version = vast_version
return opt
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils.fcompiler import new_fcompiler
compiler = new_fcompiler(compiler='vast')
compiler.customize()
print(compiler.get_version())
| mit |
ytjiang/django | tests/template_tests/syntax_tests/test_filter_tag.py | 521 | 1795 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class FilterTagTests(SimpleTestCase):
@setup({'filter01': '{% filter upper %}{% endfilter %}'})
def test_filter01(self):
output = self.engine.render_to_string('filter01')
self.assertEqual(output, '')
@setup({'filter02': '{% filter upper %}django{% endfilter %}'})
def test_filter02(self):
output = self.engine.render_to_string('filter02')
self.assertEqual(output, 'DJANGO')
@setup({'filter03': '{% filter upper|lower %}django{% endfilter %}'})
def test_filter03(self):
output = self.engine.render_to_string('filter03')
self.assertEqual(output, 'django')
@setup({'filter04': '{% filter cut:remove %}djangospam{% endfilter %}'})
def test_filter04(self):
output = self.engine.render_to_string('filter04', {'remove': 'spam'})
self.assertEqual(output, 'django')
@setup({'filter05': '{% filter safe %}fail{% endfilter %}'})
def test_filter05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter05')
@setup({'filter05bis': '{% filter upper|safe %}fail{% endfilter %}'})
def test_filter05bis(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter05bis')
@setup({'filter06': '{% filter escape %}fail{% endfilter %}'})
def test_filter06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter06')
@setup({'filter06bis': '{% filter upper|escape %}fail{% endfilter %}'})
def test_filter06bis(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template('filter06bis')
| bsd-3-clause |
mom-ocean/MOM5 | test/test_run.py | 3 | 2465 |
from __future__ import print_function
from model_test_setup import ModelTestSetup
import os
import sys
import shutil
# This defines the different tests. To run an individual test on the command
# line type (for example):
# $ python -c "import test_run ; tc = test_run.TestRun() ; test_run.TestRun.check_run(tc, 'MOM_SIS.om3_core3')"
#
# If you want the test harness to submit a job to run the test, then type:
# $ python -c "import test_run ; tc = test_run.TestRun() ; test_run.TestRun.check_run(tc, 'om3_core3', qsub=True)"
tests = {
'om3_core3' : (('MOM_SIS', 'om3_core3'), {'ncpus' : '32', 'npes' : '24'}),
# 'om3_core1' : (('MOM_SIS', 'om3_core1'), {'ncpus' : '32', 'npes' : '24'}),
# 'atlantic1' : (('MOM_SIS', 'atlantic1'), {'ncpus' : '32', 'npes' : '24', 'mem' : '64Gb'}),
# 'mom4p1_ebm1' : (('EBM', 'mom4p1_ebm1'), {'ncpus' : '32', 'npes' : '17', 'mem' : '64Gb'}),
# 'MOM_SIS_TOPAZ' : (('MOM_SIS', 'MOM_SIS_TOPAZ'), {'ncpus' : '32', 'npes' : '24', 'walltime' : '02:00:00'}),
# 'MOM_SIS_BLING' : (('MOM_SIS', 'MOM_SIS_BLING'), {'ncpus' : '32', 'npes' : '24'}),
# 'CM2.1p1' : (('CM2M', 'CM2.1p1'), {'ncpus' : '64', 'npes' : '45', 'mem' : '128Gb'}),
# 'CM2M_coarse_BLING' : (('CM2M', 'CM2M_coarse_BLING'), {'ncpus' : '64', 'npes' : '45', 'mem' : '128Gb'}),
# 'ICCMp1' : (('ICCM', 'ICCMp1'), {'ncpus' : '64', 'npes' : '54', 'mem' : '128Gb'}),
# 'ESM2M_pi-control_C2' : (('ESM2M', 'ESM2M_pi-control_C2'), {'ncpus' : '128', 'npes' : '90', 'mem' : '256Gb'}),
'global_0.25_degree_NYF' : (('MOM_SIS', 'global_0.25_degree_NYF'), {'ncpus' : '960', 'npes' : '960', 'mem' : '1900Gb'})
}
class TestRun(ModelTestSetup):
"""
Run all test cases and check for successful output.
"""
# Run tests in parallel.
# Run with nosetests test_run.py --processes=<n>
_multiprocess_can_split_ = True
def __init__(self):
super(TestRun, self).__init__()
def check_run(self, key, qsub=False):
args = tests[key][0]
kwargs = tests[key][1]
kwargs['qsub'] = qsub
r, so, se = self.run(*args, **kwargs)
print(so)
print(se)
sys.stdout.flush()
assert(r == 0)
assert('NOTE: Natural end-of-script for experiment {} with model {}'.format(key, tests[key][0][0]) in so)
def test_experiments(self):
for k in tests.keys():
yield self.check_run, k
| gpl-2.0 |
pchauncey/ansible | lib/ansible/modules/network/avi/avi_snmptrapprofile.py | 27 | 3396 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_snmptrapprofile
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of SnmpTrapProfile Avi RESTful Object
description:
- This module is used to configure SnmpTrapProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
name:
description:
- A user-friendly name of the snmp trap configuration.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
trap_servers:
description:
- The ip address or hostname of the snmp trap destination server.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the snmp trap profile object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create SnmpTrapProfile object
avi_snmptrapprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_snmptrapprofile
"""
RETURN = '''
obj:
description: SnmpTrapProfile (api/snmptrapprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
trap_servers=dict(type='list',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'snmptrapprofile',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
oouyang/fxos-certsuite | mcts/web-platform-tests/tests/webdriver/base_test.py | 7 | 1303 | import ConfigParser
import json
import os
import sys
import unittest
from webserver import Httpd
from network import get_lan_ip
repo_root = os.path.abspath(os.path.join(__file__, "../.."))
sys.path.insert(1, os.path.join(repo_root, "tools", "webdriver"))
from webdriver.driver import WebDriver
from webdriver import exceptions, wait
class WebDriverBaseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver = create_driver()
cls.webserver = Httpd(host=get_lan_ip())
cls.webserver.start()
@classmethod
def tearDownClass(cls):
cls.webserver.stop()
if cls.driver:
cls.driver.quit()
def create_driver():
config = ConfigParser.ConfigParser()
config.read('webdriver.cfg')
section = os.environ.get("WD_BROWSER", 'firefox')
url = 'http://127.0.0.1:4444/wd/hub'
if config.has_option(section, 'url'):
url = config.get(section, "url")
capabilities = None
if config.has_option(section, 'capabilities'):
try:
capabilities = json.loads(config.get(section, "capabilities"))
except:
pass
mode = 'compatibility'
if config.has_option(section, 'mode'):
mode = config.get(section, 'mode')
return WebDriver(url, {}, capabilities, mode)
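# Illustrative webdriver.cfg consumed by create_driver() above (values are hypothetical):
#   [firefox]
#   url = http://127.0.0.1:4444/wd/hub
#   capabilities = {"browserName": "firefox"}
#   mode = compatibility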
| mpl-2.0 |
mastizada/kuma | vendor/packages/nose/unit_tests/test_config.py | 10 | 4551 | import re
import os
import tempfile
import unittest
import warnings
import pickle
import sys
import nose.config
from nose.plugins.manager import DefaultPluginManager
from nose.plugins.skip import SkipTest
from nose.plugins.prof import Profile
class TestNoseConfig(unittest.TestCase):
def test_defaults(self):
c = nose.config.Config()
assert c.addPaths == True
# FIXME etc
def test_reset(self):
c = nose.config.Config()
c.include = 'include'
assert c.include == 'include'
c.reset()
assert c.include is None
def test_update(self):
c = nose.config.Config()
c.update({'exclude':'x'})
assert c.exclude == 'x'
def test_ignore_files_default(self):
"""
The default configuration should have several ignore file settings.
"""
c = nose.config.Config()
c.configure(['program'])
self.assertEqual(len(c.ignoreFiles), 3)
def test_ignore_files_single(self):
"""A single ignore-files flag should override the default settings."""
c = nose.config.Config()
c.configure(['program', '--ignore-files=a'])
self.assertEqual(len(c.ignoreFiles), 1)
aMatcher = c.ignoreFiles[0]
assert aMatcher.match('a')
assert not aMatcher.match('b')
def test_ignore_files_multiple(self):
"""
Multiple ignore-files flags should be appended together, overriding
the default settings.
"""
c = nose.config.Config()
c.configure(['program', '--ignore-files=a', '-Ib'])
self.assertEqual(len(c.ignoreFiles), 2)
aMatcher, bMatcher = c.ignoreFiles
assert aMatcher.match('a')
assert not aMatcher.match('b')
assert bMatcher.match('b')
assert not bMatcher.match('a')
def test_multiple_include(self):
c = nose.config.Config()
c.configure(['program', '--include=a', '--include=b'])
self.assertEqual(len(c.include), 2)
a, b = c.include
assert a.match('a')
assert not a.match('b')
assert b.match('b')
assert not b.match('a')
def test_single_include(self):
c = nose.config.Config()
c.configure(['program', '--include=b'])
self.assertEqual(len(c.include), 1)
b = c.include[0]
assert b.match('b')
assert not b.match('a')
def test_plugins(self):
c = nose.config.Config()
assert c.plugins
c.plugins.begin()
def test_testnames(self):
c = nose.config.Config()
c.configure(['program', 'foo', 'bar', 'baz.buz.biz'])
self.assertEqual(c.testNames, ['foo', 'bar', 'baz.buz.biz'])
c = nose.config.Config(testNames=['foo'])
c.configure([])
self.assertEqual(c.testNames, ['foo'])
def test_where(self):
# we don't need to see our own warnings
warnings.filterwarnings(action='ignore',
category=DeprecationWarning,
module='nose.config')
here = os.path.dirname(__file__)
support = os.path.join(here, 'support')
foo = os.path.abspath(os.path.join(support, 'foo'))
c = nose.config.Config()
c.configure(['program', '-w', foo, '-w', 'bar'])
self.assertEqual(c.workingDir, foo)
self.assertEqual(c.testNames, ['bar'])
def test_progname_looks_like_option(self):
# issue #184
c = nose.config.Config()
# the -v here is the program name, not an option
# this matters eg. with python -c "import nose; nose.main()"
c.configure(['-v', 'mytests'])
self.assertEqual(c.verbosity, 1)
def test_pickle_empty(self):
c = nose.config.Config()
cp = pickle.dumps(c)
cc = pickle.loads(cp)
def test_pickle_configured(self):
if 'java' in sys.version.lower():
raise SkipTest("jython has no profiler plugin")
c = nose.config.Config(plugins=DefaultPluginManager())
config_args = ['--with-doctest', '--with-coverage',
'--with-id', '--attr=A', '--collect', '--all',
'--with-isolation', '-d', '--with-xunit', '--processes=2',
'--pdb']
if Profile.available():
config_args.append('--with-profile')
c.configure(config_args)
cp = pickle.dumps(c)
cc = pickle.loads(cp)
assert cc.plugins._plugins
if __name__ == '__main__':
unittest.main()
| mpl-2.0 |
saurabh6790/test_final_med_app | support/doctype/maintenance_schedule/maintenance_schedule.py | 29 | 11570 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import add_days, cstr, getdate
from webnotes.model.doc import addchild
from webnotes.model.bean import getlist
from webnotes import msgprint
from utilities.transaction_base import TransactionBase, delete_events
class DocType(TransactionBase):
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
def get_item_details(self, item_code):
item = webnotes.conn.sql("select item_name, description from `tabItem` where name = '%s'" %(item_code), as_dict=1)
ret = {
'item_name': item and item[0]['item_name'] or '',
'description' : item and item[0]['description'] or ''
}
return ret
def generate_schedule(self):
self.doclist = self.doc.clear_table(self.doclist, 'maintenance_schedule_detail')
count = 0
webnotes.conn.sql("delete from `tabMaintenance Schedule Detail` where parent='%s'" %(self.doc.name))
for d in getlist(self.doclist, 'item_maintenance_detail'):
self.validate_maintenance_detail()
s_list =[]
s_list = self.create_schedule_list(d.start_date, d.end_date, d.no_of_visits)
for i in range(d.no_of_visits):
child = addchild(self.doc, 'maintenance_schedule_detail',
'Maintenance Schedule Detail', self.doclist)
child.item_code = d.item_code
child.item_name = d.item_name
child.scheduled_date = s_list[i].strftime('%Y-%m-%d')
if d.serial_no:
child.serial_no = d.serial_no
child.idx = count
count = count+1
child.incharge_name = d.incharge_name
child.save(1)
self.on_update()
def on_submit(self):
if not getlist(self.doclist, 'maintenance_schedule_detail'):
msgprint("Please click on 'Generate Schedule' to get schedule")
raise Exception
self.check_serial_no_added()
self.validate_serial_no_warranty()
self.validate_schedule()
email_map ={}
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.serial_no:
self.update_amc_date(d.serial_no, d.end_date)
if d.incharge_name not in email_map:
email_map[d.incharge_name] = webnotes.bean("Sales Person",
d.incharge_name).run_method("get_email_id")
scheduled_date =webnotes.conn.sql("select scheduled_date from `tabMaintenance Schedule Detail` \
where incharge_name='%s' and item_code='%s' and parent='%s' " %(d.incharge_name, \
d.item_code, self.doc.name), as_dict=1)
for key in scheduled_date:
if email_map[d.incharge_name]:
description = "Reference: %s, Item Code: %s and Customer: %s" % \
(self.doc.name, d.item_code, self.doc.customer)
webnotes.bean({
"doctype": "Event",
"owner": email_map[d.incharge_name] or self.doc.owner,
"subject": description,
"description": description,
"starts_on": key["scheduled_date"] + " 10:00:00",
"event_type": "Private",
"ref_type": self.doc.doctype,
"ref_name": self.doc.name
}).insert()
webnotes.conn.set(self.doc, 'status', 'Submitted')
#get schedule dates
#----------------------
def create_schedule_list(self, start_date, end_date, no_of_visit):
schedule_list = []
start_date1 = start_date
date_diff = (getdate(end_date) - getdate(start_date)).days
add_by = date_diff/no_of_visit
#schedule_list.append(start_date1)
while(getdate(start_date1) < getdate(end_date)):
start_date1 = add_days(start_date1, add_by)
if len(schedule_list) < no_of_visit:
schedule_list.append(getdate(start_date1))
return schedule_list
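	# Worked example (illustrative): start 2013-01-01, end 2013-12-31 and 4 visits give
	# add_by = 364/4 = 91 days, so visits fall roughly in April, July, October and December.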
#validate date range and periodicity selected
#-------------------------------------------------
def validate_period(self, arg):
arg1 = eval(arg)
if getdate(arg1['start_date']) >= getdate(arg1['end_date']):
msgprint("Start date should be less than end date ")
raise Exception
period = (getdate(arg1['end_date'])-getdate(arg1['start_date'])).days+1
if (arg1['periodicity']=='Yearly' or arg1['periodicity']=='Half Yearly' or arg1['periodicity']=='Quarterly') and period<365:
			msgprint(cstr(arg1['periodicity'])+ " periodicity can be set for a period of at least 1 year or more only")
raise Exception
elif arg1['periodicity']=='Monthly' and period<30:
msgprint("Monthly periodicity can be set for period of atleast 1 month or more")
raise Exception
elif arg1['periodicity']=='Weekly' and period<7:
msgprint("Weekly periodicity can be set for period of atleast 1 week or more")
raise Exception
def get_no_of_visits(self, arg):
arg1 = eval(arg)
self.validate_period(arg)
period = (getdate(arg1['end_date'])-getdate(arg1['start_date'])).days+1
count =0
if arg1['periodicity'] == 'Weekly':
count = period/7
elif arg1['periodicity'] == 'Monthly':
count = period/30
elif arg1['periodicity'] == 'Quarterly':
count = period/91
elif arg1['periodicity'] == 'Half Yearly':
count = period/182
elif arg1['periodicity'] == 'Yearly':
count = period/365
ret = {'no_of_visits':count}
return ret
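	# Worked example (illustrative): a one-year contract (period of about 365 days) with
	# 'Monthly' periodicity yields 365/30 = 12 visits.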
def validate_maintenance_detail(self):
if not getlist(self.doclist, 'item_maintenance_detail'):
msgprint("Please enter Maintaince Details first")
raise Exception
for d in getlist(self.doclist, 'item_maintenance_detail'):
if not d.item_code:
msgprint("Please select item code")
raise Exception
elif not d.start_date or not d.end_date:
msgprint("Please select Start Date and End Date for item "+d.item_code)
raise Exception
elif not d.no_of_visits:
msgprint("Please mention no of visits required")
raise Exception
elif not d.incharge_name:
msgprint("Please select Incharge Person's name")
raise Exception
if getdate(d.start_date) >= getdate(d.end_date):
msgprint("Start date should be less than end date for item "+d.item_code)
raise Exception
#check if maintenance schedule already created against same sales order
#-----------------------------------------------------------------------------------
def validate_sales_order(self):
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.prevdoc_docname:
chk = webnotes.conn.sql("select t1.name from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2 where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1", d.prevdoc_docname)
if chk:
msgprint("Maintenance Schedule against "+d.prevdoc_docname+" already exist")
raise Exception
def validate_serial_no(self):
for d in getlist(self.doclist, 'item_maintenance_detail'):
cur_s_no=[]
if d.serial_no:
cur_serial_no = d.serial_no.replace(' ', '')
cur_s_no = cur_serial_no.split(',')
for x in cur_s_no:
chk = webnotes.conn.sql("select name, status from `tabSerial No` where docstatus!=2 and name=%s", (x))
chk1 = chk and chk[0][0] or ''
status = chk and chk[0][1] or ''
if not chk1:
msgprint("Serial no "+x+" does not exist in system.")
raise Exception
def validate(self):
self.validate_maintenance_detail()
self.validate_sales_order()
self.validate_serial_no()
self.validate_start_date()
# validate that maintenance start date can not be before serial no delivery date
#-------------------------------------------------------------------------------------------
def validate_start_date(self):
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.serial_no:
cur_serial_no = d.serial_no.replace(' ', '')
cur_s_no = cur_serial_no.split(',')
for x in cur_s_no:
dt = webnotes.conn.sql("select delivery_date from `tabSerial No` where name = %s", x)
dt = dt and dt[0][0] or ''
if dt:
if dt > getdate(d.start_date):
msgprint("Maintenance start date can not be before delivery date "+dt.strftime('%Y-%m-%d')+" for serial no "+x)
raise Exception
#update amc expiry date in serial no
#------------------------------------------
def update_amc_date(self,serial_no,amc_end_date):
#get current list of serial no
cur_serial_no = serial_no.replace(' ', '')
cur_s_no = cur_serial_no.split(',')
for x in cur_s_no:
webnotes.conn.sql("update `tabSerial No` set amc_expiry_date = '%s', maintenance_status = 'Under AMC' where name = '%s'"% (amc_end_date,x))
def on_update(self):
webnotes.conn.set(self.doc, 'status', 'Draft')
def validate_serial_no_warranty(self):
for d in getlist(self.doclist, 'item_maintenance_detail'):
if cstr(d.serial_no).strip():
dt = webnotes.conn.sql("""select warranty_expiry_date, amc_expiry_date
from `tabSerial No` where name = %s""", d.serial_no, as_dict=1)
if dt[0]['warranty_expiry_date'] and dt[0]['warranty_expiry_date'] >= d.start_date:
webnotes.msgprint("""Serial No: %s is already under warranty upto %s.
Please check AMC Start Date.""" %
(d.serial_no, dt[0]["warranty_expiry_date"]), raise_exception=1)
if dt[0]['amc_expiry_date'] and dt[0]['amc_expiry_date'] >= d.start_date:
webnotes.msgprint("""Serial No: %s is already under AMC upto %s.
Please check AMC Start Date.""" %
(d.serial_no, dt[0]["amc_expiry_date"]), raise_exception=1)
def validate_schedule(self):
item_lst1 =[]
item_lst2 =[]
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.item_code not in item_lst1:
item_lst1.append(d.item_code)
for m in getlist(self.doclist, 'maintenance_schedule_detail'):
if m.item_code not in item_lst2:
item_lst2.append(m.item_code)
if len(item_lst1) != len(item_lst2):
msgprint("Maintenance Schedule is not generated for all the items. Please click on 'Generate Schedule'")
raise Exception
else:
for x in item_lst1:
if x not in item_lst2:
msgprint("Maintenance Schedule is not generated for item "+x+". Please click on 'Generate Schedule'")
raise Exception
#check if serial no present in item maintenance table
#-----------------------------------------------------------
def check_serial_no_added(self):
serial_present =[]
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.serial_no:
serial_present.append(d.item_code)
for m in getlist(self.doclist, 'maintenance_schedule_detail'):
if serial_present:
if m.item_code in serial_present and not m.serial_no:
msgprint("Please click on 'Generate Schedule' to fetch serial no added for item "+m.item_code)
raise Exception
def on_cancel(self):
for d in getlist(self.doclist, 'item_maintenance_detail'):
if d.serial_no:
self.update_amc_date(d.serial_no, '')
webnotes.conn.set(self.doc, 'status', 'Cancelled')
delete_events(self.doc.doctype, self.doc.name)
def on_trash(self):
delete_events(self.doc.doctype, self.doc.name)
@webnotes.whitelist()
def make_maintenance_visit(source_name, target_doclist=None):
from webnotes.model.mapper import get_mapped_doclist
def update_status(source, target, parent):
target.maintenance_type = "Scheduled"
doclist = get_mapped_doclist("Maintenance Schedule", source_name, {
"Maintenance Schedule": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "maintenance_schedule"
},
"validation": {
"docstatus": ["=", 1]
},
"postprocess": update_status
},
"Maintenance Schedule Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype",
"incharge_name": "service_person"
}
}
}, target_doclist)
return [d.fields for d in doclist] | agpl-3.0 |
jss-emr/openerp-7-src | openerp/addons/analytic/analytic.py | 1 | 17758 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_analytic_account(osv.osv):
_name = 'account.analytic.account'
_inherit = ['mail.thread']
_description = 'Analytic Account'
_track = {
'state': {
'analytic.mt_account_pending': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'pending',
'analytic.mt_account_closed': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'close',
'analytic.mt_account_opened': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'open',
},
}
def _compute_level_tree(self, cr, uid, ids, child_ids, res, field_names, context=None):
currency_obj = self.pool.get('res.currency')
recres = {}
def recursive_computation(account):
result2 = res[account.id].copy()
for son in account.child_ids:
result = recursive_computation(son)
for field in field_names:
if (account.currency_id.id != son.currency_id.id) and (field!='quantity'):
result[field] = currency_obj.compute(cr, uid, son.currency_id.id, account.currency_id.id, result[field], context=context)
result2[field] += result[field]
return result2
for account in self.browse(cr, uid, ids, context=context):
if account.id not in child_ids:
continue
recres[account.id] = recursive_computation(account)
return recres
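        # Illustrative reading (comment added for clarity, not in the original source): for a
        # parent account in EUR with a child account in USD, the child's debit/credit/balance
        # are converted to EUR before being added to the parent's totals, while 'quantity' is
        # summed without any currency conversion.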
def _debit_credit_bal_qtty(self, cr, uid, ids, fields, arg, context=None):
res = {}
if context is None:
context = {}
child_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)]))
for i in child_ids:
res[i] = {}
for n in fields:
res[i][n] = 0.0
if not child_ids:
return res
where_date = ''
where_clause_args = [tuple(child_ids)]
if context.get('from_date', False):
where_date += " AND l.date >= %s"
where_clause_args += [context['from_date']]
if context.get('to_date', False):
where_date += " AND l.date <= %s"
where_clause_args += [context['to_date']]
cr.execute("""
SELECT a.id,
sum(
CASE WHEN l.amount > 0
THEN l.amount
ELSE 0.0
END
) as debit,
sum(
CASE WHEN l.amount < 0
THEN -l.amount
ELSE 0.0
END
) as credit,
COALESCE(SUM(l.amount),0) AS balance,
COALESCE(SUM(l.unit_amount),0) AS quantity
FROM account_analytic_account a
LEFT JOIN account_analytic_line l ON (a.id = l.account_id)
WHERE a.id IN %s
""" + where_date + """
GROUP BY a.id""", where_clause_args)
for row in cr.dictfetchall():
res[row['id']] = {}
for field in fields:
res[row['id']][field] = row[field]
return self._compute_level_tree(cr, uid, ids, child_ids, res, fields, context)
def name_get(self, cr, uid, ids, context=None):
res = []
if not ids:
return res
if isinstance(ids, (int, long)):
ids = [ids]
for id in ids:
elmt = self.browse(cr, uid, id, context=context)
res.append((id, self._get_one_full_name(elmt)))
return res
def _get_full_name(self, cr, uid, ids, name=None, args=None, context=None):
if context == None:
context = {}
res = {}
for elmt in self.browse(cr, uid, ids, context=context):
res[elmt.id] = self._get_one_full_name(elmt)
return res
def _get_one_full_name(self, elmt, level=6):
if level<=0:
return '...'
if elmt.parent_id and not elmt.type == 'template':
parent_path = self._get_one_full_name(elmt.parent_id, level-1) + " / "
else:
parent_path = ''
return parent_path + elmt.name
def _child_compute(self, cr, uid, ids, name, arg, context=None):
result = {}
if context is None:
context = {}
for account in self.browse(cr, uid, ids, context=context):
result[account.id] = map(lambda x: x.id, [child for child in account.child_ids if child.state != 'template'])
return result
def _get_analytic_account(self, cr, uid, ids, context=None):
company_obj = self.pool.get('res.company')
analytic_obj = self.pool.get('account.analytic.account')
accounts = []
for company in company_obj.browse(cr, uid, ids, context=context):
accounts += analytic_obj.search(cr, uid, [('company_id', '=', company.id)])
return accounts
def _set_company_currency(self, cr, uid, ids, name, value, arg, context=None):
if isinstance(ids, (int, long)):
ids=[ids]
for account in self.browse(cr, uid, ids, context=context):
if account.company_id:
if account.company_id.currency_id.id != value:
                    raise osv.except_osv(_('Error!'), _("If you set a company, the currency selected has to be the same as its currency. \nYou can remove the company belonging, and thus change the currency, only on analytic accounts of type 'view'. This can be really useful for consolidation purposes of several companies' charts with different currencies, for example."))
if value:
return cr.execute("""update account_analytic_account set currency_id=%s where id=%s""", (value, account.id, ))
def _currency(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
if rec.company_id:
result[rec.id] = rec.company_id.currency_id.id
else:
result[rec.id] = rec.currency_id.id
return result
_columns = {
'name': fields.char('Account/Contract Name', size=128, required=True),
'complete_name': fields.function(_get_full_name, type='char', string='Full Name'),
'code': fields.char('Reference', select=True),
'type': fields.selection([('view','Analytic View'), ('normal','Analytic Account'),('contract','Contract or Project'),('template','Template of Contract')], 'Type of Account', required=True,
help="If you select the View Type, it means you won\'t allow to create journal entries using that account.\n"\
"The type 'Analytic account' stands for usual accounts that you only want to use in accounting.\n"\
"If you select Contract or Project, it offers you the possibility to manage the validity and the invoicing options for this account.\n"\
"The special type 'Template of Contract' allows you to define a template with default data that you can reuse easily."),
'template_id': fields.many2one('account.analytic.account', 'Template of Contract'),
'description': fields.text('Description'),
'parent_id': fields.many2one('account.analytic.account', 'Parent Analytic Account', select=2),
'child_ids': fields.one2many('account.analytic.account', 'parent_id', 'Child Accounts'),
'child_complete_ids': fields.function(_child_compute, relation='account.analytic.account', string="Account Hierarchy", type='many2many'),
'line_ids': fields.one2many('account.analytic.line', 'account_id', 'Analytic Entries'),
'balance': fields.function(_debit_credit_bal_qtty, type='float', string='Balance', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'debit': fields.function(_debit_credit_bal_qtty, type='float', string='Debit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'credit': fields.function(_debit_credit_bal_qtty, type='float', string='Credit', multi='debit_credit_bal_qtty', digits_compute=dp.get_precision('Account')),
'quantity': fields.function(_debit_credit_bal_qtty, type='float', string='Quantity', multi='debit_credit_bal_qtty'),
'quantity_max': fields.float('Prepaid Service Units', help='Sets the higher limit of time to work on the contract, based on the timesheet. (for instance, number of hours in a limited support contract.)'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'user_id': fields.many2one('res.users', 'Project Manager'),
'manager_id': fields.many2one('res.users', 'Account Manager'),
'date_start': fields.date('Start Date'),
'date': fields.date('Date End', select=True),
'company_id': fields.many2one('res.company', 'Company', required=False), #not required because we want to allow different companies to use the same chart of account, except for leaf accounts.
'state': fields.selection([('template', 'Template'),('draft','New'),('open','In Progress'),('pending','To Renew'),('close','Closed'),('cancelled', 'Cancelled')], 'Status', required=True, track_visibility='onchange'),
'currency_id': fields.function(_currency, fnct_inv=_set_company_currency, #the currency_id field is readonly except if it's a view account and if there is no company
store = {
'res.company': (_get_analytic_account, ['currency_id'], 10),
}, string='Currency', type='many2one', relation='res.currency'),
}
def on_change_template(self, cr, uid, ids, template_id, context=None):
if not template_id:
return {}
res = {'value':{}}
template = self.browse(cr, uid, template_id, context=context)
if template.date_start and template.date:
from_dt = datetime.strptime(template.date_start, tools.DEFAULT_SERVER_DATE_FORMAT)
to_dt = datetime.strptime(template.date, tools.DEFAULT_SERVER_DATE_FORMAT)
timedelta = to_dt - from_dt
res['value']['date'] = datetime.strftime(datetime.now() + timedelta, tools.DEFAULT_SERVER_DATE_FORMAT)
res['value']['date_start'] = fields.date.today()
res['value']['quantity_max'] = template.quantity_max
res['value']['parent_id'] = template.parent_id and template.parent_id.id or False
res['value']['description'] = template.description
return res
def on_change_partner_id(self, cr, uid, ids,partner_id, name, context={}):
res={}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
if partner.user_id:
res['manager_id'] = partner.user_id.id
if not name:
res['name'] = _('Contract: ') + partner.name
return {'value': res}
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
def _get_default_currency(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.currency_id.id
_defaults = {
'type': 'normal',
'company_id': _default_company,
'code' : lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'account.analytic.account'),
'state': 'open',
'user_id': lambda self, cr, uid, ctx: uid,
'partner_id': lambda self, cr, uid, ctx: ctx.get('partner_id', False),
'date_start': lambda *a: time.strftime('%Y-%m-%d'),
'currency_id': _get_default_currency,
}
def check_recursion(self, cr, uid, ids, context=None, parent=None):
return super(account_analytic_account, self)._check_recursion(cr, uid, ids, context=context, parent=parent)
_order = 'name asc'
_constraints = [
(check_recursion, 'Error! You cannot create recursive analytic accounts.', ['parent_id']),
]
def name_create(self, cr, uid, name, context=None):
raise osv.except_osv(_('Warning'), _("Quick account creation disallowed."))
def copy(self, cr, uid, id, default=None, context=None):
if not default:
default = {}
analytic = self.browse(cr, uid, id, context=context)
default.update(
code=False,
line_ids=[],
name=_("%s (copy)") % (analytic['name']))
return super(account_analytic_account, self).copy(cr, uid, id, default, context=context)
def on_change_company(self, cr, uid, id, company_id):
if not company_id:
return {}
currency = self.pool.get('res.company').read(cr, uid, [company_id], ['currency_id'])[0]['currency_id']
return {'value': {'currency_id': currency}}
def on_change_parent(self, cr, uid, id, parent_id):
if not parent_id:
return {}
parent = self.read(cr, uid, [parent_id], ['partner_id','code'])[0]
if parent['partner_id']:
partner = parent['partner_id'][0]
else:
partner = False
res = {'value': {}}
if partner:
res['value']['partner_id'] = partner
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args=[]
if context is None:
context={}
if context.get('current_model') == 'project.project':
project_obj = self.pool.get("account.analytic.account")
project_ids = project_obj.search(cr, uid, args)
return self.name_get(cr, uid, project_ids, context=context)
if name:
account_ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context)
if not account_ids:
dom = []
for name2 in name.split('/'):
name = name2.strip()
account_ids = self.search(cr, uid, dom + [('name', 'ilike', name)] + args, limit=limit, context=context)
if not account_ids: break
dom = [('parent_id','in',account_ids)]
else:
account_ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, account_ids, context=context)
class account_analytic_line(osv.osv):
_name = 'account.analytic.line'
_description = 'Analytic Line'
_columns = {
'name': fields.char('Description', size=256, required=True),
'date': fields.date('Date', required=True, select=True),
'amount': fields.float('Amount', required=True, help='Calculated by multiplying the quantity and the price given in the Product\'s cost price. Always expressed in the company main currency.', digits_compute=dp.get_precision('Account')),
'unit_amount': fields.float('Quantity', help='Specifies the amount of quantity to count.'),
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, ondelete='restrict', select=True, domain=[('type','<>','view')]),
'user_id': fields.many2one('res.users', 'User'),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
}
def _get_default_date(self, cr, uid, context=None):
return fields.date.context_today(self, cr, uid, context=context)
def __get_default_date(self, cr, uid, context=None):
return self._get_default_date(cr, uid, context=context)
_defaults = {
'date': __get_default_date,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.analytic.line', context=c),
'amount': 0.00
}
_order = 'date desc'
def _check_no_view(self, cr, uid, ids, context=None):
analytic_lines = self.browse(cr, uid, ids, context=context)
for line in analytic_lines:
if line.account_id.type == 'view':
return False
return True
_constraints = [
(_check_no_view, 'You cannot create analytic line on view account.', ['account_id']),
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sfprime/pattern | examples/01-web/07-wikipedia.py | 21 | 1524 | import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.web import Wikipedia
# This example retrieves an article from Wikipedia (http://en.wikipedia.org).
# Wikipedia queries request the article HTML source from the server. This can be slow.
# It is a good idea to cache results from Wikipedia locally,
# and to set a high timeout when calling Wikipedia.search().
engine = Wikipedia(language="en")
# Contrary to the other search engines in the pattern.web module,
# Wikipedia simply returns one WikipediaArticle object (or None),
# instead of a list of results.
article = engine.search("alice in wonderland", cached=True, timeout=30)
print article.title # Article title (may differ from the search query).
print
print article.languages["fr"] # Article in French, can be retrieved with Wikipedia(language="fr").
print article.links[:10], "..." # List of linked Wikipedia articles.
print article.external[:5], "..." # List of external URL's.
print
#print article.source # The full article content as HTML.
#print article.string # The full article content, plain text with HTML tags stripped.
# An article is made up of different sections with a title.
# WikipediaArticle.sections is a list of WikipediaSection objects.
# Each section has a title + content and can have a linked parent section or child sections.
for s in article.sections:
print s.title.upper()
print
print s.content # = ArticleSection.string, minus the title.
print
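# The parent/child links mentioned above can also be used to walk the section tree,
# e.g. (attribute names as described in the comment above): s.parent and s.children.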
| bsd-3-clause |
jjmleiro/hue | desktop/core/ext-py/markdown/markdown/extensions/abbr.py | 131 | 2899 | '''
Abbreviation Extension for Python-Markdown
==========================================
This extension adds abbreviation handling to Python-Markdown.
Simple Usage:
>>> import markdown
>>> text = """
... Some text with an ABBR and a REF. Ignore REFERENCE and ref.
...
... *[ABBR]: Abbreviation
... *[REF]: Abbreviation Reference
... """
>>> markdown.markdown(text, ['abbr'])
u'<p>Some text with an <abbr title="Abbreviation">ABBR</abbr> and a <abbr title="Abbreviation Reference">REF</abbr>. Ignore REFERENCE and ref.</p>'
Copyright 2007-2008
* [Waylan Limberg](http://achinghead.com/)
* [Seemant Kulleen](http://www.kulleen.org/)
'''
import markdown, re
from markdown import etree
# Global Vars
ABBR_REF_RE = re.compile(r'[*]\[(?P<abbr>[^\]]*)\][ ]?:\s*(?P<title>.*)')
class AbbrExtension(markdown.Extension):
""" Abbreviation Extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Insert AbbrPreprocessor before ReferencePreprocessor. """
md.preprocessors.add('abbr', AbbrPreprocessor(md), '<reference')
class AbbrPreprocessor(markdown.preprocessors.Preprocessor):
""" Abbreviation Preprocessor - parse text for abbr references. """
def run(self, lines):
'''
Find and remove all Abbreviation references from the text.
Each reference is set as a new AbbrPattern in the markdown instance.
'''
new_text = []
for line in lines:
m = ABBR_REF_RE.match(line)
if m:
abbr = m.group('abbr').strip()
title = m.group('title').strip()
self.markdown.inlinePatterns['abbr-%s'%abbr] = \
AbbrPattern(self._generate_pattern(abbr), title)
else:
new_text.append(line)
return new_text
def _generate_pattern(self, text):
'''
Given a string, returns an regex pattern to match that string.
'HTML' -> r'(?P<abbr>[H][T][M][L])'
Note: we force each char as a literal match (in brackets) as we don't
know what they will be beforehand.
'''
chars = list(text)
for i in range(len(chars)):
chars[i] = r'[%s]' % chars[i]
return r'(?P<abbr>\b%s\b)' % (r''.join(chars))
class AbbrPattern(markdown.inlinepatterns.Pattern):
""" Abbreviation inline pattern. """
def __init__(self, pattern, title):
markdown.inlinepatterns.Pattern.__init__(self, pattern)
self.title = title
def handleMatch(self, m):
abbr = etree.Element('abbr')
abbr.text = m.group('abbr')
abbr.set('title', self.title)
return abbr
def makeExtension(configs=None):
return AbbrExtension(configs=configs)
if __name__ == "__main__":
import doctest
doctest.testmod()
| apache-2.0 |
prutseltje/ansible | lib/ansible/modules/storage/netapp/na_cdot_volume.py | 23 | 15029 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_volume
short_description: Manage NetApp cDOT volumes
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create or destroy volumes on NetApp cDOT
options:
state:
description:
- Whether the specified volume should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the volume to manage.
required: true
infinite:
description:
- Set True if the volume is an Infinite Volume.
type: bool
default: 'no'
online:
description:
- Whether the specified volume is online, or not.
type: bool
default: 'yes'
aggregate_name:
description:
- The name of the aggregate the flexvol should exist on. Required when C(state=present).
size:
description:
- The size of the volume in (size_unit). Required when C(state=present).
size_unit:
description:
- The unit used to interpret the size parameter.
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
vserver:
description:
- Name of the vserver to use.
required: true
junction_path:
description:
- Junction path where to mount the volume
required: false
version_added: '2.6'
export_policy:
description:
- Export policy to set for the specified junction path.
required: false
default: default
version_added: '2.6'
snapshot_policy:
description:
- Snapshot policy to set for the specified volume.
required: false
default: default
version_added: '2.6'
'''
EXAMPLES = """
- name: Create FlexVol
na_cdot_volume:
state: present
name: ansibleVolume
infinite: False
aggregate_name: aggr1
size: 20
size_unit: mb
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
junction_path: /ansibleVolume
export_policy: all_nfs_networks
snapshot_policy: daily
- name: Make FlexVol offline
na_cdot_volume:
state: present
name: ansibleVolume
infinite: False
online: False
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTVolume(object):
def __init__(self):
self._size_unit_map = dict(
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
is_infinite=dict(required=False, type='bool', default=False, aliases=['infinite']),
is_online=dict(required=False, type='bool', default=True, aliases=['online']),
size=dict(type='int'),
size_unit=dict(default='gb',
choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
'pb', 'eb', 'zb', 'yb'], type='str'),
aggregate_name=dict(type='str'),
vserver=dict(required=True, type='str', default=None),
junction_path=dict(required=False, type='str', default=None),
export_policy=dict(required=False, type='str', default='default'),
snapshot_policy=dict(required=False, type='str', default='default'),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['aggregate_name', 'size'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.is_infinite = p['is_infinite']
self.is_online = p['is_online']
self.size_unit = p['size_unit']
self.vserver = p['vserver']
self.junction_path = p['junction_path']
self.export_policy = p['export_policy']
self.snapshot_policy = p['snapshot_policy']
if p['size'] is not None:
self.size = p['size'] * self._size_unit_map[self.size_unit]
else:
self.size = None
self.aggregate_name = p['aggregate_name']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
def get_volume(self):
"""
Return details about the volume
:param:
name : Name of the volume
:return: Details about the volume. None if not found.
:rtype: dict
"""
volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
volume_id_attributes.add_new_child('name', self.name)
volume_attributes.add_child_elem(volume_id_attributes)
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(volume_attributes)
volume_info.add_child_elem(query)
result = self.server.invoke_successfully(volume_info, True)
return_value = None
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) >= 1:
volume_attributes = result.get_child_by_name(
'attributes-list').get_child_by_name(
'volume-attributes')
# Get volume's current size
volume_space_attributes = volume_attributes.get_child_by_name(
'volume-space-attributes')
current_size = volume_space_attributes.get_child_content('size')
# Get volume's state (online/offline)
volume_state_attributes = volume_attributes.get_child_by_name(
'volume-state-attributes')
current_state = volume_state_attributes.get_child_content('state')
is_online = None
if current_state == "online":
is_online = True
elif current_state == "offline":
is_online = False
return_value = {
'name': self.name,
'size': current_size,
'is_online': is_online,
}
return return_value
def create_volume(self):
create_parameters = {'volume': self.name,
'containing-aggr-name': self.aggregate_name,
'size': str(self.size),
}
if self.junction_path:
create_parameters['junction-path'] = str(self.junction_path)
if self.export_policy != 'default':
create_parameters['export-policy'] = str(self.export_policy)
if self.snapshot_policy != 'default':
create_parameters['snapshot-policy'] = str(self.snapshot_policy)
volume_create = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-create', **create_parameters)
try:
self.server.invoke_successfully(volume_create,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error provisioning volume %s of size %s: %s' % (self.name, self.size, to_native(e)),
exception=traceback.format_exc())
def delete_volume(self):
if self.is_infinite:
volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-destroy-async', **{'volume-name': self.name})
else:
volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-destroy', **{'name': self.name, 'unmount-and-offline':
'true'})
try:
self.server.invoke_successfully(volume_delete,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error deleting volume %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def rename_volume(self):
"""
Rename the volume.
Note: 'is_infinite' needs to be set to True in order to rename an
Infinite Volume.
"""
if self.is_infinite:
volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-rename-async',
**{'volume-name': self.name, 'new-volume-name': str(
self.name)})
else:
volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-rename', **{'volume': self.name, 'new-volume-name': str(
self.name)})
try:
self.server.invoke_successfully(volume_rename,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error renaming volume %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def resize_volume(self):
"""
Re-size the volume.
Note: 'is_infinite' needs to be set to True in order to rename an
Infinite Volume.
"""
if self.is_infinite:
volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-size-async',
**{'volume-name': self.name, 'new-size': str(
self.size)})
else:
volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-size', **{'volume': self.name, 'new-size': str(
self.size)})
try:
self.server.invoke_successfully(volume_resize,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error re-sizing volume %s: %s' % (self.name, to_native(e)),
exception=traceback.format_exc())
def change_volume_state(self):
"""
Change volume's state (offline/online).
Note: 'is_infinite' needs to be set to True in order to change the
state of an Infinite Volume.
"""
state_requested = None
if self.is_online:
# Requested state is 'online'.
state_requested = "online"
if self.is_infinite:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-online-async',
**{'volume-name': self.name})
else:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-online',
**{'name': self.name})
else:
# Requested state is 'offline'.
state_requested = "offline"
if self.is_infinite:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-offline-async',
**{'volume-name': self.name})
else:
volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
'volume-offline',
**{'name': self.name})
try:
self.server.invoke_successfully(volume_change_state,
enable_tunneling=True)
except netapp_utils.zapi.NaApiError as e:
self.module.fail_json(msg='Error changing the state of volume %s to %s: %s' %
(self.name, state_requested, to_native(e)),
exception=traceback.format_exc())
def apply(self):
changed = False
volume_exists = False
rename_volume = False
resize_volume = False
volume_detail = self.get_volume()
if volume_detail:
volume_exists = True
if self.state == 'absent':
changed = True
elif self.state == 'present':
if str(volume_detail['size']) != str(self.size):
resize_volume = True
changed = True
if (volume_detail['is_online'] is not None) and (volume_detail['is_online'] != self.is_online):
changed = True
if self.is_online is False:
# Volume is online, but requested state is offline
pass
else:
# Volume is offline but requested state is online
pass
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not volume_exists:
self.create_volume()
else:
if resize_volume:
self.resize_volume()
if volume_detail['is_online'] is not \
None and volume_detail['is_online'] != \
self.is_online:
self.change_volume_state()
# Ensure re-naming is the last change made.
if rename_volume:
self.rename_volume()
elif self.state == 'absent':
self.delete_volume()
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTVolume()
v.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
YeEmrick/learning | stanford-tensorflow/examples/03_linreg_starter.py | 1 | 3321 | """ Starter code for simple linear regression example using placeholders
Created by Chip Huyen ([email protected])
CS20: "TensorFlow for Deep Learning Research"
cs20.stanford.edu
Lecture 03
"""
import os
from tensorflow.python.summary import writer
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import time
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import utils
DATA_FILE = 'data/birth_life_2010.txt'
# Step 1: read in data from the .txt file
data, n_samples = utils.read_birth_life_data(DATA_FILE)
# Step 2: create placeholders for X (birth rate) and Y (life expectancy)
# Remember both X and Y are scalars with type float
X, Y = None, None
#############################
########## TO DO ############
#############################
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')
# Step 3: create weight and bias, initialized to 0.0
# Make sure to use tf.get_variable
w, b = None, None
#############################
########## TO DO ############
#############################
w = tf.get_variable(name="w", initializer=0.0, dtype=tf.float32)
b = tf.get_variable(name="b", initializer=0.0, dtype=tf.float32)
# Step 4: build model to predict Y
# e.g. how would you derive at Y_predicted given X, w, and b
Y_predicted = None
#############################
########## TO DO ############
#############################
Y_predicted = w * X + b
# Step 5: use the square error as the loss function
loss = None
#############################
########## TO DO ############
#############################
loss = tf.square(Y_predicted - Y, name='loss')
# Step 6: using gradient descent with learning rate of 0.001 to minimize loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)
start = time.time()
# Create a filewriter to write the model's graph to TensorBoard
#############################
########## TO DO ############
#############################
writer = tf.summary.FileWriter('./graphs/linear_reg', tf.get_default_graph())
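# (Illustrative note: once the graph has been written, it can typically be
#  inspected by running `tensorboard --logdir ./graphs/linear_reg`.)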
with tf.Session() as sess:
# Step 7: initialize the necessary variables, in this case, w and b
#############################
########## TO DO ############
#############################
sess.run(tf.global_variables_initializer())
# Step 8: train the model for 100 epochs
for i in range(100):
total_loss = 0
for x, y in data:
# Execute train_op and get the value of loss.
# Don't forget to feed in data for placeholders
_, loss_ = sess.run([optimizer, loss], feed_dict={X: x, Y: y})########## TO DO ############
total_loss += loss_
print('Epoch {0}: {1}'.format(i, total_loss/n_samples))
# close the writer when you're done using it
#############################
########## TO DO ############
#############################
writer.close()
# Step 9: output the values of w and b
w_out, b_out = sess.run([w, b])
#############################
########## TO DO ############
#############################
print('Took: %f seconds' %(time.time() - start))
# uncomment the following lines to see the plot
plt.plot(data[:,0], data[:,1], 'bo', label='Real data')
plt.plot(data[:,0], data[:,0] * w_out + b_out, 'r', label='Predicted data')
plt.legend()
plt.show() | apache-2.0 |
okjohn/SHARPpy | sharppy/drawing/skewt.py | 2 | 15486 | import math
import sharppy as sp
import sharppy.sharptab as tab
from sharppy.sharptab.constants import *
__all__ = ['SkewT']
class SkewT:
def __init__(self, canvas, **kwargs):
self.gCanvas = canvas
# Canvas Widths
self.rpad = 100
self.bpad = 20
self.wid = kwargs.get('width', 800) - self.rpad
self.hgt = kwargs.get('height', 800) - self.bpad
# Where on Canvas to start drawing the SkewT
self.tlx = 30 # Top-Left X
self.tly = 20 # Top-Left Y
# Dimensions of the SkewT Frame
self.brx = self.wid # Bottom-Right X
self.bry = self.hgt # Bottom-Right Y
# Maximum (bottom) & Minimum (top) Pressures
self.pmin = 100.
self.pmax = 1075.
# Drawing Parameters
self.bltemp = -55 # temperature at the bottom-left of the chart
self.brtemp = 55 # temperature at the bottom-right of the chart
# Rotation Angle in Degrees
self.rot = 100/3.
# SkewT Fonts
self.font1 = ("Helvetica", 9)
self.font2 = ("Helvetica", 11)
self.font3 = ("Helvetica", 7)
# Colors
self.framefg = "#FFFFFF"
self.framebg = "#000000"
self.icolor = "#BBBBBB" # Isobar
self.tcolor = "#FF0000" # Temperature Trace
self.tdcolor = "#00FF00" # Dewpoint Trace
self.twcolor = "#AAAAFF" # Wetbulb Temperature Trace
self.tpcolor = "#AAAA00" # Parcel Trace
self.tvpcolor = "#FFFF00" # Virtual Parcel
self.ithermcolor = "#333333" # Isotherms
self.ithermbold = "#CCCCCC" # Bolded Isotherms
self.adiabatcolor = "#333333" # Dry Adiabat
self.madiabatcolor = "#663333" # Moist Adiabat
self.mixratcolor = "#006600" # Mixing Ratio
self.stntextcolor = "#FF0000" # Station ID Text
self.tcolor = "#FF0000" # Temperature Trace
self.tdcolor = "#00FF00" # Dew Point Trace
self.twcolor = "#AAAAFF" # Wetbulb Trace
self.dgzcolor = "#00FFFF" # DGZ Trace Color
self.barbcolor = '#FFFFFF' # Wind Barb Color
# Lines to Plot
self.dp = -25
self.presrange = range(int(self.pmax), int(self.pmin-1), self.dp)
self.isobars = [1000, 850, 700, 500, 300, 200, 100]
self.isotherms = range(-160, 61, 10)
self.thtas = range(-70, 350, 20)
self.thtes = range(-160, 61, 10)
self.mixws = [2] + range(4, 33, 4)
self.wbot = self.pmax - 5 # Mixing Ratio Bottom Pressure
self.wtop = 600 # Mixing Ratio Top Pressure
self.minTdgz = -18 # Minimum temperature of DGZ
self.maxTdgz = -12 # Maximum temperature of DGZ
self.tracewidth = 4 # Tracewidth
# Update All Keyword Arguments
self.__dict__.update(kwargs)
# Horizontal temperature spread (dT across the bottom of the chart)
self.hspread = self.brtemp - self.bltemp
# Vertical temperature spread (dT along the left edge of the chart)
self.vspread = math.tan(math.radians(self.rot)) * self.hspread
def drawSkewT(self):
""" Draw the background SkewT """
btm = int(self.pmax) / 50 * 50
for p in range(btm, int(self.pmin), -50): self.drawIsobar(p, 1)
for tw in self.thtes: self.drawMoistAdiabat(tw)
for t in self.isotherms: self.drawIsotherm(t)
for thta in self.thtas: self.drawDryAdiabat(thta)
for w in self.mixws: self.drawMixRatioLine(w, self.font3)
# Colorfill boxes around plotting area to mask lines
self.gCanvas.create_rectangle((0, 0, self.tlx, self.bry),
fill=self.framebg, outline=self.framebg)
self.gCanvas.create_rectangle((0, self.pres2Pix(self.pmax), self.brx,
self.bry), fill=self.framebg, outline=self.framebg)
self.gCanvas.create_rectangle((self.brx, 0, self.wid+self.rpad,
self.pres2Pix(self.pmax)), fill=self.framebg, outline=self.framebg)
for isobar in self.isobars: self.drawIsobar(isobar, 0)
# Plot frame around SkewT
self.gCanvas.create_rectangle((self.tlx, self.tly, self.brx, self.bry),
fill="", outline=self.framefg)
def drawProfile(self, prof, **kwargs):
''' Draw the Sounding '''
# Create the Sounding ID and Date/Time Header
txt = prof.gStation + " - " + prof.gDate
self.gCanvas.create_text(self.tlx, 2, fill=self.stntextcolor,
text=txt, anchor='nw', font=self.font2)
# Create the Model/Obs Header
self.gCanvas.create_text(self.wid-150, 2, fill=self.stntextcolor,
text=prof.gModel, anchor='nw', font=self.font2)
# Add WetBulb to Profile
prof = self.createWetBulb(prof)
# Make the Drawings
twwidth = kwargs.get('twwidth', 1)
plottxt = kwargs.get('plottxt', True)
self.__dict__.update(kwargs)
self.drawTrace(prof, -1, color=self.twcolor, width=twwidth,
plottxt=plottxt)
self.drawTrace(prof, prof.tdind, self.tdcolor, width=self.tracewidth,
plottxt=plottxt)
self.drawTrace(prof, prof.tind, self.tcolor, width=self.tracewidth,
plottxt=plottxt)
self.drawDGZ(prof, self.dgzcolor, width=self.tracewidth)
def drawBarbs(self, prof, color=None, **kwargs):
''' Draw the Wind Barbs '''
if not color: color = self.barbcolor
self.plevs = [prof.gSndg[prof.sfc][prof.pind]] + self.presrange
self.__dict__.update(kwargs)
if not self.plevs:
self.plevs = [prof.gSndg[i][prof.sfc]
for i in range(prof.gNumLevels)]
for p in self.plevs:
if p < self.pmin or p > self.pmax or \
p > prof.gSndg[prof.sfc][prof.pind]: continue
u, v = tab.interp.components(p, prof)
y1 = self.pres2Pix(p)
x1 = self.brx + self.rpad/2
wdir, wspd = tab.vector.comp2vec(u, v)
sp.Barb(self.gCanvas, x1, y1, wdir, wspd, color=color,
**kwargs)
def drawDGZ(self, prof, color=None, width=3):
''' Draw the Dendritic Snow Growth Zone '''
if not color: color=self.dgzcolor
if prof.gNumLevels < 3: return
for i in range(prof.gNumLevels-1):
if not QC(prof.gSndg[i][prof.tind]): continue
if prof.gSndg[i][prof.tind] <= self.maxTdgz and \
prof.gSndg[i][prof.tind] >= self.minTdgz and \
prof.gSndg[i+1][prof.tind] <= self.maxTdgz and \
prof.gSndg[i+1][prof.tind] >= self.minTdgz:
rh = tab.thermo.relh(prof.gSndg[i][prof.pind],
prof.gSndg[i][prof.tind], prof.gSndg[i][prof.tdind])
if rh >= 75:
rh2 = tab.thermo.relh(prof.gSndg[i+1][prof.pind],
prof.gSndg[i+1][prof.tind],
prof.gSndg[i+1][prof.tdind])
if rh2 >= 75:
x1 = self.temp2Pix(prof.gSndg[i][prof.tind],
prof.gSndg[i][prof.pind])
y1 = self.pres2Pix(prof.gSndg[i][prof.pind])
x2 = self.temp2Pix(prof.gSndg[i+1][prof.tind],
prof.gSndg[i+1][prof.pind])
y2 = self.pres2Pix(prof.gSndg[i+1][prof.pind])
self.gCanvas.create_line(x1, y1, x2, y2, fill=color,
width=width)
def drawTrace(self, prof, ind, color, **kwargs):
''' Draw the Temperature Trace on the Sounding '''
font = kwargs.get('font', self.font3)
width = kwargs.get('width', 4)
plottxt = kwargs.get('plottxt', True)
if prof.gNumLevels < 3: return
x1 = self.temp2Pix(prof.gSndg[prof.sfc][ind],
prof.gSndg[prof.sfc][prof.pind])
y1 = self.pres2Pix(prof.gSndg[prof.sfc][prof.pind])
txt = "%.1f" % tab.thermo.ctof(prof.gSndg[prof.sfc][ind])
xoff = int((float(len(txt)) / 2.) * font[1]) - 1
yoff = font[1]
x2 = 0; y2 = 0
if plottxt:
self.gCanvas.create_rectangle((x1-xoff, y1, x1+xoff, y1+2*yoff),
fill=self.framebg)
self.gCanvas.create_text(x1, y1+yoff, fill=color, text=txt,
font=font)
for i in range(prof.gNumLevels):
if QC(prof.gSndg[i][ind]):
x1 = x2
y1 = y2
if prof.gSndg[i][0] > self.pmin:
x2 = self.temp2Pix(prof.gSndg[i][ind],
prof.gSndg[i][prof.pind])
y2 = self.pres2Pix(prof.gSndg[i][prof.pind])
if x1 <= 0: continue
self.gCanvas.create_line(x1, y1, x2, y2, fill=color,
width=width)
else:
v = tab.interp.interp_from_pres(self.pmin, prof, ind)
x2 = self.temp2Pix(v, self.pmin)
y2 = self.pres2Pix(self.pmin)
self.gCanvas.create_line(x1, y1, x2, y2, fill=color,
width=width)
break
def drawParcelTrace(self, pcl, width=2, dash=(1,1), color=None):
''' Draw the trace of supplied parcel '''
        if not color: color = self.tpcolor
p = pcl.pres
t = pcl.temp
td = pcl.dwpt
x1 = self.temp2Pix(t, p)
y1 = self.pres2Pix(p)
p2, t2 = tab.thermo.drylift(p, t, td)
x2 = self.temp2Pix(t2, p2)
y2 = self.pres2Pix(p2)
self.gCanvas.create_line(x1, y1, x2, y2, fill=color,
width=width, dash=dash)
for i in range(int(p2 + self.dp), int(self.pmin-1), int(self.dp)):
x1 = x2
y1 = y2
t3 = tab.thermo.wetlift(p2, t2, float(i))
x2 = self.temp2Pix(t3, float(i))
y2 = self.pres2Pix(float(i))
if x2 < self.tlx: break
self.gCanvas.create_line(x1, y1, x2, y2, fill=color,
width=width, dash=dash)
def drawVirtualParcelTrace(self, pcl, width=2, dash=(1,1), color=None):
''' Draw the trace of supplied parcel '''
if not color: color = self.tvpcolor
p = pcl.pres
t = pcl.temp
td = pcl.dwpt
x1 = self.temp2Pix(tab.thermo.virtemp(p, t, td), p)
y1 = self.pres2Pix(p)
p2, t2 = tab.thermo.drylift(p, t, td)
x2 = self.temp2Pix(tab.thermo.virtemp(p2, t2, t2), p2)
y2 = self.pres2Pix(p2)
self.gCanvas.create_line(x1, y1, x2, y2, fill=color,
width=width, dash=dash)
for i in range(int(p2 + self.dp), int(self.pmin-1), int(self.dp)):
x1 = x2
y1 = y2
t3 = tab.thermo.wetlift(p2, t2, float(i))
x2 = self.temp2Pix(tab.thermo.virtemp(i, t3, t3), float(i))
y2 = self.pres2Pix(float(i))
if x2 < self.tlx: break
self.gCanvas.create_line(x1, y1, x2, y2, fill=color,
width=width, dash=dash)
def drawDryAdiabat(self, thta):
''' Draw dry adiabats on background SkewT '''
for p in self.presrange:
t = ((thta + ZEROCNK) / ((1000. / p)**ROCP)) - ZEROCNK
x = self.temp2Pix(t, p)
y = self.pres2Pix(p)
if p == self.pmax:
x2 = x
y2 = y
else:
x1 = x2
y1 = y2
x2 = x
y2 = y
self.gCanvas.create_line(x1, y1, x2, y2,
fill=self.adiabatcolor, width=1)
def drawIsotherm(self, t):
''' Draw isotherms on background SkewT '''
x1 = self.temp2Pix(t, self.pmax-5)
x2 = self.temp2Pix(t, self.pmin)
if t >= self.bltemp and t <= self.brtemp:
self.gCanvas.create_text(x1-2, self.bry+2, fill=self.ithermbold,
text=t, anchor="n", font=self.font1)
self.gCanvas.create_line(x1, self.bry, x2, self.tly,
fill=self.ithermcolor, dash=(4, 2), width=1)
if t == 0 or t==-20:
self.gCanvas.create_line(x1, self.bry, x2, self.tly,
fill=self.ithermbold, dash=(4, 2), width=1)
def drawMoistAdiabat(self, tw, width=1):
''' Draw moist adiabats on background SkewT '''
for p in self.presrange:
t = tab.thermo.wetlift(1000., tw, p)
x = self.temp2Pix(t, p)
y = self.pres2Pix(p)
if p == self.pmax:
x2 = x
y2 = y
else:
x1 = x2
y1 = y2
x2 = x
y2 = y
self.gCanvas.create_line(x1, y1, x2, y2,
fill=self.madiabatcolor, width=width)
def drawIsobar(self, p, pipflag, width=1):
''' Draw isobars on background SkewT '''
y1 = self.pres2Pix(p)
if pipflag == 0:
self.gCanvas.create_line(self.tlx, y1, self.brx, y1,
fill=self.icolor, width=width)
self.gCanvas.create_text(self.tlx-2, y1,
fill=self.framefg, text=p, anchor="e", font=self.font1)
else:
self.gCanvas.create_line(self.tlx, y1, self.tlx+5, y1,
fill=self.icolor, width=width)
self.gCanvas.create_line(self.brx, y1, self.brx-5, y1,
fill=self.icolor, width=width)
def drawMixRatioLine(self, w, font, width=1):
''' Function to draw mixing ratio lines '''
t = tab.thermo.temp_at_mixrat(w, self.wbot)
x1 = self.temp2Pix(t, self.wbot)
y1 = self.pres2Pix(self.wbot)
t = tab.thermo.temp_at_mixrat(w, self.wtop)
x2 = self.temp2Pix(t, self.wtop)
y2 = self.pres2Pix(self.wtop)
self.gCanvas.create_line(x1, y1, x2, y2, fill=self.mixratcolor,
width=width)
self.gCanvas.create_rectangle((x2-font[1], y2-2*font[1],
x2+font[1], y2), fill=self.framebg,
outline=self.framebg)
self.gCanvas.create_text(x2, y2-font[1], fill=self.mixratcolor,
text=w, font=font)
def createWetBulb(self, prof):
''' Create the Wetbulb Temperature Array '''
for i in range(prof.gNumLevels):
prof.gSndg[i].append(tab.thermo.wetbulb(prof.gSndg[i][prof.pind],
prof.gSndg[i][prof.tind], prof.gSndg[i][prof.tdind]))
return prof
def temp2Pix(self, t, p):
''' Function to convert a temperature level to a pixel '''
scl1 = self.brtemp - (((self.bry - self.pres2Pix(p)) /
(self.bry - self.tly)) * self.vspread)
scl2 = self.brx - (((scl1 - t) / self.hspread) * (self.brx - self.tlx))
return scl2
def pres2Pix(self, p):
''' Function to convert a pressure level to a pixel level '''
scl1 = math.log(self.pmax) - math.log(self.pmin)
scl2 = math.log(self.pmax) - math.log(p)
return (self.bry - (scl2/scl1) * (self.bry - self.tly))
def pix2Pres(self, y):
''' Function to convert a pixel to a pressure level'''
scl1 = math.log(self.pmax) - math.log(self.pmin)
scl2 = self.bry - float(y)
scl3 = self.bry - self.tly + 1
return (self.pmax / math.exp((scl2/scl3) * scl1));
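# A minimal usage sketch (illustrative only): 'canvas' is assumed to be a
# Tkinter Canvas and 'prof' a profile object of the kind drawProfile expects.
#
#   skewt = SkewT(canvas, width=800, height=800)
#   skewt.drawSkewT()        # draw the background isobars/isotherms/adiabats
#   skewt.drawProfile(prof)  # overlay temperature, dewpoint and wetbulb traces
#   skewt.drawBarbs(prof)    # add wind barbs along the right margin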
| bsd-3-clause |
ianatpn/nupictest | examples/bindings/sparse_matrix_how_to.py | 5 | 12347 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import cPickle
# SparseMatrix is a versatile class that offers a wide range of functionality.
# This tutorial will introduce you to the main features of SparseMatrix.
# SparseMatrix is located in nupic.bindings.math, and here is the import you need:
from nupic.bindings.math import *
# 1. Types of sparse matrices:
# ===========================
# There are three types of SparseMatrix, depending on the precision you need
# in your application: 32, 64 and 128 bits. To create a SparseMatrix holding
# floating point values of the desired precision, simply specify it as the
# 'dtype' parameter in the constructor:
s = SparseMatrix(dtype='Float32')
# 2. Global Epsilon:
# =================
# By default, NuPIC is compiled to handle only 32 bits of precision at max,
# and sparse matrices consider a floating point value to be zero if it's less than
# 1e-6 (the best precision possible with 32-bit floats). This value of 1e-6 is
# called "epsilon", and it is a global value used throughout NuPIC to deal with
# near-zero floating point numbers.
# If this is not enough, NuPIC can be recompiled to access more precision.
# With NTA_DOUBLE_PRECISION or NTA_QUAD_PRECISION set at compile time, NuPIC can
# use 64 or 128 bits to represent floating point values. The global epsilon can
# then be set to smaller values via the variable nta::Epsilon in nta/math/math.hpp
print '\nGlobal epsilon :', getGlobalEpsilon()
# 3. Creation of sparse matrices:
# ==============================
# There are several convenient ways to create sparse matrices.
# You can create a SparseMatrix by passing it a 2D array:
s = SparseMatrix([[1,2],[3,4]], dtype='Float32')
print '\nFrom array 32\n', s
# ... or by passing it a numpy.array:
s = SparseMatrix(numpy.array([[1,2],[3,4]]),dtype='Float32')
print '\nFrom numpy array 32\n', s
# ... or by using one of the precision-specific shortcuts, such as SM32:
s = SM32([[1,2],[3,4]])
print '\nWith shortcut 32\n', s
# It is also possible to create an empty SparseMatrix, or a copy of another
# SparseMatrix, or a SparseMatrix from a string in CSR format:
s_empty = SM32()
print '\nEmpty sparse matrix\n', s_empty
s_string = SM32('sm_csr_1.5 26 2 2 4 2 0 1 1 2 2 0 3 1 4')
print '\nSparse matrix from string\n', s_string
# A sparse matrix can be converted to a dense one via toDense:
a = numpy.array(s_string.toDense())
print '\ntoDense\n', a
# To set a sparse matrix from a dense one, one can use fromDense:
s = SM32()
s.fromDense(numpy.random.random((4,4)))
print '\nfromDense\n', s
# A sparse matrix can be pickled:
cPickle.dump(s, open('sm.txt', 'wb'))
s2 = cPickle.load(open('sm.txt', 'rb'))
print '\nPickling\n', s2
# 4. Simple queries:
# =================
# You can print a SparseMatrix, and query it for its number of rows, columns,
# non-zeros per row or column... There are many query methods available.
# All row operations are mirrored by the equivalent column operations
# Most operations are available either for a given row, or a given col, or
# all rows or all cols simultaneously. All col operations can be pretty efficient,
# even if the internal storage is CSR.
s = SM32(numpy.random.random((4,4)))
s.threshold(.5)
print '\nPrint\n', s
print '\nNumber of rows ', s.nRows()
print 'Number of columns ', s.nCols()
print 'Is matrix zero? ', s.isZero()
print 'Total number of non zeros ', s.nNonZeros()
print 'Sum of all values ', s.sum()
print 'Prod of non-zeros ', s.prod()
print 'Maximum value and its location ', s.max()
print 'Minimum value and its location ', s.min()
print 'Number of non-zeros on row 0 ', s.nNonZerosOnRow(0)
print 'If first row zero? ', s.isRowZero(0)
print 'Number of non-zeros on each row ', s.nNonZerosPerRow()
print 'Minimum on row 0 ', s.rowMin(0)
print 'Minimum values and locations for all rows', s.rowMin()
print 'Maximum on row 0 ', s.rowMax(0)
print 'Maximum values and locations for all rows', s.rowMax()
print 'Sum of values on row 0 ', s.rowSum(0)
print 'Sum of each row ', s.rowSums()
print 'Product of non-zeros on row 1', s.rowProd(1)
print 'Product of each row ', s.rowProds()
print 'Number of non-zeros on col 0 ', s.nNonZerosOnCol(0)
print 'If first col zero? ', s.isColZero(0)
print 'Number of non-zeros on each col ', s.nNonZerosPerCol()
print 'Minimum on col 0 ', s.colMin(0)
print 'Minimum values and locations for all cols', s.colMin()
print 'Maximum on col 0 ', s.colMax(0)
print 'Maximum values and locations for all cols', s.colMax()
print 'Sum of values on col 0 ', s.colSum(0)
print 'Sum of each col ', s.colSums()
print 'Product of non-zeros on col 1', s.colProd(1)
print 'Product of each col ', s.colProds()
# 5. Element access and slicing:
# =============================
# It is very easy to access individual elements:
print '\n', s
print '\ns[0,0] = ', s[0,0], 's[1,1] = ', s[1,1]
s[0,0] = 3.5
print 'Set [0,0] to 3.5 ', s[0,0]
# There are powerful slicing operations:
print '\ngetOuter\n', s.getOuter([0,2],[0,2])
s.setOuter([0,2],[0,2],[[1,2],[3,4]])
print '\nsetOuter\n', s
s.setElements([0,1,2],[0,1,2],[1,1,1])
print '\nsetElements\n', s
print '\ngetElements\n', s.getElements([0,1,2],[0,1,2])
s2 = s.getSlice(0,2,0,3)
print '\ngetSlice\n', s2
s.setSlice(1,1, s2)
print '\nsetSlice\n', s
# A whole row or col can be set to zero with one call:
s.setRowToZero(1)
print '\nsetRowToZero\n', s
s.setColToZero(1)
print '\nsetColToZero\n', s
# Individual rows and cols can be retrieved as sparse or dense vectors:
print '\nrowNonZeros ', s.rowNonZeros(0)
print 'colNonZeros ', s.colNonZeros(0)
print 'getRow ', s.getRow(0)
print 'getCol ', s.getCol(0)
# 6. Dynamic features:
# ===================
# SparseMatrix is very dynamic. Rows and columns can be added and deleted.
# A sparse matrix can also be resized and reshaped.
print '\n', s
s.reshape(2,8)
print '\nreshape 2 8\n', s
s.reshape(8,2)
print '\nreshape 8 2\n', s
s.reshape(1,16)
print '\nreshape 1 16\n', s
s.reshape(4,4)
print '\nreshape 4 4\n', s
s.resize(5,5)
print '\nresize 5 5\n', s
s.resize(3,3)
print '\nresize 3 3\n', s
s.resize(4,4)
print '\nresize 4 4\n', s
s.deleteRows([3])
print '\ndelete row 3\n', s
s.deleteCols([1])
print '\ndelete col 1\n', s
s.addRow([1,2,3])
print '\nadd row 1 2 3\n', s
s.addCol([1,2,3,4])
print '\nadd col 1 2 3 4\n', s
s.deleteRows([0,3])
print '\ndelete rows 0 and 3\n', s
s.deleteCols([1,2])
print '\ndelete cols 1 and 2\n', s
# It is also possible to threshold a row, column or whole sparse matrix.
# This operation usually introduces zeros.
s.normalize()
print '\n', s
s.thresholdRow(0, .1)
print '\nthreshold row 0 .1\n', s
s.thresholdCol(1, .1)
print '\nthreshold col 1 .1\n', s
s.threshold(.1)
print '\nthreshold .1\n', s
# 7. Element wise operations:
# ==========================
# Element wise operations are prefixed with 'element'. There are row-oriented,
# column-oriented and whole matrix element-wise operations.
s = SM32(numpy.random.random((4,4)))
print '\n', s
s.elementNZInverse()
print '\nelementNZInverse\n', s
s.elementNZLog()
print '\nelementNZLog\n', s
s = abs(s)
print '\nabs\n', s
s.elementSqrt()
print '\nelementSqrt\n', s
s.add(4)
print '\nadd 4\n', s
s.normalizeRow(1, 10)
print '\nnormalizeRow 1 10\n', s
print 'sum row 1 = ', s.rowSum(1)
s.normalizeCol(0, 3)
print '\nnormalizeCol 0 3\n', s
print 'sum col 0 = ', s.colSum(0)
s.normalize(5)
print '\nnormalize to 5\n', s
print 'sum = ', s.sum()
s.normalize()
print '\nnormalize\n', s
print 'sum = ', s.sum()
s.transpose()
print '\ntranspose\n', s
s2 = SM32(numpy.random.random((3,4)))
print '\n', s2
s2.transpose()
print '\ntranspose rectangular\n', s2
s2.transpose()
print '\ntranspose rectangular again\n', s2
# 8. Matrix vector and matrix matrix operations:
# =============================================
# SparseMatrix provides matrix vector multiplication on the right and left,
# as well as specialized operations between a vector and the rows
# of the SparseMatrix.
x = numpy.array([1,2,3,4])
print '\nx = ', x
print 'Product on the right:\n', s.rightVecProd(x)
print 'Product on the left:\n', s.leftVecProd(x)
print 'Product of x elements corresponding to nz on each row:\n', s.rightVecProdAtNZ(x)
print 'Product of x elements and nz:\n', s.rowVecProd(x)
print 'Max of x elements corresponding to nz:\n', s.vecMaxAtNZ(x)
print 'Max of products of x elements and nz:\n', s.vecMaxProd(x)
print 'Max of elements of x corresponding to nz:\n', s.vecMaxAtNZ(x)
# axby computes linear combinations of rows and vectors
s.axby(0, 1.5, 1.5, x)
print '\naxby 0 1.5 1.5\n', s
s.axby(1.5, 1.5, x)
print '\naxby 1.5 1.5\n', s
# The multiplication operator can be used both for inner and outer product,
# depending on the shape of its operands, when using SparseMatrix instances:
s_row = SM32([[1,2,3,4]])
s_col = SM32([[1],[2],[3],[4]])
print '\nInner product: ', s_row * s_col
print '\nOuter product:\n', s_col * s_row
# SparseMatrix supports matrix matrix multiplication:
s1 = SM32(numpy.random.random((4,4)))
s2 = SM32(numpy.random.random((4,4)))
print '\nmatrix matrix multiplication\n', s1 * s2
# The block matrix vector multiplication treats the matrix as if it were
# a collection of narrower matrices. The following multiplies a1 by x and then a2 by x,
# where a1 is the sub-matrix of size (4,2) obtained by considering
# only the first two columns of a, and a2 the sub-matrix obtained by considering only
# the last two columns of a.
a = SM32([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
x = [1,2,3,4]
print a.blockRightVecProd(2, x)
# To do an element multiplication of two matrices, do:
print a
b = SM32(numpy.random.randint(0,2,(4,4)))
print b
a.elementNZMultiply(b)
print a
# In general, the "element..." operations implement element by element operations.
# 9. Arithmetic operators:
# =======================
# It is possible to use all 4 arithmetic operators, with scalars or matrices:
print '\ns + 3\n', s + 3
print '\n3 + s\n', 3 + s
print '\ns - 1\n', s - 1
print '\n1 - s\n', 1 - s
print '\ns + s\n', s + s
print '\ns * 3\n', s * 3
print '\n3 * s\n', 3 * s
print '\ns * s\n', s * s
print '\ns / 3.1\n', s / 3.1
# ... and to write arbitrarily linear combinations of sparse matrices:
print '\ns1 + 2 * s - s2 / 3.1\n', s1 + 2 * s - s2 / 3.1
# In place operators are supported:
s += 3.5
print '\n+= 3.5\n', s
s -= 3.2
print '\n-= 3.2\n', s
s *= 3.1
print '\n*= 3.1\n', s
s /= -1.5
print '\n/= -1.5\n', s
# 10. Count/find:
# ==============
# Use countWhereEqual and whereEqual to count or find the elements that have
# a specific value. The first four parameters define a box in which to look:
# [begin_row, end_row) X [begin_col, end_col). The indices returned by whereEqual
# are relative to the original matrix. countWhereEqual is faster than using len()
# on the list returned by whereEqual.
s = SM32(numpy.random.randint(0,3,(5,5)))
print '\nThe matrix is now:\n', s
print '\nNumber of elements equal to 0=', s.countWhereEqual(0,5,0,5,0)
print 'Number of elements equal to 1=', s.countWhereEqual(0,5,0,5,1)
print 'Number of elements equal to 2=', s.countWhereEqual(0,5,0,5,2)
print '\nIndices of the elements == 0:', s.whereEqual(0,5,0,5,0)
print '\nIndices of the elements == 1:', s.whereEqual(0,5,0,5,1)
print '\nIndices of the elements == 2:', s.whereEqual(0,5,0,5,2)
# ... and there is even more:
print '\nAll ' + str(len(dir(s))) + ' methods:\n', dir(s)
| gpl-3.0 |
joachimmetz/dfvfs | dfvfs/credentials/keychain.py | 2 | 2707 | # -*- coding: utf-8 -*-
"""The path specification key chain.
The key chain is used to manage credentials for path specifications.
E.g. BitLocker Drive Encryption (BDE) encrypted volumes can require a
credential (e.g. password) to access the unencrypted data (unlock).
"""
from dfvfs.credentials import manager
class KeyChain(object):
"""Key chain."""
def __init__(self):
"""Initializes a key chain."""
super(KeyChain, self).__init__()
self._credentials_per_path_spec = {}
def Empty(self):
"""Empties the key chain."""
self._credentials_per_path_spec = {}
def ExtractCredentialsFromPathSpec(self, path_spec):
"""Extracts credentials from a path specification.
Args:
path_spec (PathSpec): path specification to extract credentials from.
"""
credentials = manager.CredentialsManager.GetCredentials(path_spec)
for identifier in credentials.CREDENTIALS:
value = getattr(path_spec, identifier, None)
if value is None:
continue
self.SetCredential(path_spec, identifier, value)
def GetCredential(self, path_spec, identifier):
"""Retrieves a specific credential from the key chain.
Args:
path_spec (PathSpec): path specification.
identifier (str): credential identifier.
Returns:
object: credential or None if the credential for the path specification
is not set.
"""
credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})
return credentials.get(identifier, None)
def GetCredentials(self, path_spec):
"""Retrieves all credentials for the path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
dict[str,object]: credentials for the path specification.
"""
return self._credentials_per_path_spec.get(path_spec.comparable, {})
def SetCredential(self, path_spec, identifier, data):
"""Sets a specific credential for the path specification.
Args:
path_spec (PathSpec): path specification.
identifier (str): credential identifier.
data (object): credential data.
Raises:
KeyError: if the credential is not supported by the path specification
type.
"""
supported_credentials = manager.CredentialsManager.GetCredentials(path_spec)
if identifier not in supported_credentials.CREDENTIALS:
raise KeyError((
          'Unsupported credential: {0:s} for path specification type: '
'{1:s}').format(identifier, path_spec.type_indicator))
credentials = self._credentials_per_path_spec.get(path_spec.comparable, {})
credentials[identifier] = data
self._credentials_per_path_spec[path_spec.comparable] = credentials
| apache-2.0 |
xbianonpi/Sick-Beard-TPB | lib/unidecode/x099.py | 252 | 4629 | data = (
'Hai ', # 0x00
'Ren ', # 0x01
'Tian ', # 0x02
'Jiao ', # 0x03
'Jia ', # 0x04
'Bing ', # 0x05
'Yao ', # 0x06
'Tong ', # 0x07
'Ci ', # 0x08
'Xiang ', # 0x09
'Yang ', # 0x0a
'Yang ', # 0x0b
'Er ', # 0x0c
'Yan ', # 0x0d
'Le ', # 0x0e
'Yi ', # 0x0f
'Can ', # 0x10
'Bo ', # 0x11
'Nei ', # 0x12
'E ', # 0x13
'Bu ', # 0x14
'Jun ', # 0x15
'Dou ', # 0x16
'Su ', # 0x17
'Yu ', # 0x18
'Shi ', # 0x19
'Yao ', # 0x1a
'Hun ', # 0x1b
'Guo ', # 0x1c
'Shi ', # 0x1d
'Jian ', # 0x1e
'Zhui ', # 0x1f
'Bing ', # 0x20
'Xian ', # 0x21
'Bu ', # 0x22
'Ye ', # 0x23
'Tan ', # 0x24
'Fei ', # 0x25
'Zhang ', # 0x26
'Wei ', # 0x27
'Guan ', # 0x28
'E ', # 0x29
'Nuan ', # 0x2a
'Hun ', # 0x2b
'Hu ', # 0x2c
'Huang ', # 0x2d
'Tie ', # 0x2e
'Hui ', # 0x2f
'Jian ', # 0x30
'Hou ', # 0x31
'He ', # 0x32
'Xing ', # 0x33
'Fen ', # 0x34
'Wei ', # 0x35
'Gu ', # 0x36
'Cha ', # 0x37
'Song ', # 0x38
'Tang ', # 0x39
'Bo ', # 0x3a
'Gao ', # 0x3b
'Xi ', # 0x3c
'Kui ', # 0x3d
'Liu ', # 0x3e
'Sou ', # 0x3f
'Tao ', # 0x40
'Ye ', # 0x41
'Yun ', # 0x42
'Mo ', # 0x43
'Tang ', # 0x44
'Man ', # 0x45
'Bi ', # 0x46
'Yu ', # 0x47
'Xiu ', # 0x48
'Jin ', # 0x49
'San ', # 0x4a
'Kui ', # 0x4b
'Zhuan ', # 0x4c
'Shan ', # 0x4d
'Chi ', # 0x4e
'Dan ', # 0x4f
'Yi ', # 0x50
'Ji ', # 0x51
'Rao ', # 0x52
'Cheng ', # 0x53
'Yong ', # 0x54
'Tao ', # 0x55
'Hui ', # 0x56
'Xiang ', # 0x57
'Zhan ', # 0x58
'Fen ', # 0x59
'Hai ', # 0x5a
'Meng ', # 0x5b
'Yan ', # 0x5c
'Mo ', # 0x5d
'Chan ', # 0x5e
'Xiang ', # 0x5f
'Luo ', # 0x60
'Zuan ', # 0x61
'Nang ', # 0x62
'Shi ', # 0x63
'Ding ', # 0x64
'Ji ', # 0x65
'Tuo ', # 0x66
'Xing ', # 0x67
'Tun ', # 0x68
'Xi ', # 0x69
'Ren ', # 0x6a
'Yu ', # 0x6b
'Chi ', # 0x6c
'Fan ', # 0x6d
'Yin ', # 0x6e
'Jian ', # 0x6f
'Shi ', # 0x70
'Bao ', # 0x71
'Si ', # 0x72
'Duo ', # 0x73
'Yi ', # 0x74
'Er ', # 0x75
'Rao ', # 0x76
'Xiang ', # 0x77
'Jia ', # 0x78
'Le ', # 0x79
'Jiao ', # 0x7a
'Yi ', # 0x7b
'Bing ', # 0x7c
'Bo ', # 0x7d
'Dou ', # 0x7e
'E ', # 0x7f
'Yu ', # 0x80
'Nei ', # 0x81
'Jun ', # 0x82
'Guo ', # 0x83
'Hun ', # 0x84
'Xian ', # 0x85
'Guan ', # 0x86
'Cha ', # 0x87
'Kui ', # 0x88
'Gu ', # 0x89
'Sou ', # 0x8a
'Chan ', # 0x8b
'Ye ', # 0x8c
'Mo ', # 0x8d
'Bo ', # 0x8e
'Liu ', # 0x8f
'Xiu ', # 0x90
'Jin ', # 0x91
'Man ', # 0x92
'San ', # 0x93
'Zhuan ', # 0x94
'Nang ', # 0x95
'Shou ', # 0x96
'Kui ', # 0x97
'Guo ', # 0x98
'Xiang ', # 0x99
'Fen ', # 0x9a
'Ba ', # 0x9b
'Ni ', # 0x9c
'Bi ', # 0x9d
'Bo ', # 0x9e
'Tu ', # 0x9f
'Han ', # 0xa0
'Fei ', # 0xa1
'Jian ', # 0xa2
'An ', # 0xa3
'Ai ', # 0xa4
'Fu ', # 0xa5
'Xian ', # 0xa6
'Wen ', # 0xa7
'Xin ', # 0xa8
'Fen ', # 0xa9
'Bin ', # 0xaa
'Xing ', # 0xab
'Ma ', # 0xac
'Yu ', # 0xad
'Feng ', # 0xae
'Han ', # 0xaf
'Di ', # 0xb0
'Tuo ', # 0xb1
'Tuo ', # 0xb2
'Chi ', # 0xb3
'Xun ', # 0xb4
'Zhu ', # 0xb5
'Zhi ', # 0xb6
'Pei ', # 0xb7
'Xin ', # 0xb8
'Ri ', # 0xb9
'Sa ', # 0xba
'Yin ', # 0xbb
'Wen ', # 0xbc
'Zhi ', # 0xbd
'Dan ', # 0xbe
'Lu ', # 0xbf
'You ', # 0xc0
'Bo ', # 0xc1
'Bao ', # 0xc2
'Kuai ', # 0xc3
'Tuo ', # 0xc4
'Yi ', # 0xc5
'Qu ', # 0xc6
'[?] ', # 0xc7
'Qu ', # 0xc8
'Jiong ', # 0xc9
'Bo ', # 0xca
'Zhao ', # 0xcb
'Yuan ', # 0xcc
'Peng ', # 0xcd
'Zhou ', # 0xce
'Ju ', # 0xcf
'Zhu ', # 0xd0
'Nu ', # 0xd1
'Ju ', # 0xd2
'Pi ', # 0xd3
'Zang ', # 0xd4
'Jia ', # 0xd5
'Ling ', # 0xd6
'Zhen ', # 0xd7
'Tai ', # 0xd8
'Fu ', # 0xd9
'Yang ', # 0xda
'Shi ', # 0xdb
'Bi ', # 0xdc
'Tuo ', # 0xdd
'Tuo ', # 0xde
'Si ', # 0xdf
'Liu ', # 0xe0
'Ma ', # 0xe1
'Pian ', # 0xe2
'Tao ', # 0xe3
'Zhi ', # 0xe4
'Rong ', # 0xe5
'Teng ', # 0xe6
'Dong ', # 0xe7
'Xun ', # 0xe8
'Quan ', # 0xe9
'Shen ', # 0xea
'Jiong ', # 0xeb
'Er ', # 0xec
'Hai ', # 0xed
'Bo ', # 0xee
'Zhu ', # 0xef
'Yin ', # 0xf0
'Luo ', # 0xf1
'Shuu ', # 0xf2
'Dan ', # 0xf3
'Xie ', # 0xf4
'Liu ', # 0xf5
'Ju ', # 0xf6
'Song ', # 0xf7
'Qin ', # 0xf8
'Mang ', # 0xf9
'Liang ', # 0xfa
'Han ', # 0xfb
'Tu ', # 0xfc
'Xuan ', # 0xfd
'Tui ', # 0xfe
'Jun ', # 0xff
)
| gpl-3.0 |
ITA-ftuyama/TG | pgu/hexvid.py | 3 | 4158 | """Hexagonal tile engine.
Note -- this engine is not finished. Sprites are not supported. It
can still be useful for using the level editor, and for rendering hex
terrains, however. If you are able to update it and use it in a real game,
help would be greatly appreciated!
Please note that this file is alpha, and is subject to modification in
future versions of pgu!
"""
print('pgu.hexvid - This module is alpha, and is subject to change.')
from pgu.vid import *
import pygame
class Hexvid(Vid):
"""Create an hex vid engine. See [[vid]]"""
def update(self,screen):
return self.paint(screen)
def paint(self,screen):
sw,sh = screen.get_width(),screen.get_height()
self.view.w,self.view.h = sw,sh
tlayer = self.tlayer
blayer = self.blayer
#zlayer = self.zlayer
w,h = len(tlayer[0]),len(tlayer)
#iso_w,iso_h,iso_z,tile_w,tile_h,base_w,base_h = self.iso_w,self.iso_h,self.iso_z,self.tile_w,self.tile_h,self.base_w,self.base_h
tile_w,tile_h = self.tile_w,self.tile_h
tile_w2,tile_h2 = tile_w/2,tile_h/2
view = self.view
adj = self.adj = pygame.Rect(-self.view.x,-self.view.y,0,0)
w,h = len(tlayer[0]),len(tlayer)
tiles = self.tiles
#""
if self.bounds == None:
tmp,y1 = self.tile_to_view((0,0))
x1,tmp = self.tile_to_view((0,h+1))
tmp,y2 = self.tile_to_view((w+1,h+1))
x2,tmp = self.tile_to_view((w+1,0))
self.bounds = pygame.Rect(x1,y1,x2-x1,y2-y1)
print self.bounds
#""
if self.bounds != None: self.view.clamp_ip(self.bounds)
ox,oy = self.screen_to_tile((0,0))
sx,sy = self.tile_to_view((ox,oy))
dx,dy = sx - self.view.x,sy - self.view.y
bot = 1
tile_wi = tile_w + tile_w/2
tile_wi2 = tile_wi/2
#dx += tile_w/2
for i2 in xrange(-bot,self.view.h//tile_h2+bot*3): #NOTE: 3 seems a bit much, but it works.
tx,ty = ox + i2/2 + i2%2,oy + i2/2
x,y = (i2%2)*tile_wi2 + dx,i2*tile_h2 + dy
#to adjust for the -1 in i1
x,tx,ty = x-tile_wi,tx-1,ty+1
x -= tile_w/2
for i1 in xrange(-1,self.view.w//tile_wi+1):
if ty >= 0 and ty < h and tx >= 0 and tx < w:
if blayer != None:
n = blayer[ty][tx]
if n != 0:
t = tiles[n]
if t != None and t.image != None:
screen.blit(t.image,(x,y))
n = tlayer[ty][tx]
if n != 0:
t = tiles[n]
if t != None and t.image != None:
screen.blit(t.image,(x,y))
tx += 1
ty -= 1
x += tile_wi
return [pygame.Rect(0,0,screen.get_width(),screen.get_height())]
def view_to_tile(self,pos):
x,y = pos
#x = x + (self.tile_w*1/2)
x,y = int(x*4/(self.tile_w*3)), y*2/self.tile_h
nx = (x + y) / 2
ny = (y - x) / 2
return nx,ny
def tile_to_view(self,pos):
x,y = pos
nx = x - y
ny = x + y
nx,ny = int(nx*(self.tile_w*3)/4), ny*self.tile_h/2
#nx = nx - (self.tile_w*1/2)
return nx,ny
def screen_to_tile(self,pos): #NOTE HACK : not sure if the 3/8 is right or not, but it is pretty close...
pos = pos[0]+self.view.x + self.tile_w*3/8,pos[1]+self.view.y
pos = self.view_to_tile(pos)
return pos
def tile_to_screen(self,pos):
pos = self.tile_to_view(pos)
pos = pos[0]-self.view.x,pos[1]-self.view.y
return pos
def tga_load_tiles(self,fname,size,tdata={}):
Vid.tga_load_tiles(self,fname,size,tdata)
self.tile_w,self.tile_h = size
| mit |
ashleyrback/echidna | echidna/output/plot_chi_squared.py | 4 | 19917 | """ *** DEPRECIATED ***
Contains functions to view and interrogate chi-squared minimisation
Attributes:
MAIN_FONT (dict): style properties for the main font to use in plot labels
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.colorbar import make_axes_gridspec
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import FixedLocator, ScalarFormatter
import numpy
import echidna.calc.decay as decay
MAIN_FONT = {"size": 22}
BOLD_FONT = {"size": 22, "weight": "bold"}
def chi_squared_vs_signal(signal_config, converter=None, fig_num=1,
n_atoms=None, penalty=None, effective_mass=False,
half_life=False, save_as=None, show=False, **kwargs):
""" Plot the chi squared as a function of signal counts
Args:
signal_config (:class:`echidna.limit.limit_config.LimitConfig`): Signal
config class, where chi squareds have been stored.
converter (:class:`echidna.calc.decay.DBIsotope`, optional): Converter
used to convert between counts and half-life/effective mass.
fig_num (int): Fig number. When creating multiple plots in the
same script, ensures matplotlib doesn't overwrite them.
n_atoms (float): Number of atoms for converter to use in
calculations of half life or effective mass.
penalty (:class:`echidna.limit.limit_config.LimitConfig`, optional):
config for signal with penalty term.
effective_mass (bool, optional): if True, plot the x-axis as the
signal contribution effective mass.
half_life (bool, optional): if True, plot the x-axis as the signal
contribution half life.
save_as (string, optional): Name of plot to save. All plots are
saved in .png format.
show (bool, optional): Display the plot to screen. Default is False.
\**kwargs: Keyword arguments to pass to converter methods.
Raises:
TypeError: If 'half_life' or 'effective_mass' keyword arguments
are used without :class:`echidna.calc.decay.DBIsotope` object
to use as converter.
Returns:
matplotlib.pyplot.figure: Plotted figure.
"""
    if converter is None and (half_life or effective_mass):
raise TypeError("converter is None. Cannot use 'half_life' or "
"'effective_mass' keywords without converter")
# Fig. 1 (axes generated automatically)
fig = plt.figure(fig_num, figsize=(10, 10))
# X axis values
if effective_mass:
x = numpy.zeros(shape=(signal_config.get_chi_squareds()[2].shape))
for i_bin, count in enumerate(signal_config.get_chi_squareds()[2]):
effective_mass = converter.counts_to_eff_mass(count, **kwargs)
x[i_bin] = effective_mass
plt.xlabel(r"$m_{\beta\beta}$", **BOLD_FONT)
elif half_life:
x = numpy.zeros(shape=(signal_config.get_chi_squareds()[2].shape))
for i_bin, count in enumerate(signal_config.get_chi_squareds()[2]):
            x[i_bin] = 1./converter.counts_to_half_life(count, **kwargs)
plt.xlabel(r"$1/T_{1/2}^{0\nu}$", **BOLD_FONT)
else:
x = signal_config.get_chi_squareds()[2]
plt.xlabel("Signal counts", **BOLD_FONT)
# Y axis values
y_1 = signal_config.get_chi_squareds()[0]
plt.ylabel(r"$\chi^{2}$", **BOLD_FONT)
if penalty:
y_2 = penalty.get_chi_squareds()[0]
plt.plot(x, y_1, "bo-", label="no systematic uncertainties")
# lines and dots
plt.plot(x, y_2, "ro-", label="systematic uncertainties")
plt.legend(loc="upper left")
else:
plt.plot(x, y_1, "o-") # lines and dots
# Set the tick labels, via Axes instance
ax = fig.gca() # Get current Axes instance
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
# Set other properties here e.g. colour, rotation
label.set_fontsize(MAIN_FONT.get("size"))
if save_as:
plt.savefig(save_as + ".png", dpi=400)
if show:
plt.show()
return fig
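# A minimal usage sketch (illustrative only): 'signal_config' and 'penalty_config'
# are assumed to be LimitConfig instances filled during limit setting.
#
#   fig = chi_squared_vs_signal(signal_config, penalty=penalty_config,
#                               save_as="chi_squared_vs_signal", show=True)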
def chi_squared_map(syst_analyser, fig_num=1, preferred_values=True,
minima=True, contours=False, save_as=None):
""" Plot chi squared surface for systematic vs. signal counts
Args:
syst_analyser (:class:`echidna.limit.limit_setting.SystAnalyser`): A
systematic analyser object, created during limit setting. Can be used
during limit setting setting or can load an instance from hdf5
fig_num (int): Fig number. When creating multiple plots in the
same script, ensures matplotlib doesn't overwrite them.
preferred_values (bool, optional): if False "preferred values" curve
is not overlayed on colour map. Default is True.
minima (bool, optional): if False "minima" are not overlayed on
colour map. Default is True.
contours (bool, optional): if True produces a contour plot of chi
squared surface. Default is False.
save_as (string, optional): Name of plot to save. All plots are
saved with in .png format.
Default is to produce a colour map, with "preferred values" curve
and "minima" overlayed.
Returns:
matplotlib.pyplot.figure: Plotted figure.
"""
# Set x and y axes
x = syst_analyser.get_actual_counts()
y = syst_analyser.get_syst_values()
# Set chi squared map values
data = numpy.average(syst_analyser.get_chi_squareds(), axis=1)
data = numpy.transpose(data) # transpose it so that axes are correct
# Set preferred value values
y_2 = numpy.average(syst_analyser.get_preferred_values(), axis=1)
# Set minima values
x_3 = syst_analyser.get_minima()[0]
y_3 = syst_analyser.get_minima()[1]
# Create meshgrid
X, Y = numpy.meshgrid(x, y)
# Set sensible levels, pick the desired colormap and define normalization
color_map = plt.get_cmap('hot_r')
linear = numpy.linspace(numpy.sqrt(data.min()), numpy.sqrt(data.max()),
num=100)
locator = FixedLocator(linear**2)
levels = locator.tick_values(data.min(), data.max())
norm = BoundaryNorm(levels, ncolors=color_map.N)
if contours:
fig = plt.figure(fig_num, figsize=(16, 10)) # Fig. 2
fig.text(0.1, 0.9, syst_analyser._name, **BOLD_FONT)
ax = Axes3D(fig)
ax.view_init(elev=17.0, azim=-136.0) # set intial viewing position
# Plot surface
surf = ax.plot_surface(X, Y, data, rstride=1, cstride=1,
cmap=color_map, norm=norm, linewidth=0,
antialiased=False)
ax.zaxis.set_minor_locator(locator)
ax.ticklabel_format(style="scientific", scilimits=(3, 4))
# Set axis labels
ax.set_xlabel("\nSignal counts", **BOLD_FONT)
ax.set_ylabel("\nValue of systematic", **BOLD_FONT)
for label in (ax.get_xticklabels() +
ax.get_yticklabels() +
ax.get_zticklabels()):
label.set_fontsize(MAIN_FONT.get("size")) # tick label size
ax.dist = 11 # Ensures tick labels are not cut off
ax.margins(0.05, 0.05, 0.05) # Adjusts tick margins
# Draw colorbar
color_bar = fig.colorbar(surf, ax=ax, orientation="vertical",
fraction=0.2, shrink=0.5, aspect=10)
# kwargs here control axes that the colorbar is drawn in
color_bar.set_label(r"$\chi^2$", size=MAIN_FONT.get("size"))
color_bar.ax.tick_params(labelsize=MAIN_FONT.get("size"))
plt.show()
if save_as:
fig.savefig(save_as + "_contour.png", dpi=300)
else:
fig = plt.figure(fig_num, figsize=(12, 10)) # Fig. 2
fig.text(0.1, 0.95, syst_analyser._name, **BOLD_FONT)
ax = fig.add_subplot(1, 1, 1)
# Set labels
ax.set_xlabel("Signal counts", **BOLD_FONT)
ax.set_ylabel("Value of systematic", **BOLD_FONT)
# Plot color map
color_map = ax.pcolormesh(X, Y, data, cmap=color_map, norm=norm)
color_bar = fig.colorbar(color_map)
color_bar.set_label("$\chi^2$", size=MAIN_FONT.get("size"))
# tick label size
color_bar.ax.tick_params(labelsize=MAIN_FONT.get("size"))
# Set axes limits
ax.set_xlim([X.min(), X.max()])
ax.set_ylim([Y.min(), Y.max()])
if preferred_values:
ax.plot(x, y_2, "bo-", label="Preferred values")
if minima:
ax.plot(x_3, y_3, "ko", label="Minima")
# Set axes tick label size
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(MAIN_FONT.get("size"))
ax.legend(loc="upper left")
if save_as:
fig.savefig(save_as + "_color_map.png", dpi=300)
return fig
def penalty_vs_systematic(syst_analyser, fig_num=1, save_as=None):
""" Plot penalty_value vs. systematic
Args:
syst_analyser (:class:`echidna.limit.limit_setting.SystAnalyser`): A
systematic analyser object, created during limit setting. Can be used
during limit setting or an instance can be loaded from hdf5
fig_num (int, optional): Fig number. When creating multiple plots in the
same script, ensures matplotlib doesn't overwrite them.
save_as (string, optional): Name of plot to save. All plots are
saved in .png format.
Returns:
matplotlib.pyplot.figure: Plotted figure.
"""
fig = plt.figure(fig_num, figsize=(9, 7)) # Fig. 3
fig.text(0.1, 0.95, syst_analyser._name, **BOLD_FONT)
ax = fig.add_subplot(1, 1, 1)
x = syst_analyser._penalty_values[0]
y = syst_analyser._penalty_values[1]
plt.xlabel("Value of systematic", **BOLD_FONT)
plt.ylabel("Value of penalty term", **BOLD_FONT)
plt.plot(x, y, "bo")
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
# Set other properties here e.g. colour, rotation
label.set_fontsize(MAIN_FONT.get("size"))
if save_as:
plt.savefig(kwagrs.get("save_as") + ".png")
return fig
def turn_on(syst_analyser, signal_config, fig=1, save_as=None):
""" Plot deviation from chi-squared with no floated systematics.
When does the effect of floating the systematic "turn on"?
Args:
syst_analyser (:class:`echidna.limit.limit_setting.SystAnalyser`): A
systematic analyser object, created during limit setting. Can be used
during limit setting or an instance can be loaded from hdf5.
signal_config (:class:`echidna.limit.limit_config.LimitConfig`): Signal
config class, where chi squareds have been stored.
fig (int, optional): Fig number. When creating multiple plots in the
same script, ensures matplotlib doesn't overwrite them.
save_as (string, optional): Name of plot to save. All plots are
saved in .png format.
Returns:
matplotlib.pyplot.figure: Plotted figure.
"""
# Set x and y axes
x = syst_analyser.get_actual_counts()
y = syst_analyser.get_syst_values()
# Set chi squared map values
data = numpy.average(syst_analyser.get_chi_squareds(), axis=1)
data = numpy.transpose(data) # transpose it so that axes are correct
# Create meshgrid
X, Y = numpy.meshgrid(x, y)
# Define an array of \chi_0 values - chi squared without
# floating systematics
chi_squareds = signal_config.get_chi_squareds()[0]
data_np = numpy.zeros(data.shape) # zeroed array the same size as data
for y in range(len(data_np)):
for x, chi_squared in enumerate(chi_squareds):
data_np[y][x] = chi_squared
#if numpy.any((numpy.average(data_np, axis=0) != chi_squareds)):
# raise AssertionError("Incorrect chi squareds (no floating) array.")
# Make an array of the offsets
offsets = data - data_np
# Set sensible levels, pick the desired colormap and define normalization
color_map = plt.get_cmap('coolwarm')
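# Build an asymmetric colour scale: any negative offsets are spaced linearly,
# while positive offsets are spaced logarithmically up to the maximum offset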
positives = numpy.linspace(numpy.log10(offsets.max())*-1.,
numpy.log10(offsets.max()), num=50)
# linear array in log space
if offsets.min() < 0.:
negatives = numpy.linspace(offsets.min(), 0.0, num=51)
else:
negatives = numpy.zeros((51))
# Add the positive part to the negative part
full_scale = numpy.append(negatives, numpy.power(10, positives))
locator = FixedLocator(full_scale)
levels = locator.tick_values(offsets.min(), offsets.max())
norm = BoundaryNorm(levels, ncolors=color_map.N)
fig = plt.figure(fig, figsize=(12, 10)) # Fig. 4
fig.text(0.1, 0.95, syst_analyser._name, **BOLD_FONT)
ax = fig.add_subplot(1, 1, 1)
# Set labels
ax.set_xlabel("Signal counts", **BOLD_FONT)
ax.set_ylabel("Value of systematic", **BOLD_FONT)
# Plot color map
color_map = ax.pcolormesh(X, Y, offsets, cmap=color_map, norm=norm)
color_bar = fig.colorbar(color_map)
color_bar.set_label("$\chi^2 - \chi_0^2$", size=MAIN_FONT.get("size"))
# tick label size
color_bar.ax.tick_params(labelsize=MAIN_FONT.get("size"))
# Set axes limits
ax.set_xlim([X.min(), X.max()])
ax.set_ylim([Y.min(), Y.max()])
# Set axes tick label size
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(MAIN_FONT.get("size"))
ax.legend(loc="upper left")
if save_as:
fig.savefig(save_as + "_turn_on.png", dpi=300)
return fig
def push_pull(syst_analyser, fig=1, save_as=None):
""" Plot penalty value - poisson likelihood chi squared.
When does minimising the chi squared, which wants to "pull" the fit away
from the data/prior value, dominate, and when does the penalty term,
which wants to "pull" the fit towards the data/prior, constraining it,
dominate?
Args:
syst_analyser (:class:`echidna.limit.limit_setting.SystAnalyser`): A
systematic analyser object, created during limit setting. Can be used
during limit setting or an instance can be loaded from hdf5
fig (int, optional): Fig number. When creating multiple plots in the
same script, ensures matplotlib doesn't overwrite them.
save_as (string, optional): Name of plot to save. All plots are
saved in .png format.
Returns:
matplotlib.pyplot.figure: Plotted figure.
"""
# Set x and y axes
x = syst_analyser.get_actual_counts()
y = syst_analyser.get_syst_values()
# Set chi squared map values
data = numpy.average(syst_analyser.get_chi_squareds(), axis=1)
data = numpy.transpose(data) # transpose it so that axes are correct
# Create meshgrid
X, Y = numpy.meshgrid(x, y)
# Define an array penalty values
penalty_values = syst_analyser._penalty_values[1, 0:len(y)]
# zeroed array the same size as data
penalty_array = numpy.zeros(data.shape)
for y, penalty_value in enumerate(penalty_values):
for x in range(len(penalty_array[y])):
penalty_array[y][x] = penalty_value
# Define the push pull array penalty term - chi_squared
# --> push_pull > 0 when penalty_value > chi_squared
# --> push_pull < 0 when penalty_value < chi_squared
push_pull = (2.*penalty_array) - data
# Set sensible levels, pick the desired colormap and define normalization
color_map = plt.get_cmap('coolwarm')
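# Build a diverging colour scale centred on zero, separating regions where the penalty
# term dominates (positive) from regions where the chi squared term dominates (negative)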
if push_pull.min() < 0.:
negatives = numpy.linspace(push_pull.min(), 0.,
num=50, endpoint=False)
else:
negatives = numpy.zeros((50))
if push_pull.max() > 0.:
positives = numpy.linspace(0., push_pull.max(), num=51)
else:
positives = numpy.zeros((51))
# Add the pull part to the push part
full_scale = numpy.append(negatives, positives)
locator = FixedLocator(full_scale)
levels = locator.tick_values(push_pull.min(), push_pull.max())
norm = BoundaryNorm(levels, ncolors=color_map.N)
fig = plt.figure(fig, figsize=(12, 10)) # Fig. 4
fig.text(0.1, 0.95, syst_analyser._name, **BOLD_FONT)
ax = fig.add_subplot(1, 1, 1)
# Set labels
ax.set_xlabel("Signal counts", **BOLD_FONT)
ax.set_ylabel("Value of systematic", **BOLD_FONT)
# Plot color map
color_map = ax.pcolormesh(X, Y, push_pull, cmap=color_map, norm=norm)
color_bar = fig.colorbar(color_map)
color_bar.set_label("$s-\chi^{2}_{\lambda,p}$", size=MAIN_FONT.get("size"))
# tick label size
color_bar.ax.tick_params(labelsize=MAIN_FONT.get("size"))
# Set axes limits
ax.set_xlim([X.min(), X.max()])
ax.set_ylim([Y.min(), Y.max()])
# Set axes tick label size
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(MAIN_FONT.get("size"))
ax.legend(loc="upper left")
if save_as:
fig.savefig(save_as + "_push_pull.png", dpi=300)
return fig
def main(args):
""" Script to produce chi squared plots for a given systematic.
.. note:: Produces
* Plot of chi squared vs. signal counts
* Plot of systematic vs. signal chi squared surface, either
contour plot or color map
* Plot of systematic value vs. penalty term value
Args:
args (dict): command line arguments from argparse.
"""
# Load signal config from hdf5
signal_config = LimitConfig(0, [0])
signal_config = store.load_ndarray(args.signal_config, signal_config)
if args.penalty_config is not None:
penalty_config = LimitConfig(0, [0])
penalty_config = store.load_ndarray(args.penalty_config,
penalty_config)
else:
penalty_config = None
# Load systematic analyser from hdf5
syst_analyser = SystAnalyser("", numpy.zeros((1)), numpy.zeros((1)))
syst_analyser = store.load_ndarray(args.syst_analyser, syst_analyser)
# Produce plots
# Currently not possible to produce chi squared vs signal plot with half
# life or effective mass on x-axis, from outside of limit setting code.
# Just produce with signal counts on x-axis here.
fig_1 = chi_squared_vs_signal(signal_config, fig_num=1,
penalty=penalty_config,
save_as=args.image_name)
fig_2 = penalty_vs_systematic(syst_analyser, 2)
fig_3 = turn_on(syst_analyser, signal_config, 3, save_as=args.image_name)
fig_4 = push_pull(syst_analyser, 4, save_as=args.image_name)
fig_5 = chi_squared_map(syst_analyser, 5,
contours=args.contours,
save_as=args.image_name)
plt.show()
raw_input("RETURN to exit")
if __name__ == "__main__":
import echidna.output.store as store
from echidna.limit.limit_config import LimitConfig
from echidna.limit.limit_setting import SystAnalyser
from echidna.scripts.zero_nu_limit import ReadableDir
import argparse
parser = argparse.ArgumentParser(description="Produce chi squared plots "
"for a systematic")
parser.add_argument("-s", "--signal_config", action=ReadableDir,
help="Supply location of signal config hdf5 file")
parser.add_argument("-p", "--penalty_config", action=ReadableDir,
help="Supply location of signal config with "
"penalty term")
parser.add_argument("-a", "--syst_analyser", action=ReadableDir,
help="Supply location of syst analyser hdf5 file")
parser.add_argument("-i", "--image_name", type=str, default="output",
help="Supply an image name")
parser.add_argument("-c", "--contours", action="store_true",
help="If true produces a contour plot, "
"defualt is colour map")
args = parser.parse_args()
main(args)
| mit |
Litetokens/liteblockd | lib/api.py | 1 | 82959 | import os
import json
import re
import time
import datetime
import base64
import decimal
import operator
import logging
import copy
import uuid
import urllib
import functools
from logging import handlers as logging_handlers
from gevent import wsgi
from geventhttpclient import HTTPClient
from geventhttpclient.url import URL
import flask
import jsonrpc
from jsonrpc import dispatcher
import pymongo
from bson import json_util
from bson.son import SON
from lib import config, siofeeds, util, blockchain, util_litecoin
from lib.components import betting, rps, assets, assets_trading, dex
PREFERENCES_MAX_LENGTH = 100000 #in bytes, as expressed in JSON
API_MAX_LOG_SIZE = 10 * 1024 * 1024 #max log size of 10 MB before rotation (make configurable later)
API_MAX_LOG_COUNT = 10
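# 8 decimal places of precision for the D() arithmetic below, matching the
# 8-decimal-place (satoshi-like) units used for divisible asset quantities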
decimal.setcontext(decimal.Context(prec=8, rounding=decimal.ROUND_HALF_EVEN))
D = decimal.Decimal
def serve_api(mongo_db, redis_client):
# Preferences are just JSON objects... since we don't force a specific form to the wallet on
# the server side, this makes it easier for 3rd party wallets (i.e. not Craftwallet) to fully be able to
# use liteblockd to not only pull useful data, but also load and store their own preferences, containing
# whatever data they need
DEFAULT_COUNTERPARTYD_API_CACHE_PERIOD = 60 #in seconds
app = flask.Flask(__name__)
tx_logger = logging.getLogger("transaction_log") #get transaction logger
@dispatcher.add_method
def is_ready():
"""this method used by the client to check if the server is alive, caught up, and ready to accept requests.
If the server is NOT caught up, a 525 error will be returned actually before hitting this point. Thus,
if we actually return data from this function, it should always be true. (may change this behaviour later)"""
blockchainInfo = blockchain.getinfo()
ip = flask.request.headers.get('X-Real-Ip', flask.request.remote_addr)
country = config.GEOIP.country_code_by_addr(ip)
return {
'caught_up': util.is_caught_up_well_enough_for_government_work(),
'last_message_index': config.LAST_MESSAGE_INDEX,
'block_height': blockchainInfo['info']['blocks'],
'testnet': config.TESTNET,
'ip': ip,
'country': country,
'quote_assets': config.QUOTE_ASSETS,
'quick_buy_enable': True if config.VENDING_MACHINE_PROVIDER is not None else False
}
@dispatcher.add_method
def get_reflected_host_info():
"""Allows the requesting host to get some info about itself, such as its IP. Used for troubleshooting."""
ip = flask.request.headers.get('X-Real-Ip', flask.request.remote_addr)
country = config.GEOIP.country_code_by_addr(ip)
return {
'ip': ip,
'cookie': flask.request.headers.get('Cookie', ''),
'country': country
}
@dispatcher.add_method
def get_messagefeed_messages_by_index(message_indexes):
messages = util.call_jsonrpc_api("get_messages_by_index", {'message_indexes': message_indexes}, abort_on_error=True)['result']
events = []
for m in messages:
events.append(util.decorate_message_for_feed(m))
return events
@dispatcher.add_method
def get_chain_block_height():
#DEPRECATED 1.5
data = blockchain.getinfo()
return data['info']['blocks']
@dispatcher.add_method
def get_chain_address_info(addresses, with_uxtos=True, with_last_txn_hashes=4, with_block_height=False):
if not isinstance(addresses, list):
raise Exception("addresses must be a list of addresses, even if it just contains one address")
results = []
if with_block_height:
block_height_response = blockchain.getinfo()
block_height = block_height_response['info']['blocks'] if block_height_response else None
for address in addresses:
info = blockchain.getaddressinfo(address)
txns = info['transactions']
del info['transactions']
result = {}
result['addr'] = address
result['info'] = info
if with_block_height: result['block_height'] = block_height
#^ yeah, hacky...it will be the same block height for each address (we do this to avoid an extra API call to get_block_height)
if with_uxtos:
result['uxtos'] = blockchain.listunspent(address)
if with_last_txn_hashes:
#with last_txns, only show CONFIRMED txns (so skip the first info['unconfirmedTxApperances'] # of txns, if not 0)
result['last_txns'] = txns[info['unconfirmedTxApperances']:with_last_txn_hashes+info['unconfirmedTxApperances']]
results.append(result)
return results
@dispatcher.add_method
def get_chain_txns_status(txn_hashes):
if not isinstance(txn_hashes, list):
raise Exception("txn_hashes must be a list of txn hashes, even if it just contains one hash")
results = []
for tx_hash in txn_hashes:
tx_info = blockchain.gettransaction(tx_hash)
if tx_info:
assert tx_info['txid'] == tx_hash
results.append({
'tx_hash': tx_info['txid'],
'blockhash': tx_info.get('blockhash', None), #not provided if not confirmed on network
'confirmations': tx_info.get('confirmations', 0), #not provided if not confirmed on network
'blocktime': tx_info.get('time', None),
})
return results
@dispatcher.add_method
def get_normalized_balances(addresses):
"""
This call augments litetokensd's get_balances with a normalized_quantity field. It also will include any owned
assets for an address, even if their balance is zero.
NOTE: Does not retrieve LTC balance. Use get_address_info for that.
"""
if not isinstance(addresses, list):
raise Exception("addresses must be a list of addresses, even if it just contains one address")
if not len(addresses):
raise Exception("Invalid address list supplied")
filters = []
for address in addresses:
filters.append({'field': 'address', 'op': '==', 'value': address})
mappings = {}
result = util.call_jsonrpc_api("get_balances",
{'filters': filters, 'filterop': 'or'}, abort_on_error=True)['result']
isowner = {}
owned_assets = mongo_db.tracked_assets.find( { '$or': [{'owner': a } for a in addresses] }, { '_history': 0, '_id': 0 } )
for o in owned_assets:
isowner[o['owner'] + o['asset']] = o
data = []
for d in result:
if not d['quantity'] and ((d['address'] + d['asset']) not in isowner):
continue #don't include balances with a zero asset value
asset_info = mongo_db.tracked_assets.find_one({'asset': d['asset']})
d['normalized_quantity'] = util_litecoin.normalize_quantity(d['quantity'], asset_info['divisible'])
d['owner'] = (d['address'] + d['asset']) in isowner
mappings[d['address'] + d['asset']] = d
data.append(d)
#include any owned assets for each address, even if their balance is zero
for key in isowner:
if key not in mappings:
o = isowner[key]
data.append({
'address': o['owner'],
'asset': o['asset'],
'quantity': 0,
'normalized_quantity': 0,
'owner': True,
})
return data
@dispatcher.add_method
def get_escrowed_balances(addresses):
return assets.get_escrowed_balances(addresses)
def _get_address_history(address, start_block=None, end_block=None):
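"""Collects all litetokensd activity for a single address (balances, debits/credits, burns, sends,
orders, bets, issuances, broadcasts, dividends, cancels and the various expirations), making one
litetokensd API call per table, optionally restricted to the given block range."""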
address_dict = {}
address_dict['balances'] = util.call_jsonrpc_api("get_balances",
{ 'filters': [{'field': 'address', 'op': '==', 'value': address},],
}, abort_on_error=True)['result']
address_dict['debits'] = util.call_jsonrpc_api("get_debits",
{ 'filters': [{'field': 'address', 'op': '==', 'value': address},],
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['credits'] = util.call_jsonrpc_api("get_credits",
{ 'filters': [{'field': 'address', 'op': '==', 'value': address},],
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['burns'] = util.call_jsonrpc_api("get_burns",
{ 'filters': [{'field': 'source', 'op': '==', 'value': address},],
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['sends'] = util.call_jsonrpc_api("get_sends",
{ 'filters': [{'field': 'source', 'op': '==', 'value': address}, {'field': 'destination', 'op': '==', 'value': address}],
'filterop': 'or',
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
#^ with filterop == 'or', we get all sends where this address was the source OR destination
address_dict['orders'] = util.call_jsonrpc_api("get_orders",
{ 'filters': [{'field': 'source', 'op': '==', 'value': address},],
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['order_matches'] = util.call_jsonrpc_api("get_order_matches",
{ 'filters': [{'field': 'tx0_address', 'op': '==', 'value': address}, {'field': 'tx1_address', 'op': '==', 'value': address},],
'filterop': 'or',
'order_by': 'tx0_block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['ltcpays'] = util.call_jsonrpc_api("get_ltcpays",
{ 'filters': [{'field': 'source', 'op': '==', 'value': address}, {'field': 'destination', 'op': '==', 'value': address}],
'filterop': 'or',
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['issuances'] = util.call_jsonrpc_api("get_issuances",
{ 'filters': [{'field': 'issuer', 'op': '==', 'value': address}, {'field': 'source', 'op': '==', 'value': address}],
'filterop': 'or',
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['broadcasts'] = util.call_jsonrpc_api("get_broadcasts",
{ 'filters': [{'field': 'source', 'op': '==', 'value': address},],
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['bets'] = util.call_jsonrpc_api("get_bets",
{ 'filters': [{'field': 'source', 'op': '==', 'value': address},],
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['bet_matches'] = util.call_jsonrpc_api("get_bet_matches",
{ 'filters': [{'field': 'tx0_address', 'op': '==', 'value': address}, {'field': 'tx1_address', 'op': '==', 'value': address},],
'filterop': 'or',
'order_by': 'tx0_block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['dividends'] = util.call_jsonrpc_api("get_dividends",
{ 'filters': [{'field': 'source', 'op': '==', 'value': address},],
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['cancels'] = util.call_jsonrpc_api("get_cancels",
{ 'filters': [{'field': 'source', 'op': '==', 'value': address},],
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['callbacks'] = util.call_jsonrpc_api("get_callbacks",
{ 'filters': [{'field': 'source', 'op': '==', 'value': address},],
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['bet_expirations'] = util.call_jsonrpc_api("get_bet_expirations",
{ 'filters': [{'field': 'source', 'op': '==', 'value': address},],
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['order_expirations'] = util.call_jsonrpc_api("get_order_expirations",
{ 'filters': [{'field': 'source', 'op': '==', 'value': address},],
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['bet_match_expirations'] = util.call_jsonrpc_api("get_bet_match_expirations",
{ 'filters': [{'field': 'tx0_address', 'op': '==', 'value': address}, {'field': 'tx1_address', 'op': '==', 'value': address},],
'filterop': 'or',
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
address_dict['order_match_expirations'] = util.call_jsonrpc_api("get_order_match_expirations",
{ 'filters': [{'field': 'tx0_address', 'op': '==', 'value': address}, {'field': 'tx1_address', 'op': '==', 'value': address},],
'filterop': 'or',
'order_by': 'block_index',
'order_dir': 'asc',
'start_block': start_block,
'end_block': end_block,
}, abort_on_error=True)['result']
return address_dict
@dispatcher.add_method
def get_last_n_messages(count=100):
if count > 1000:
raise Exception("The count is too damn high")
message_indexes = range(max(config.LAST_MESSAGE_INDEX - count, 0) + 1, config.LAST_MESSAGE_INDEX+1)
messages = util.call_jsonrpc_api("get_messages_by_index",
{ 'message_indexes': message_indexes }, abort_on_error=True)['result']
for i in xrange(len(messages)):
messages[i] = util.decorate_message_for_feed(messages[i])
return messages
@dispatcher.add_method
def get_raw_transactions(address, start_ts=None, end_ts=None, limit=500):
"""Gets raw transactions for a particular address
@param address: A single address string
@param start_ts: The starting date & time. Should be a unix epoch object. If passed as None, defaults to 60 days before the end_date
@param end_ts: The ending date & time. Should be a unix epoch object. If passed as None, defaults to the current date & time
@param limit: the maximum number of transactions to return; defaults to 500
@return: Returns the data, ordered from newest txn to oldest. If any limit is applied, it will cut back from the oldest results
"""
def get_asset_cached(asset, asset_cache):
if asset in asset_cache:
return asset_cache[asset]
asset_data = mongo_db.tracked_assets.find_one({'asset': asset})
asset_cache[asset] = asset_data
return asset_data
asset_cache = {} #ghetto cache to speed asset lookups within the scope of a function call
now_ts = time.mktime(datetime.datetime.utcnow().timetuple())
if not end_ts: #default to current datetime
end_ts = now_ts
if not start_ts: #default to 60 days before the end date
start_ts = end_ts - (60 * 24 * 60 * 60)
start_block_index, end_block_index = util.get_block_indexes_for_dates(
start_dt=datetime.datetime.utcfromtimestamp(start_ts),
end_dt=datetime.datetime.utcfromtimestamp(end_ts) if now_ts != end_ts else None)
#make API call to litetokensd to get all of the data for the specified address
txns = []
d = _get_address_history(address, start_block=start_block_index, end_block=end_block_index)
#mash it all together
for category, entries in d.iteritems():
if category in ['balances',]:
continue
for e in entries:
e['_category'] = category
e = util.decorate_message(e, for_txn_history=True) #DRY
txns += entries
txns = util.multikeysort(txns, ['-_block_time', '-_tx_index'])
txns = txns[0:limit] #TODO: we can trunk before sorting. check if we can use the messages table and use sql order and limit
#^ won't be a perfect sort since we don't have tx_indexes for cancellations, but better than nothing
#txns.sort(key=operator.itemgetter('block_index'))
return txns
@dispatcher.add_method
def get_base_quote_asset(asset1, asset2):
"""Given two arbitrary assets, returns the base asset and the quote asset.
"""
#DEPRECATED 1.5
base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
base_asset_info = mongo_db.tracked_assets.find_one({'asset': base_asset})
quote_asset_info = mongo_db.tracked_assets.find_one({'asset': quote_asset})
pair_name = "%s/%s" % (base_asset, quote_asset)
if not base_asset_info or not quote_asset_info:
raise Exception("Invalid asset(s)")
return {
'base_asset': base_asset,
'quote_asset': quote_asset,
'pair_name': pair_name
}
@dispatcher.add_method
def get_market_price_summary(asset1, asset2, with_last_trades=0):
#DEPRECATED 1.5
result = assets_trading.get_market_price_summary(asset1, asset2, with_last_trades)
return result if result is not None else False
#^ due to current bug in our jsonrpc stack, just return False if None is returned
@dispatcher.add_method
def get_market_cap_history(start_ts=None, end_ts=None):
now_ts = time.mktime(datetime.datetime.utcnow().timetuple())
if not end_ts: #default to current datetime
end_ts = now_ts
if not start_ts: #default to 30 days before the end date
start_ts = end_ts - (30 * 24 * 60 * 60)
data = {}
results = {}
#^ format is result[market_cap_as][asset] = [[block_time, market_cap], [block_time2, market_cap2], ...]
for market_cap_as in (config.XLT, config.LTC):
caps = mongo_db.asset_marketcap_history.aggregate([
{"$match": {
"market_cap_as": market_cap_as,
"block_time": {
"$gte": datetime.datetime.utcfromtimestamp(start_ts)
} if end_ts == now_ts else {
"$gte": datetime.datetime.utcfromtimestamp(start_ts),
"$lte": datetime.datetime.utcfromtimestamp(end_ts)
}
}},
{"$project": {
"year": {"$year": "$block_time"},
"month": {"$month": "$block_time"},
"day": {"$dayOfMonth": "$block_time"},
"hour": {"$hour": "$block_time"},
"asset": 1,
"market_cap": 1,
}},
{"$sort": {"block_time": pymongo.ASCENDING}},
{"$group": {
"_id": {"asset": "$asset", "year": "$year", "month": "$month", "day": "$day", "hour": "$hour"},
"market_cap": {"$avg": "$market_cap"}, #use the average marketcap during the interval
}},
])
caps = [] if not caps['ok'] else caps['result']
data[market_cap_as] = {}
for e in caps:
interval_time = int(time.mktime(datetime.datetime(e['_id']['year'], e['_id']['month'], e['_id']['day'], e['_id']['hour']).timetuple()) * 1000)
data[market_cap_as].setdefault(e['_id']['asset'], [])
data[market_cap_as][e['_id']['asset']].append([interval_time, e['market_cap']])
results[market_cap_as] = []
for asset in data[market_cap_as]:
#for z in data[market_cap_as][asset]: assert z[0] and z[0] > 0 and z[1] and z[1] >= 0
results[market_cap_as].append({'name': asset,
'data': sorted(data[market_cap_as][asset], key=operator.itemgetter(0))})
return results
@dispatcher.add_method
def get_market_info(assets):
assets_market_info = list(mongo_db.asset_market_info.find({'asset': {'$in': assets}}, {'_id': 0}))
extended_asset_info = mongo_db.asset_extended_info.find({'asset': {'$in': assets}})
extended_asset_info_dict = {}
for e in extended_asset_info:
if not e.get('disabled', False): #skip assets marked disabled
extended_asset_info_dict[e['asset']] = e
for a in assets_market_info:
if a['asset'] in extended_asset_info_dict and extended_asset_info_dict[a['asset']].get('processed', False):
extended_info = extended_asset_info_dict[a['asset']]
a['extended_image'] = bool(extended_info.get('image', ''))
a['extended_description'] = extended_info.get('description', '')
a['extended_website'] = extended_info.get('website', '')
a['extended_pgpsig'] = extended_info.get('pgpsig', '')
else:
a['extended_image'] = a['extended_description'] = a['extended_website'] = a['extended_pgpsig'] = ''
return assets_market_info
@dispatcher.add_method
def get_market_info_leaderboard(limit=100):
"""returns market leaderboard data for both the XLT and LTC markets"""
#do two queries because we limit by our sorted results, and we might miss an asset with a high LTC trading value
# but little or no XLT trading activity, if we did just one query
assets_market_info_xlt = list(mongo_db.asset_market_info.find({}, {'_id': 0}).sort('market_cap_in_{}'.format(config.XLT.lower()), pymongo.DESCENDING).limit(limit))
assets_market_info_ltc = list(mongo_db.asset_market_info.find({}, {'_id': 0}).sort('market_cap_in_{}'.format(config.LTC.lower()), pymongo.DESCENDING).limit(limit))
assets_market_info = {
config.XLT.lower(): [a for a in assets_market_info_xlt if a['price_in_{}'.format(config.XLT.lower())]],
config.LTC.lower(): [a for a in assets_market_info_ltc if a['price_in_{}'.format(config.LTC.lower())]]
}
#throw on extended info, if it exists for a given asset
assets = list(set([a['asset'] for a in assets_market_info[config.XLT.lower()]] + [a['asset'] for a in assets_market_info[config.LTC.lower()]]))
extended_asset_info = mongo_db.asset_extended_info.find({'asset': {'$in': assets}})
extended_asset_info_dict = {}
for e in extended_asset_info:
if not e.get('disabled', False): #skip assets marked disabled
extended_asset_info_dict[e['asset']] = e
for r in (assets_market_info[config.XLT.lower()], assets_market_info[config.LTC.lower()]):
for a in r:
if a['asset'] in extended_asset_info_dict:
extended_info = extended_asset_info_dict[a['asset']]
if 'extended_image' not in a or 'extended_description' not in a or 'extended_website' not in a:
continue #asset has been recognized as having a JSON file description, but has not been successfully processed yet
a['extended_image'] = bool(extended_info.get('image', ''))
a['extended_description'] = extended_info.get('description', '')
a['extended_website'] = extended_info.get('website', '')
else:
a['extended_image'] = a['extended_description'] = a['extended_website'] = ''
return assets_market_info
@dispatcher.add_method
def get_market_price_history(asset1, asset2, start_ts=None, end_ts=None, as_dict=False):
"""Return block-by-block aggregated market history data for the specified asset pair, within the specified date range.
@returns List of lists (or list of dicts, if as_dict is specified).
* If as_dict is False, each embedded list has 8 elements [block time (epoch in MS), open, high, low, close, volume, # trades in block, block index]
* If as_dict is True, each dict in the list has the keys: block_time (epoch in MS), block_index, open, high, low, close, vol, count
Aggregated on an hourly basis
"""
now_ts = time.mktime(datetime.datetime.utcnow().timetuple())
if not end_ts: #default to current datetime
end_ts = now_ts
if not start_ts: #default to 180 days before the end date
start_ts = end_ts - (180 * 24 * 60 * 60)
base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
#get ticks -- open, high, low, close, volume
result = mongo_db.trades.aggregate([
{"$match": {
"base_asset": base_asset,
"quote_asset": quote_asset,
"block_time": {
"$gte": datetime.datetime.utcfromtimestamp(start_ts)
} if end_ts == now_ts else {
"$gte": datetime.datetime.utcfromtimestamp(start_ts),
"$lte": datetime.datetime.utcfromtimestamp(end_ts)
}
}},
{"$project": {
"year": {"$year": "$block_time"},
"month": {"$month": "$block_time"},
"day": {"$dayOfMonth": "$block_time"},
"hour": {"$hour": "$block_time"},
"block_index": 1,
"unit_price": 1,
"base_quantity_normalized": 1 #to derive volume
}},
{"$group": {
"_id": {"year": "$year", "month": "$month", "day": "$day", "hour": "$hour"},
"open": {"$first": "$unit_price"},
"high": {"$max": "$unit_price"},
"low": {"$min": "$unit_price"},
"close": {"$last": "$unit_price"},
"vol": {"$sum": "$base_quantity_normalized"},
"count": {"$sum": 1},
}},
{"$sort": SON([("_id.year", pymongo.ASCENDING), ("_id.month", pymongo.ASCENDING), ("_id.day", pymongo.ASCENDING), ("_id.hour", pymongo.ASCENDING)])},
])
if not result['ok'] or not len(result['result']):
return False
result = result['result']
midline = [((r['high'] + r['low']) / 2.0) for r in result]
if as_dict:
for i in xrange(len(result)):
result[i]['interval_time'] = int(time.mktime(datetime.datetime(
result[i]['_id']['year'], result[i]['_id']['month'], result[i]['_id']['day'], result[i]['_id']['hour']).timetuple()) * 1000)
result[i]['midline'] = midline[i]
del result[i]['_id']
return result
else:
list_result = []
for i in xrange(len(result)):
list_result.append([
int(time.mktime(datetime.datetime(
result[i]['_id']['year'], result[i]['_id']['month'], result[i]['_id']['day'], result[i]['_id']['hour']).timetuple()) * 1000),
result[i]['open'], result[i]['high'], result[i]['low'], result[i]['close'], result[i]['vol'],
result[i]['count'], midline[i]
])
return list_result
@dispatcher.add_method
def get_trade_history(asset1=None, asset2=None, start_ts=None, end_ts=None, limit=50):
"""
Gets the last N trades within a specific date range (normally for a specified asset pair, but this can
be left blank to get any/all trades).
"""
assert (asset1 and asset2) or (not asset1 and not asset2) #cannot have one asset, but not the other
if limit > 500:
raise Exception("Requesting history of too many trades")
now_ts = time.mktime(datetime.datetime.utcnow().timetuple())
if not end_ts: #default to current datetime
end_ts = now_ts
if not start_ts: #default to 30 days before the end date
start_ts = end_ts - (30 * 24 * 60 * 60)
filters = {
"block_time": {
"$gte": datetime.datetime.utcfromtimestamp(start_ts)
} if end_ts == now_ts else {
"$gte": datetime.datetime.utcfromtimestamp(start_ts),
"$lte": datetime.datetime.utcfromtimestamp(end_ts)
}
}
if asset1 and asset2:
base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
filters["base_asset"] = base_asset
filters["quote_asset"] = quote_asset
last_trades = mongo_db.trades.find(filters, {'_id': 0}).sort("block_time", pymongo.DESCENDING).limit(limit)
if not last_trades.count():
return False #no suitable trade data to form a market price
last_trades = list(last_trades)
return last_trades
def _get_order_book(base_asset, quote_asset,
bid_book_min_pct_fee_provided=None, bid_book_min_pct_fee_required=None, bid_book_max_pct_fee_required=None,
ask_book_min_pct_fee_provided=None, ask_book_min_pct_fee_required=None, ask_book_max_pct_fee_required=None):
"""Gets the current order book for a specified asset pair
@param: bid_book_min_pct_fee_provided / ask_book_min_pct_fee_provided: Only relevant for orders that give LTC. If specified,
the corresponding book is pruned down to only show orders providing a LTC fee at or above this percentage
@param: bid_book_min_pct_fee_required / ask_book_min_pct_fee_required: Only relevant for orders that get LTC. If specified,
the corresponding book is pruned down to only show orders requiring a LTC fee at or above this percentage
@param: bid_book_max_pct_fee_required / ask_book_max_pct_fee_required: Only relevant for orders that get LTC. If specified,
the corresponding book is pruned down to only show orders requiring a LTC fee at or below this percentage
"""
base_asset_info = mongo_db.tracked_assets.find_one({'asset': base_asset})
quote_asset_info = mongo_db.tracked_assets.find_one({'asset': quote_asset})
if not base_asset_info or not quote_asset_info:
raise Exception("Invalid asset(s)")
#TODO: limit # results to 8 or so for each book (we have to sort as well to limit)
base_bid_filters = [
{"field": "get_asset", "op": "==", "value": base_asset},
{"field": "give_asset", "op": "==", "value": quote_asset},
]
base_ask_filters = [
{"field": "get_asset", "op": "==", "value": quote_asset},
{"field": "give_asset", "op": "==", "value": base_asset},
]
if base_asset == config.LTC or quote_asset == config.LTC:
extra_filters = [
{'field': 'give_remaining', 'op': '>', 'value': 0}, #don't show empty LTC orders
{'field': 'get_remaining', 'op': '>', 'value': 0}, #don't show empty LTC orders
{'field': 'fee_required_remaining', 'op': '>=', 'value': 0},
{'field': 'fee_provided_remaining', 'op': '>=', 'value': 0},
]
base_bid_filters += extra_filters
base_ask_filters += extra_filters
base_bid_orders = util.call_jsonrpc_api("get_orders", {
'filters': base_bid_filters,
'show_expired': False,
'status': 'open',
'order_by': 'block_index',
'order_dir': 'asc',
}, abort_on_error=True)['result']
base_ask_orders = util.call_jsonrpc_api("get_orders", {
'filters': base_ask_filters,
'show_expired': False,
'status': 'open',
'order_by': 'block_index',
'order_dir': 'asc',
}, abort_on_error=True)['result']
def get_o_pct(o):
if o['give_asset'] == config.LTC: #NB: fee_provided could be zero here
pct_fee_provided = float(( D(o['fee_provided_remaining']) / D(o['give_quantity']) ))
else: pct_fee_provided = None
if o['get_asset'] == config.LTC: #NB: fee_required could be zero here
pct_fee_required = float(( D(o['fee_required_remaining']) / D(o['get_quantity']) ))
else: pct_fee_required = None
return pct_fee_provided, pct_fee_required
#filter results by pct_fee_provided and pct_fee_required for LTC pairs as appropriate
filtered_base_bid_orders = []
filtered_base_ask_orders = []
if base_asset == config.LTC or quote_asset == config.LTC:
for o in base_bid_orders:
pct_fee_provided, pct_fee_required = get_o_pct(o)
addToBook = True
if bid_book_min_pct_fee_provided is not None and pct_fee_provided is not None and pct_fee_provided < bid_book_min_pct_fee_provided:
addToBook = False
if bid_book_min_pct_fee_required is not None and pct_fee_required is not None and pct_fee_required < bid_book_min_pct_fee_required:
addToBook = False
if bid_book_max_pct_fee_required is not None and pct_fee_required is not None and pct_fee_required > bid_book_max_pct_fee_required:
addToBook = False
if addToBook: filtered_base_bid_orders.append(o)
for o in base_ask_orders:
pct_fee_provided, pct_fee_required = get_o_pct(o)
addToBook = True
if ask_book_min_pct_fee_provided is not None and pct_fee_provided is not None and pct_fee_provided < ask_book_min_pct_fee_provided:
addToBook = False
if ask_book_min_pct_fee_required is not None and pct_fee_required is not None and pct_fee_required < ask_book_min_pct_fee_required:
addToBook = False
if ask_book_max_pct_fee_required is not None and pct_fee_required is not None and pct_fee_required > ask_book_max_pct_fee_required:
addToBook = False
if addToBook: filtered_base_ask_orders.append(o)
else:
filtered_base_bid_orders += base_bid_orders
filtered_base_ask_orders += base_ask_orders
def make_book(orders, isBidBook):
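"""Aggregates raw orders into price levels: for each unit price, sums the outstanding base
quantity and counts the number of open orders at that price. Bid books are sorted by
descending price, ask books by ascending price."""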
book = {}
for o in orders:
if o['give_asset'] == base_asset:
if base_asset == config.LTC and o['give_quantity'] <= config.ORDER_LTC_DUST_LIMIT_CUTOFF:
continue #filter dust orders, if necessary
give_quantity = util_litecoin.normalize_quantity(o['give_quantity'], base_asset_info['divisible'])
get_quantity = util_litecoin.normalize_quantity(o['get_quantity'], quote_asset_info['divisible'])
unit_price = float(( D(get_quantity) / D(give_quantity) ))
remaining = util_litecoin.normalize_quantity(o['give_remaining'], base_asset_info['divisible'])
else:
if quote_asset == config.LTC and o['give_quantity'] <= config.ORDER_LTC_DUST_LIMIT_CUTOFF:
continue #filter dust orders, if necessary
give_quantity = util_litecoin.normalize_quantity(o['give_quantity'], quote_asset_info['divisible'])
get_quantity = util_litecoin.normalize_quantity(o['get_quantity'], base_asset_info['divisible'])
unit_price = float(( D(give_quantity) / D(get_quantity) ))
remaining = util_litecoin.normalize_quantity(o['get_remaining'], base_asset_info['divisible'])
id = "%s_%s_%s" % (base_asset, quote_asset, unit_price)
#^ key = {base}_{bid}_{unit_price}, values ref entries in book
book.setdefault(id, {'unit_price': unit_price, 'quantity': 0, 'count': 0})
book[id]['quantity'] += remaining #base quantity outstanding
book[id]['count'] += 1 #num orders at this price level
book = sorted(book.itervalues(), key=operator.itemgetter('unit_price'), reverse=isBidBook)
#^ convert to list and sort -- bid book = descending, ask book = ascending
return book
#compile into a single book, at volume tiers
base_bid_book = make_book(filtered_base_bid_orders, True)
base_ask_book = make_book(filtered_base_ask_orders, False)
#get stats like the spread and median
if base_bid_book and base_ask_book:
#don't take abs() here: this is "the amount by which the ask price exceeds the bid", so it can be negative
# if there is overlap in the book
bid_ask_spread = float(( D(base_ask_book[0]['unit_price']) - D(base_bid_book[0]['unit_price']) ))
bid_ask_median = float(( D( max(base_ask_book[0]['unit_price'], base_bid_book[0]['unit_price']) ) - (D(abs(bid_ask_spread)) / 2) ))
else:
bid_ask_spread = 0
bid_ask_median = 0
#compose depth and round out quantities
bid_depth = D(0)
for o in base_bid_book:
o['quantity'] = float(D(o['quantity']))
bid_depth += D(o['quantity'])
o['depth'] = float(D(bid_depth))
bid_depth = float(D(bid_depth))
ask_depth = D(0)
for o in base_ask_book:
o['quantity'] = float(D(o['quantity']))
ask_depth += D(o['quantity'])
o['depth'] = float(D(ask_depth))
ask_depth = float(D(ask_depth))
#compose raw orders
orders = filtered_base_bid_orders + filtered_base_ask_orders
for o in orders:
#add in the blocktime to help makes interfaces more user-friendly (i.e. avoid displaying block
# indexes and display datetimes instead)
o['block_time'] = time.mktime(util.get_block_time(o['block_index']).timetuple()) * 1000
#for orders where LTC is the give asset, also return online status of the user
for o in orders:
if o['give_asset'] == config.LTC:
r = mongo_db.ltc_open_orders.find_one({'order_tx_hash': o['tx_hash']})
o['_is_online'] = (r['wallet_id'] in siofeeds.onlineClients) if r else False
else:
o['_is_online'] = None #does not apply in this case
result = {
'base_bid_book': base_bid_book,
'base_ask_book': base_ask_book,
'bid_depth': bid_depth,
'ask_depth': ask_depth,
'bid_ask_spread': bid_ask_spread,
'bid_ask_median': bid_ask_median,
'raw_orders': orders,
'base_asset': base_asset,
'quote_asset': quote_asset
}
return result
@dispatcher.add_method
def get_order_book_simple(asset1, asset2, min_pct_fee_provided=None, max_pct_fee_required=None):
#DEPRECATED 1.5
base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
result = _get_order_book(base_asset, quote_asset,
bid_book_min_pct_fee_provided=min_pct_fee_provided,
bid_book_max_pct_fee_required=max_pct_fee_required,
ask_book_min_pct_fee_provided=min_pct_fee_provided,
ask_book_max_pct_fee_required=max_pct_fee_required)
return result
@dispatcher.add_method
#DEPRECATED 1.5
def get_order_book_buysell(buy_asset, sell_asset, pct_fee_provided=None, pct_fee_required=None):
base_asset, quote_asset = util.assets_to_asset_pair(buy_asset, sell_asset)
bid_book_min_pct_fee_provided = None
bid_book_min_pct_fee_required = None
bid_book_max_pct_fee_required = None
ask_book_min_pct_fee_provided = None
ask_book_min_pct_fee_required = None
ask_book_max_pct_fee_required = None
if base_asset == config.LTC:
if buy_asset == config.LTC:
#if LTC is base asset and we're buying it, we're buying the BASE. we require a LTC fee (we're on the bid (bottom) book and we want a lower price)
# - show BASE buyers (bid book) that require a LTC fee >= what we require (our side of the book)
# - show BASE sellers (ask book) that provide a LTC fee >= what we require
bid_book_min_pct_fee_required = pct_fee_required #my competition at the given fee required
ask_book_min_pct_fee_provided = pct_fee_required
elif sell_asset == config.LTC:
#if LTC is base asset and we're selling it, we're selling the BASE. we provide a LTC fee (we're on the ask (top) book and we want a higher price)
# - show BASE buyers (bid book) that provide a LTC fee >= what we provide
# - show BASE sellers (ask book) that require a LTC fee <= what we provide (our side of the book)
bid_book_max_pct_fee_required = pct_fee_provided
ask_book_min_pct_fee_provided = pct_fee_provided #my competition at the given fee provided
elif quote_asset == config.LTC:
assert base_asset == config.XLT #only time when this is the case
if buy_asset == config.LTC:
#if LTC is quote asset and we're buying it, we're selling the BASE. we require a LTC fee (we're on the ask (top) book and we want a higher price)
# - show BASE buyers (bid book) that provide a LTC fee >= what we require
# - show BASE sellers (ask book) that require a LTC fee >= what we require (our side of the book)
bid_book_min_pct_fee_provided = pct_fee_required
ask_book_min_pct_fee_required = pct_fee_required #my competition at the given fee required
elif sell_asset == config.LTC:
#if LTC is quote asset and we're selling it, we're buying the BASE. we provide a LTC fee (we're on the bid (bottom) book and we want a lower price)
# - show BASE buyers (bid book) that provide a LTC fee >= what we provide (our side of the book)
# - show BASE sellers (ask book) that require a LTC fee <= what we provide
bid_book_min_pct_fee_provided = pct_fee_provided #my competition at the given fee provided
ask_book_max_pct_fee_required = pct_fee_provided
result = _get_order_book(base_asset, quote_asset,
bid_book_min_pct_fee_provided=bid_book_min_pct_fee_provided,
bid_book_min_pct_fee_required=bid_book_min_pct_fee_required,
bid_book_max_pct_fee_required=bid_book_max_pct_fee_required,
ask_book_min_pct_fee_provided=ask_book_min_pct_fee_provided,
ask_book_min_pct_fee_required=ask_book_min_pct_fee_required,
ask_book_max_pct_fee_required=ask_book_max_pct_fee_required)
#filter down raw_orders to be only open sell orders for what the caller is buying
open_sell_orders = []
for o in result['raw_orders']:
if o['give_asset'] == buy_asset:
open_sell_orders.append(o)
result['raw_orders'] = open_sell_orders
return result
@dispatcher.add_method
def get_transaction_stats(start_ts=None, end_ts=None):
now_ts = time.mktime(datetime.datetime.utcnow().timetuple())
if not end_ts: #default to current datetime
end_ts = now_ts
if not start_ts: #default to 360 days before the end date
start_ts = end_ts - (360 * 24 * 60 * 60)
stats = mongo_db.transaction_stats.aggregate([
{"$match": {
"block_time": {
"$gte": datetime.datetime.utcfromtimestamp(start_ts)
} if end_ts == now_ts else {
"$gte": datetime.datetime.utcfromtimestamp(start_ts),
"$lte": datetime.datetime.utcfromtimestamp(end_ts)
}
}},
{"$project": {
"year": {"$year": "$block_time"},
"month": {"$month": "$block_time"},
"day": {"$dayOfMonth": "$block_time"},
"category": 1,
}},
{"$group": {
"_id": {"year": "$year", "month": "$month", "day": "$day", "category": "$category"},
"count": {"$sum": 1},
}}
#{"$sort": SON([("_id.year", pymongo.ASCENDING), ("_id.month", pymongo.ASCENDING), ("_id.day", pymongo.ASCENDING), ("_id.hour", pymongo.ASCENDING), ("_id.category", pymongo.ASCENDING)])},
])
times = {}
categories = {}
stats = [] if not stats['ok'] else stats['result']
for e in stats:
categories.setdefault(e['_id']['category'], {})
time_val = int(time.mktime(datetime.datetime(e['_id']['year'], e['_id']['month'], e['_id']['day']).timetuple()) * 1000)
times.setdefault(time_val, True)
categories[e['_id']['category']][time_val] = e['count']
times_list = times.keys()
times_list.sort()
#fill in each array with all found timestamps
for e in categories:
a = []
for t in times_list:
a.append([t, categories[e][t] if t in categories[e] else 0])
categories[e] = a #replace with array data
#take out to final data structure
categories_list = []
for k, v in categories.iteritems():
categories_list.append({'name': k, 'data': v})
return categories_list
@dispatcher.add_method
def get_wallet_stats(start_ts=None, end_ts=None):
now_ts = time.mktime(datetime.datetime.utcnow().timetuple())
if not end_ts: #default to current datetime
end_ts = now_ts
if not start_ts: #default to 360 days before the end date
start_ts = end_ts - (360 * 24 * 60 * 60)
num_wallets_mainnet = mongo_db.preferences.find({'network': 'mainnet'}).count()
num_wallets_testnet = mongo_db.preferences.find({'network': 'testnet'}).count()
num_wallets_unknown = mongo_db.preferences.find({'network': None}).count()
wallet_stats = []
for net in ['mainnet', 'testnet']:
filters = {
"when": {
"$gte": datetime.datetime.utcfromtimestamp(start_ts)
} if end_ts == now_ts else {
"$gte": datetime.datetime.utcfromtimestamp(start_ts),
"$lte": datetime.datetime.utcfromtimestamp(end_ts)
},
'network': net
}
stats = mongo_db.wallet_stats.find(filters).sort('when', pymongo.ASCENDING)
new_wallet_counts = []
login_counts = []
distinct_login_counts = []
for e in stats:
d = int(time.mktime(datetime.datetime(e['when'].year, e['when'].month, e['when'].day).timetuple()) * 1000)
if 'distinct_login_count' in e: distinct_login_counts.append([ d, e['distinct_login_count'] ])
if 'login_count' in e: login_counts.append([ d, e['login_count'] ])
if 'new_count' in e: new_wallet_counts.append([ d, e['new_count'] ])
wallet_stats.append({'name': '%s: Logins' % net.capitalize(), 'data': login_counts})
wallet_stats.append({'name': '%s: Active Wallets' % net.capitalize(), 'data': distinct_login_counts})
wallet_stats.append({'name': '%s: New Wallets' % net.capitalize(), 'data': new_wallet_counts})
return {
'num_wallets_mainnet': num_wallets_mainnet,
'num_wallets_testnet': num_wallets_testnet,
'num_wallets_unknown': num_wallets_unknown,
'wallet_stats': wallet_stats}
@dispatcher.add_method
def get_owned_assets(addresses):
"""Gets a list of owned assets for one or more addresses"""
result = mongo_db.tracked_assets.find({
'owner': {"$in": addresses}
}, {"_id":0}).sort("asset", pymongo.ASCENDING)
return list(result)
@dispatcher.add_method
def get_asset_pair_market_info(asset1=None, asset2=None, limit=50):
"""Given two arbitrary assets, returns the base asset and the quote asset.
"""
#DEPRECATED 1.5
assert (asset1 and asset2) or (asset1 is None and asset2 is None)
if asset1 and asset2:
base_asset, quote_asset = util.assets_to_asset_pair(asset1, asset2)
pair_info = mongo_db.asset_pair_market_info.find({'base_asset': base_asset, 'quote_asset': quote_asset}, {'_id': 0})
else:
pair_info = mongo_db.asset_pair_market_info.find({}, {'_id': 0}).sort('completed_trades_count', pymongo.DESCENDING).limit(limit)
#^ sort by this for now, may want to sort by a market_cap value in the future
return list(pair_info) or []
@dispatcher.add_method
def get_asset_extended_info(asset):
ext_info = mongo_db.asset_extended_info.find_one({'asset': asset}, {'_id': 0})
return ext_info or False
@dispatcher.add_method
def get_asset_history(asset, reverse=False):
"""
Returns a list of changes for the specified asset, from its inception to the current time.
@param asset: The asset to retrieve a history on
@param reverse: By default, the history is returned in the order of oldest to newest. Set this parameter to True
to return items in the order of newest to oldest.
@return:
Changes are returned as a list of dicts, with each dict having the following format:
* type: One of 'created', 'issued_more', 'changed_description', 'locked', 'transferred', 'called_back'
* 'at_block': The block number this change took effect
* 'at_block_time': The block time this change took effect
* IF type = 'created': Has the following fields, as specified when the asset was initially created:
* owner, description, divisible, locked, total_issued, total_issued_normalized
* IF type = 'issued_more':
* 'additional': The additional quantity issued (raw)
* 'additional_normalized': The additional quantity issued (normalized)
* 'total_issued': The total issuance after this change (raw)
* 'total_issued_normalized': The total issuance after this change (normalized)
* IF type = 'changed_description':
* 'prev_description': The old description
* 'new_description': The new description
* IF type = 'locked': NO EXTRA FIELDS
* IF type = 'transferred':
* 'prev_owner': The address the asset was transferred from
* 'new_owner': The address the asset was transferred to
* IF type = 'called_back':
* 'percentage': The percentage of the asset called back (between 0 and 100)
"""
asset = mongo_db.tracked_assets.find_one({ 'asset': asset }, {"_id":0})
if not asset:
raise Exception("Unrecognized asset")
#run down through _history and compose a diff log
history = []
raw = asset['_history'] + [asset,] #oldest to newest. add on the current state
prev = None
for i in xrange(len(raw)): #oldest to newest
if i == 0:
assert raw[i]['_change_type'] == 'created'
history.append({
'type': 'created',
'owner': raw[i]['owner'],
'description': raw[i]['description'],
'divisible': raw[i]['divisible'],
'locked': raw[i]['locked'],
'total_issued': raw[i]['total_issued'],
'total_issued_normalized': raw[i]['total_issued_normalized'],
'at_block': raw[i]['_at_block'],
'at_block_time': time.mktime(raw[i]['_at_block_time'].timetuple()) * 1000,
})
prev = raw[i]
continue
assert prev
if raw[i]['_change_type'] == 'locked':
history.append({
'type': 'locked',
'at_block': raw[i]['_at_block'],
'at_block_time': time.mktime(raw[i]['_at_block_time'].timetuple()) * 1000,
})
elif raw[i]['_change_type'] == 'transferred':
history.append({
'type': 'transferred',
'at_block': raw[i]['_at_block'],
'at_block_time': time.mktime(raw[i]['_at_block_time'].timetuple()) * 1000,
'prev_owner': prev['owner'],
'new_owner': raw[i]['owner'],
})
elif raw[i]['_change_type'] == 'changed_description':
history.append({
'type': 'changed_description',
'at_block': raw[i]['_at_block'],
'at_block_time': time.mktime(raw[i]['_at_block_time'].timetuple()) * 1000,
'prev_description': prev['description'],
'new_description': raw[i]['description'],
})
else: #issue additional
assert raw[i]['total_issued'] - prev['total_issued'] > 0
history.append({
'type': 'issued_more',
'at_block': raw[i]['_at_block'],
'at_block_time': time.mktime(raw[i]['_at_block_time'].timetuple()) * 1000,
'additional': raw[i]['total_issued'] - prev['total_issued'],
'additional_normalized': raw[i]['total_issued_normalized'] - prev['total_issued_normalized'],
'total_issued': raw[i]['total_issued'],
'total_issued_normalized': raw[i]['total_issued_normalized'],
})
prev = raw[i]
#get callbacks externally via the cpd API, and merge in with the asset history we composed
callbacks = util.call_jsonrpc_api("get_callbacks",
{'filters': {'field': 'asset', 'op': '==', 'value': asset['asset']}}, abort_on_error=True)['result']
final_history = []
if len(callbacks):
for e in history: #history goes from earliest to latest
if callbacks[0]['block_index'] < e['at_block']: #throw the callback entry in before this one
block_time = util.get_block_time(callbacks[0]['block_index'])
assert block_time
final_history.append({
'type': 'called_back',
'at_block': callbacks[0]['block_index'],
'at_block_time': time.mktime(block_time.timetuple()) * 1000,
'percentage': callbacks[0]['fraction'] * 100,
})
callbacks.pop(0)
else:
final_history.append(e)
else:
final_history = history
if reverse: final_history.reverse()
return final_history
@dispatcher.add_method
def record_ltc_open_order(wallet_id, order_tx_hash):
"""Records an association between a wallet ID and order TX ID for a trade where LTC is being SOLD, to allow
buyers to see which sellers of the LTC are "online" (which can lead to a better result as a LTCpay will be required
to complete any trades where LTC is involved, and the seller (or at least their wallet) must be online for this to happen"""
#ensure the wallet_id exists
result = mongo_db.preferences.find_one({"wallet_id": wallet_id})
if not result: raise Exception("WalletID does not exist")
mongo_db.ltc_open_orders.insert({
'wallet_id': wallet_id,
'order_tx_hash': order_tx_hash,
'when_created': datetime.datetime.utcnow()
})
return True
@dispatcher.add_method
def cancel_ltc_open_order(wallet_id, order_tx_hash):
#DEPRECATED 1.5
mongo_db.ltc_open_orders.remove({'order_tx_hash': order_tx_hash, 'wallet_id': wallet_id})
#^ wallet_id is used more for security here so random folks can't remove orders from this collection just by tx hash
return True
@dispatcher.add_method
def get_balance_history(asset, addresses, normalize=True, start_ts=None, end_ts=None):
"""Retrieves the ordered balance history for a given address (or list of addresses) and asset pair, within the specified date range
@param normalize: If set to True, return quantities that (if the asset is divisible) have been divided by 100M (satoshi).
@return: A list of tuples, with the first entry of each tuple being the block time (epoch TS), and the second being the new balance
at that block time.
"""
if not isinstance(addresses, list):
raise Exception("addresses must be a list of addresses, even if it just contains one address")
asset_info = mongo_db.tracked_assets.find_one({'asset': asset})
if not asset_info:
raise Exception("Asset does not exist.")
now_ts = time.mktime(datetime.datetime.utcnow().timetuple())
if not end_ts: #default to current datetime
end_ts = now_ts
if not start_ts: #default to 30 days before the end date
start_ts = end_ts - (30 * 24 * 60 * 60)
results = []
for address in addresses:
result = mongo_db.balance_changes.find({
'address': address,
'asset': asset,
"block_time": {
"$gte": datetime.datetime.utcfromtimestamp(start_ts)
} if end_ts == now_ts else {
"$gte": datetime.datetime.utcfromtimestamp(start_ts),
"$lte": datetime.datetime.utcfromtimestamp(end_ts)
}
}).sort("block_time", pymongo.ASCENDING)
entry = {
'name': address,
'data': [
(time.mktime(r['block_time'].timetuple()) * 1000,
r['new_balance_normalized'] if normalize else r['new_balance']
) for r in result]
}
results.append(entry)
return results
@dispatcher.add_method
def get_num_users_online():
#gets the current number of users attached to the server's chat feed
return len(siofeeds.onlineClients)
@dispatcher.add_method
def is_chat_handle_in_use(handle):
#DEPRECATED 1.5
results = mongo_db.chat_handles.find({ 'handle': { '$regex': '^%s$' % handle, '$options': 'i' } })
return True if results.count() else False
@dispatcher.add_method
def get_chat_handle(wallet_id):
result = mongo_db.chat_handles.find_one({"wallet_id": wallet_id})
if not result: return False #doesn't exist
result['last_touched'] = time.mktime(time.gmtime())
mongo_db.chat_handles.save(result)
data = {
'handle': re.sub('[^\sA-Za-z0-9_-]', "", result['handle']),
'is_op': result.get('is_op', False),
'last_updated': result.get('last_updated', None)
} if result else {}
banned_until = result.get('banned_until', None)
if banned_until != -1 and banned_until is not None:
data['banned_until'] = int(time.mktime(banned_until.timetuple())) * 1000 #convert to epoch ts in ms
else:
data['banned_until'] = banned_until #-1 or None
return data
@dispatcher.add_method
def store_chat_handle(wallet_id, handle):
"""Set or update a chat handle"""
if not isinstance(handle, basestring):
raise Exception("Invalid chat handle: bad data type")
if not re.match(r'^[\sA-Za-z0-9_-]{4,12}$', handle):
raise Exception("Invalid chat handle: bad syntax/length")
#see if this handle already exists (case insensitive)
results = mongo_db.chat_handles.find({ 'handle': { '$regex': '^%s$' % handle, '$options': 'i' } })
if results.count():
if results[0]['wallet_id'] == wallet_id:
return True #handle already saved for this wallet ID
else:
raise Exception("Chat handle already is in use")
mongo_db.chat_handles.update(
{'wallet_id': wallet_id},
{"$set": {
'wallet_id': wallet_id,
'handle': handle,
'last_updated': time.mktime(time.gmtime()),
'last_touched': time.mktime(time.gmtime())
}
}, upsert=True)
    #^ last_updated MUST be in UTC, as it will be compared against other servers
return True
@dispatcher.add_method
def get_chat_history(start_ts=None, end_ts=None, handle=None, limit=1000):
#DEPRECATED 1.5
now_ts = time.mktime(datetime.datetime.utcnow().timetuple())
if not end_ts: #default to current datetime
end_ts = now_ts
    if not start_ts: #default to 30 days before the end date
start_ts = end_ts - (30 * 24 * 60 * 60)
if limit >= 5000:
raise Exception("Requesting too many lines (limit too high")
filters = {
"when": {
"$gte": datetime.datetime.utcfromtimestamp(start_ts)
} if end_ts == now_ts else {
"$gte": datetime.datetime.utcfromtimestamp(start_ts),
"$lte": datetime.datetime.utcfromtimestamp(end_ts)
}
}
if handle:
filters['handle'] = handle
chat_history = mongo_db.chat_history.find(filters, {'_id': 0}).sort("when", pymongo.DESCENDING).limit(limit)
if not chat_history.count():
        return False #no chat history matching the given criteria
chat_history = list(chat_history)
return chat_history
@dispatcher.add_method
def is_wallet_online(wallet_id):
return wallet_id in siofeeds.onlineClients
@dispatcher.add_method
def get_preferences(wallet_id, for_login=False, network=None):
"""Gets stored wallet preferences
@param network: only required if for_login is specified. One of: 'mainnet' or 'testnet'
"""
if network not in (None, 'mainnet', 'testnet'):
raise Exception("Invalid network parameter setting")
if for_login and network is None:
raise Exception("network parameter required if for_login is set")
result = mongo_db.preferences.find_one({"wallet_id": wallet_id})
if not result: return False #doesn't exist
last_touched_date = datetime.datetime.utcfromtimestamp(result['last_touched']).date()
now = datetime.datetime.utcnow()
if for_login: #record user login
ip = flask.request.headers.get('X-Real-Ip', flask.request.remote_addr)
ua = flask.request.headers.get('User-Agent', '')
mongo_db.login_history.insert({'wallet_id': wallet_id, 'when': now, 'network': network, 'action': 'login', 'ip': ip, 'ua': ua})
result['last_touched'] = time.mktime(time.gmtime())
mongo_db.preferences.save(result)
return {
'preferences': json.loads(result['preferences']),
'last_updated': result.get('last_updated', None)
}
@dispatcher.add_method
def store_preferences(wallet_id, preferences, for_login=False, network=None, referer=None):
"""Stores freeform wallet preferences
@param network: only required if for_login is specified. One of: 'mainnet' or 'testnet'
"""
if network not in (None, 'mainnet', 'testnet'):
raise Exception("Invalid network parameter setting")
if for_login and network is None:
raise Exception("network parameter required if for_login is set")
if not isinstance(preferences, dict):
raise Exception("Invalid preferences object")
try:
preferences_json = json.dumps(preferences)
except:
raise Exception("Cannot dump preferences to JSON")
now = datetime.datetime.utcnow()
#sanity check around max size
if len(preferences_json) >= PREFERENCES_MAX_LENGTH:
raise Exception("Preferences object is too big.")
if for_login: #mark this as a new signup IF the wallet doesn't exist already
existing_record = mongo_db.login_history.find({'wallet_id': wallet_id, 'network': network, 'action': 'create'})
if existing_record.count() == 0:
ip = flask.request.headers.get('X-Real-Ip', flask.request.remote_addr)
ua = flask.request.headers.get('User-Agent', '')
mongo_db.login_history.insert({'wallet_id': wallet_id, 'when': now,
'network': network, 'action': 'create', 'referer': referer, 'ip': ip, 'ua': ua})
mongo_db.login_history.insert({'wallet_id': wallet_id, 'when': now,
'network': network, 'action': 'login', 'ip': ip, 'ua': ua}) #also log a wallet login
now_ts = time.mktime(time.gmtime())
mongo_db.preferences.update(
{'wallet_id': wallet_id},
{'$set': {
'wallet_id': wallet_id,
'preferences': preferences_json,
'last_updated': now_ts,
'last_touched': now_ts },
'$setOnInsert': {'when_created': now_ts, 'network': network}
}, upsert=True)
    #^ last_updated MUST be in GMT, as it will be compared against other servers
return True
@dispatcher.add_method
def proxy_to_litetokensd(method='', params=[]):
if method=='sql': raise Exception("Invalid method")
result = None
cache_key = None
if redis_client: #check for a precached result and send that back instead
cache_key = method + '||' + base64.b64encode(json.dumps(params).encode()).decode()
#^ must use encoding (e.g. base64) since redis doesn't allow spaces in its key names
# (also shortens the hashing key for better performance)
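        # Illustrative sketch only (hypothetical params): for method='get_running_info' and
        # params=[], the key becomes
        #   'get_running_info||' + base64.b64encode(json.dumps([]).encode()).decode()
        # which keeps the key free of spaces and reasonably short, as noted above.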
result = redis_client.get(cache_key)
if result:
try:
result = json.loads(result)
except Exception, e:
logging.warn("Error loading JSON from cache: %s, cached data: '%s'" % (e, result))
result = None #skip from reading from cache and just make the API call
if result is None: #cache miss or cache disabled
result = util.call_jsonrpc_api(method, params)
if redis_client: #cache miss
redis_client.setex(cache_key, DEFAULT_COUNTERPARTYD_API_CACHE_PERIOD, json.dumps(result))
#^TODO: we may want to have different cache periods for different types of data
if 'error' in result:
if result['error'].get('data', None):
errorMsg = result['error']['data'].get('message', result['error']['message'])
else:
errorMsg = json.dumps(result['error'])
raise Exception(errorMsg.encode('ascii','ignore'))
#decode out unicode for now (json-rpc lib was made for python 3.3 and does str(errorMessage) internally,
# which messes up w/ unicode under python 2.x)
return result['result']
@dispatcher.add_method
def get_bets(bet_type, feed_address, deadline, target_value=None, leverage=5040):
bets = betting.find_bets(bet_type, feed_address, deadline, target_value=target_value, leverage=leverage)
return bets
@dispatcher.add_method
def get_user_bets(addresses = [], status="open"):
bets = betting.find_user_bets(mongo_db, addresses, status)
return bets
@dispatcher.add_method
def get_feed(address_or_url = ''):
feed = betting.find_feed(mongo_db, address_or_url)
return feed
@dispatcher.add_method
def get_feeds_by_source(addresses = []):
feed = betting.get_feeds_by_source(mongo_db, addresses)
return feed
@dispatcher.add_method
def parse_base64_feed(base64_feed):
feed = betting.parse_base64_feed(base64_feed)
return feed
@dispatcher.add_method
def get_open_rps_count(possible_moves = 3, exclude_addresses = []):
return rps.get_open_rps_count(possible_moves, exclude_addresses)
@dispatcher.add_method
def get_user_rps(addresses):
return rps.get_user_rps(addresses)
@dispatcher.add_method
def get_users_pairs(addresses=[], max_pairs=12):
return dex.get_users_pairs(addresses, max_pairs, quote_assets=['XLT', 'XLTC'])
@dispatcher.add_method
def get_market_orders(asset1, asset2, addresses=[], min_fee_provided=0.95, max_fee_required=0.95):
return dex.get_market_orders(asset1, asset2, addresses, None, min_fee_provided, max_fee_required)
@dispatcher.add_method
def get_market_trades(asset1, asset2, addresses=[], limit=50):
return dex.get_market_trades(asset1, asset2, addresses, limit)
@dispatcher.add_method
def get_markets_list(quote_asset = None, order_by=None):
return dex.get_markets_list(mongo_db, quote_asset=quote_asset, order_by=order_by)
@dispatcher.add_method
def get_market_details(asset1, asset2, min_fee_provided=0.95, max_fee_required=0.95):
return dex.get_market_details(asset1, asset2, min_fee_provided, max_fee_required, mongo_db)
@dispatcher.add_method
def get_vennd_machine():
# https://gist.github.com/JahPowerBit/655bee2b35d9997ac0af
if config.VENDING_MACHINE_PROVIDER is not None:
return util.get_url(config.VENDING_MACHINE_PROVIDER)
else:
return []
@dispatcher.add_method
def get_pubkey_for_address(address):
#returns None if the address has made 0 transactions (as we wouldn't be able to get the public key)
return blockchain.get_pubkey_for_address(address) or False
@dispatcher.add_method
def create_armory_utx(unsigned_tx_hex, public_key_hex):
if not config.ARMORY_UTXSVR_ENABLE:
raise Exception("Support for this feature is not enabled on this system")
endpoint = "http://127.0.0.1:%s/" % (
config.ARMORY_UTXSVR_PORT_MAINNET if not config.TESTNET else config.ARMORY_UTXSVR_PORT_TESTNET)
params = {'unsigned_tx_hex': unsigned_tx_hex, 'public_key_hex': public_key_hex}
utx_ascii = util.call_jsonrpc_api("serialize_unsigned_tx", params=params, endpoint=endpoint, abort_on_error=True)['result']
return utx_ascii
@dispatcher.add_method
def convert_armory_signedtx_to_raw_hex(signed_tx_ascii):
if not config.ARMORY_UTXSVR_ENABLE:
raise Exception("Support for this feature is not enabled on this system")
endpoint = "http://127.0.0.1:%s/" % (
config.ARMORY_UTXSVR_PORT_MAINNET if not config.TESTNET else config.ARMORY_UTXSVR_PORT_TESTNET)
params = {'signed_tx_ascii': signed_tx_ascii}
raw_tx_hex = util.call_jsonrpc_api("convert_signed_tx_to_raw_hex", params=params, endpoint=endpoint, abort_on_error=True)['result']
return raw_tx_hex
@dispatcher.add_method
def create_support_case(name, from_email, problem, screenshot=None, addtl_info=''):
"""create an email with the information received
    @param screenshot: The base64 text of the screenshot itself, prefixed with a data:image/png;base64, header
@param addtl_info: A JSON-encoded string of a dict with additional information to include in the support request
"""
import smtplib
import email.utils
from email.header import Header
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.mime.image import MIMEImage
if not config.SUPPORT_EMAIL:
raise Exception("Sending of support emails are disabled on the server: no SUPPORT_EMAIL address set")
if not email.utils.parseaddr(from_email)[1]: #should have been validated in the form
raise Exception("Invalid support email address")
try:
if screenshot:
screenshot_data = screenshot.split(',', 1)[1]
screenshot_data_decoded = base64.b64decode(screenshot_data)
except:
raise Exception("screenshot data format unexpected")
try:
addtl_info = json.loads(addtl_info)
addtl_info = json.dumps(addtl_info, indent=1, sort_keys=False)
except:
raise Exception("addtl_info data format unexpected")
from_email_formatted = email.utils.formataddr((name, from_email))
msg = MIMEMultipart()
msg['Subject'] = Header((problem[:75] + '...') if len(problem) > 75 else problem, 'utf-8')
msg['From'] = from_email_formatted
msg['Reply-to'] = from_email_formatted
msg['To'] = config.SUPPORT_EMAIL
msg['Date'] = email.utils.formatdate(localtime=True)
msg_text = MIMEText("""Problem: %s\n\nAdditional Info:\n%s""" % (problem, addtl_info))
msg.attach(msg_text)
if screenshot:
image = MIMEImage(screenshot_data_decoded, name="screenshot.png")
msg.attach(image)
server = smtplib.SMTP(config.EMAIL_SERVER)
server.sendmail(from_email, config.SUPPORT_EMAIL, msg.as_string())
return True
def _set_cors_headers(response):
if config.RPC_ALLOW_CORS:
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'
        response.headers['Access-Control-Allow-Headers'] = 'DNT,X-Mx-ReqToken,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type'
@app.route('/', methods=["OPTIONS",])
@app.route('/api/', methods=["OPTIONS",])
def handle_options():
response = flask.Response('', 204)
_set_cors_headers(response)
return response
@app.route('/', methods=["GET",])
@app.route('/api/', methods=["GET",])
def handle_get():
if flask.request.headers.get("Content-Type", None) == 'application/csp-report':
try:
data_json = flask.request.get_data().decode('utf-8')
data = json.loads(data_json)
assert 'csp-report' in data
except Exception, e:
obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(data="Invalid JSON-RPC 2.0 request format")
return flask.Response(obj_error.json.encode(), 200, mimetype='application/json')
tx_logger.info("***CSP SECURITY --- %s" % data_json)
return flask.Response('', 200)
#"ping" litetokensd to test
cpd_s = time.time()
cpd_result_valid = True
try:
cpd_status = util.call_jsonrpc_api("get_running_info", abort_on_error=True)['result']
except:
cpd_result_valid = False
cpd_e = time.time()
#"ping" liteblockd to test, as well
cbd_s = time.time()
cbd_result_valid = True
cbd_result_error_code = None
payload = {
"id": 0,
"jsonrpc": "2.0",
"method": "is_ready",
"params": [],
}
try:
url = URL("http://127.0.0.1:%s/api/" % config.RPC_PORT)
client = HTTPClient.from_url(url)
r = client.post(url.request_uri, body=json.dumps(payload), headers={'content-type': 'application/json'})
except Exception, e:
cbd_result_valid = False
cbd_result_error_code = "GOT EXCEPTION: %s" % e
else:
if r.status_code != 200:
cbd_result_valid = False
cbd_result_error_code = "GOT STATUS %s" % r.status_code if r else 'COULD NOT CONTACT'
cbd_result = json.loads(r.read())
        if 'error' in cbd_result:
            cbd_result_valid = False
            cbd_result_error_code = "GOT ERROR: %s" % cbd_result['error']
finally:
client.close()
cbd_e = time.time()
response_code = 200
if not cpd_result_valid or not cbd_result_valid:
response_code = 500
result = {
'litetokensd': 'OK' if cpd_result_valid else 'NOT OK',
'liteblockd': 'OK' if cbd_result_valid else 'NOT OK',
'liteblockd_error': cbd_result_error_code,
'litetokensd_ver': '%s.%s.%s' % (
cpd_status['version_major'], cpd_status['version_minor'], cpd_status['version_revision']) if cpd_result_valid else '?',
'liteblockd_ver': config.VERSION,
'litetokensd_last_block': cpd_status['last_block'] if cpd_result_valid else '?',
'litetokensd_last_message_index': cpd_status['last_message_index'] if cpd_result_valid else '?',
'litetokensd_check_elapsed': cpd_e - cpd_s,
'liteblockd_check_elapsed': cbd_e - cbd_s,
'local_online_users': len(siofeeds.onlineClients),
}
return flask.Response(json.dumps(result), response_code, mimetype='application/json')
@app.route('/', methods=["POST",])
@app.route('/api/', methods=["POST",])
def handle_post():
#don't do anything if we're not caught up
if not util.is_caught_up_well_enough_for_government_work():
obj_error = jsonrpc.exceptions.JSONRPCServerError(data="Server is not caught up. Please try again later.")
response = flask.Response(obj_error.json.encode(), 525, mimetype='application/json')
#^ 525 is a custom response code we use for this one purpose
_set_cors_headers(response)
return response
try:
request_json = flask.request.get_data().decode('utf-8')
request_data = json.loads(request_json)
assert 'id' in request_data and request_data['jsonrpc'] == "2.0" and request_data['method']
# params may be omitted
except:
obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(data="Invalid JSON-RPC 2.0 request format")
response = flask.Response(obj_error.json.encode(), 200, mimetype='application/json')
_set_cors_headers(response)
return response
#only arguments passed as a dict are supported
if request_data.get('params', None) and not isinstance(request_data['params'], dict):
obj_error = jsonrpc.exceptions.JSONRPCInvalidRequest(
data='Arguments must be passed as a JSON object (list of unnamed arguments not supported)')
response = flask.Response(obj_error.json.encode(), 200, mimetype='application/json')
_set_cors_headers(response)
return response
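    # Illustrative sketch only (hypothetical id/params): a request that passes the checks
    # above looks like
    #   {"jsonrpc": "2.0", "id": 0, "method": "get_chat_handle", "params": {"wallet_id": "abc123"}}
    # and is handed to the JSONRPCResponseManager below for dispatch.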
rpc_response = jsonrpc.JSONRPCResponseManager.handle(request_json, dispatcher)
rpc_response_json = json.dumps(rpc_response.data, default=util.json_dthandler).encode()
#log the request data
try:
assert 'method' in request_data
tx_logger.info("TRANSACTION --- %s ||| REQUEST: %s ||| RESPONSE: %s" % (request_data['method'], request_json, rpc_response_json))
except Exception, e:
logging.info("Could not log transaction: Invalid format: %s" % e)
response = flask.Response(rpc_response_json, 200, mimetype='application/json')
_set_cors_headers(response)
return response
#make a new RotatingFileHandler for the access log.
api_logger = logging.getLogger("api_log")
h = logging_handlers.RotatingFileHandler(os.path.join(config.DATA_DIR, "api.access.log"), 'a', API_MAX_LOG_SIZE, API_MAX_LOG_COUNT)
api_logger.setLevel(logging.INFO)
api_logger.addHandler(h)
api_logger.propagate = False
#hack to allow wsgiserver logging to use python logging module...
def trimlog(log, msg):
log.info(msg.rstrip())
api_logger.write = functools.partial(trimlog, api_logger)
#start up the API listener/handler
server = wsgi.WSGIServer((config.RPC_HOST, int(config.RPC_PORT)), app, log=api_logger)
server.serve_forever()
| mit |
mattrhummel/GermannaCC-WPTheme | node_modules/pangyp/gyp/tools/graphviz.py | 2679 | 2878 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents."""
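  # Illustrative sketch only (shape inferred from the dump-dependency-json output used
  # below): edges maps each target to the list of targets it depends on, e.g.
  #   {"tools/gyp/gyp.gyp:gyp#target": ["tools/gyp/gyp.gyp:gyplib#target"]}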
  file = open(filename)
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
| mit |
vFense/vFenseAgent-nix | agent/deps/mac/Python-2.7.5/lib/python2.7/xml/dom/__init__.py | 327 | 3998 | """W3C Document Object Model implementation for Python.
The Python mapping of the Document Object Model is documented in the
Python Library Reference in the section on the xml.dom package.
This package contains the following modules:
minidom -- A simple implementation of the Level 1 DOM with namespace
support added (based on the Level 2 specification) and other
minor Level 2 functionality.
pulldom -- DOM builder supporting on-demand tree-building for selected
subtrees of the document.
"""
class Node:
"""Class giving the NodeType constants."""
# DOM implementations may use this as a base class for their own
# Node implementations. If they don't, the constants defined here
# should still be used as the canonical definitions as they match
# the values given in the W3C recommendation. Client code can
# safely refer to these values in all tests of Node.nodeType
# values.
ELEMENT_NODE = 1
ATTRIBUTE_NODE = 2
TEXT_NODE = 3
CDATA_SECTION_NODE = 4
ENTITY_REFERENCE_NODE = 5
ENTITY_NODE = 6
PROCESSING_INSTRUCTION_NODE = 7
COMMENT_NODE = 8
DOCUMENT_NODE = 9
DOCUMENT_TYPE_NODE = 10
DOCUMENT_FRAGMENT_NODE = 11
NOTATION_NODE = 12
#ExceptionCode
INDEX_SIZE_ERR = 1
DOMSTRING_SIZE_ERR = 2
HIERARCHY_REQUEST_ERR = 3
WRONG_DOCUMENT_ERR = 4
INVALID_CHARACTER_ERR = 5
NO_DATA_ALLOWED_ERR = 6
NO_MODIFICATION_ALLOWED_ERR = 7
NOT_FOUND_ERR = 8
NOT_SUPPORTED_ERR = 9
INUSE_ATTRIBUTE_ERR = 10
INVALID_STATE_ERR = 11
SYNTAX_ERR = 12
INVALID_MODIFICATION_ERR = 13
NAMESPACE_ERR = 14
INVALID_ACCESS_ERR = 15
VALIDATION_ERR = 16
class DOMException(Exception):
"""Abstract base class for DOM exceptions.
Exceptions with specific codes are specializations of this class."""
def __init__(self, *args, **kw):
if self.__class__ is DOMException:
raise RuntimeError(
"DOMException should not be instantiated directly")
Exception.__init__(self, *args, **kw)
def _get_code(self):
return self.code
class IndexSizeErr(DOMException):
code = INDEX_SIZE_ERR
class DomstringSizeErr(DOMException):
code = DOMSTRING_SIZE_ERR
class HierarchyRequestErr(DOMException):
code = HIERARCHY_REQUEST_ERR
class WrongDocumentErr(DOMException):
code = WRONG_DOCUMENT_ERR
class InvalidCharacterErr(DOMException):
code = INVALID_CHARACTER_ERR
class NoDataAllowedErr(DOMException):
code = NO_DATA_ALLOWED_ERR
class NoModificationAllowedErr(DOMException):
code = NO_MODIFICATION_ALLOWED_ERR
class NotFoundErr(DOMException):
code = NOT_FOUND_ERR
class NotSupportedErr(DOMException):
code = NOT_SUPPORTED_ERR
class InuseAttributeErr(DOMException):
code = INUSE_ATTRIBUTE_ERR
class InvalidStateErr(DOMException):
code = INVALID_STATE_ERR
class SyntaxErr(DOMException):
code = SYNTAX_ERR
class InvalidModificationErr(DOMException):
code = INVALID_MODIFICATION_ERR
class NamespaceErr(DOMException):
code = NAMESPACE_ERR
class InvalidAccessErr(DOMException):
code = INVALID_ACCESS_ERR
class ValidationErr(DOMException):
code = VALIDATION_ERR
class UserDataHandler:
"""Class giving the operation constants for UserDataHandler.handle()."""
# Based on DOM Level 3 (WD 9 April 2002)
NODE_CLONED = 1
NODE_IMPORTED = 2
NODE_DELETED = 3
NODE_RENAMED = 4
XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
EMPTY_NAMESPACE = None
EMPTY_PREFIX = None
from domreg import getDOMImplementation,registerDOMImplementation
| lgpl-3.0 |
ankitjain87/django-allauth | allauth/socialaccount/providers/tumblr/views.py | 66 | 1295 | import json
from allauth.socialaccount.providers.oauth.client import OAuth
from allauth.socialaccount.providers.oauth.views import (OAuthAdapter,
OAuthLoginView,
OAuthCallbackView)
from .provider import TumblrProvider
class TumblrAPI(OAuth):
url = 'http://api.tumblr.com/v2/user/info'
def get_user_info(self):
data = json.loads(self.query(self.url))
return data['response']['user']
class TumblrOAuthAdapter(OAuthAdapter):
provider_id = TumblrProvider.id
request_token_url = 'https://www.tumblr.com/oauth/request_token'
access_token_url = 'https://www.tumblr.com/oauth/access_token'
authorize_url = 'https://www.tumblr.com/oauth/authorize'
def complete_login(self, request, app, token, response):
client = TumblrAPI(request, app.client_id, app.secret,
self.request_token_url)
extra_data = client.get_user_info()
return self.get_provider().sociallogin_from_response(request,
extra_data)
oauth_login = OAuthLoginView.adapter_view(TumblrOAuthAdapter)
oauth_callback = OAuthCallbackView.adapter_view(TumblrOAuthAdapter)
| mit |
austinellis/googlepythontutorials | babynames/solution/babynames.py | 212 | 3852 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
"""
Given a file name for baby.html, returns a list starting with the year string
followed by the name-rank strings in alphabetical order.
['2006', 'Aaliyah 91', Aaron 57', 'Abagail 895', ' ...]
"""
# +++your code here+++
# LAB(begin solution)
# The list [year, name_and_rank, name_and_rank, ...] we'll eventually return.
names = []
# Open and read the file.
f = open(filename, 'rU')
text = f.read()
# Could process the file line-by-line, but regex on the whole text
# at once is even easier.
# Get the year.
year_match = re.search(r'Popularity\sin\s(\d\d\d\d)', text)
if not year_match:
# We didn't find a year, so we'll exit with an error message.
sys.stderr.write('Couldn\'t find the year!\n')
sys.exit(1)
year = year_match.group(1)
names.append(year)
# Extract all the data tuples with a findall()
# each tuple is: (rank, boy-name, girl-name)
  tuples = re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', text)
#print tuples
# Store data into a dict using each name as a key and that
# name's rank number as the value.
# (if the name is already in there, don't add it, since
# this new rank will be bigger than the previous rank).
names_to_rank = {}
for rank_tuple in tuples:
(rank, boyname, girlname) = rank_tuple # unpack the tuple into 3 vars
if boyname not in names_to_rank:
names_to_rank[boyname] = rank
if girlname not in names_to_rank:
names_to_rank[girlname] = rank
# You can also write:
# for rank, boyname, girlname in tuples:
# ...
# To unpack the tuples inside a for-loop.
# Get the names, sorted in the right order
sorted_names = sorted(names_to_rank.keys())
# Build up result list, one element per line
for name in sorted_names:
names.append(name + " " + names_to_rank[name])
return names
# LAB(replace solution)
# return
# LAB(end solution)
def main():
# This command-line parsing code is provided.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print 'usage: [--summaryfile] file [file ...]'
sys.exit(1)
# Notice the summary flag and remove it from args if it is present.
summary = False
if args[0] == '--summaryfile':
summary = True
del args[0]
# +++your code here+++
# For each filename, get the names, then either print the text output
# or write it to a summary file
# LAB(begin solution)
for filename in args:
names = extract_names(filename)
# Make text out of the whole list
text = '\n'.join(names)
if summary:
outf = open(filename + '.summary', 'w')
outf.write(text + '\n')
outf.close()
else:
print text
# LAB(end solution)
if __name__ == '__main__':
main()
| apache-2.0 |
axbaretto/beam | sdks/python/.tox/docs/lib/python2.7/site-packages/setuptools/command/upload.py | 248 | 1172 | import getpass
from distutils.command import upload as orig
class upload(orig.upload):
"""
Override default upload behavior to obtain password
in a variety of different ways.
"""
def finalize_options(self):
orig.upload.finalize_options(self)
self.username = (
self.username or
getpass.getuser()
)
# Attempt to obtain password. Short circuit evaluation at the first
# sign of success.
self.password = (
self.password or
self._load_password_from_keyring() or
self._prompt_for_password()
)
def _load_password_from_keyring(self):
"""
Attempt to load password from keyring. Suppress Exceptions.
"""
try:
keyring = __import__('keyring')
return keyring.get_password(self.repository, self.username)
except Exception:
pass
def _prompt_for_password(self):
"""
Prompt for a password on the tty. Suppress Exceptions.
"""
try:
return getpass.getpass()
except (Exception, KeyboardInterrupt):
pass
| apache-2.0 |
PopCap/GameIdea | Engine/Source/ThirdParty/HTML5/emsdk/emscripten/1.30.0/third_party/ply/example/BASIC/basinterp.py | 166 | 17284 | # This file provides the runtime support for running a basic program
# Assumes the program has been parsed using basparse.py
import sys
import math
import random
class BasicInterpreter:
# Initialize the interpreter. prog is a dictionary
# containing (line,statement) mappings
def __init__(self,prog):
self.prog = prog
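        # Illustrative sketch only (statement shapes inferred from run() below): prog maps
        # BASIC line numbers to parsed statement tuples, e.g.
        #   {10: ('LET', ('A', None, None), ('NUM', 1)),
        #    20: ('PRINT', [('A IS', ('VAR', ('A', None, None)))], ''),
        #    30: ('END',)}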
self.functions = { # Built-in function table
'SIN' : lambda z: math.sin(self.eval(z)),
'COS' : lambda z: math.cos(self.eval(z)),
'TAN' : lambda z: math.tan(self.eval(z)),
'ATN' : lambda z: math.atan(self.eval(z)),
'EXP' : lambda z: math.exp(self.eval(z)),
'ABS' : lambda z: abs(self.eval(z)),
'LOG' : lambda z: math.log(self.eval(z)),
'SQR' : lambda z: math.sqrt(self.eval(z)),
'INT' : lambda z: int(self.eval(z)),
'RND' : lambda z: random.random()
}
# Collect all data statements
def collect_data(self):
self.data = []
for lineno in self.stat:
if self.prog[lineno][0] == 'DATA':
self.data = self.data + self.prog[lineno][1]
self.dc = 0 # Initialize the data counter
# Check for end statements
def check_end(self):
has_end = 0
for lineno in self.stat:
if self.prog[lineno][0] == 'END' and not has_end:
has_end = lineno
if not has_end:
print("NO END INSTRUCTION")
self.error = 1
return
if has_end != lineno:
print("END IS NOT LAST")
self.error = 1
# Check loops
def check_loops(self):
for pc in range(len(self.stat)):
lineno = self.stat[pc]
if self.prog[lineno][0] == 'FOR':
forinst = self.prog[lineno]
loopvar = forinst[1]
for i in range(pc+1,len(self.stat)):
if self.prog[self.stat[i]][0] == 'NEXT':
nextvar = self.prog[self.stat[i]][1]
if nextvar != loopvar: continue
self.loopend[pc] = i
break
else:
print("FOR WITHOUT NEXT AT LINE %s" % self.stat[pc])
self.error = 1
# Evaluate an expression
def eval(self,expr):
etype = expr[0]
if etype == 'NUM': return expr[1]
elif etype == 'GROUP': return self.eval(expr[1])
elif etype == 'UNARY':
if expr[1] == '-': return -self.eval(expr[2])
elif etype == 'BINOP':
if expr[1] == '+': return self.eval(expr[2])+self.eval(expr[3])
elif expr[1] == '-': return self.eval(expr[2])-self.eval(expr[3])
elif expr[1] == '*': return self.eval(expr[2])*self.eval(expr[3])
elif expr[1] == '/': return float(self.eval(expr[2]))/self.eval(expr[3])
elif expr[1] == '^': return abs(self.eval(expr[2]))**self.eval(expr[3])
elif etype == 'VAR':
var,dim1,dim2 = expr[1]
if not dim1 and not dim2:
if var in self.vars:
return self.vars[var]
else:
print("UNDEFINED VARIABLE %s AT LINE %s" % (var, self.stat[self.pc]))
raise RuntimeError
# May be a list lookup or a function evaluation
if dim1 and not dim2:
if var in self.functions:
# A function
return self.functions[var](dim1)
else:
# A list evaluation
if var in self.lists:
dim1val = self.eval(dim1)
if dim1val < 1 or dim1val > len(self.lists[var]):
print("LIST INDEX OUT OF BOUNDS AT LINE %s" % self.stat[self.pc])
raise RuntimeError
return self.lists[var][dim1val-1]
if dim1 and dim2:
if var in self.tables:
dim1val = self.eval(dim1)
dim2val = self.eval(dim2)
if dim1val < 1 or dim1val > len(self.tables[var]) or dim2val < 1 or dim2val > len(self.tables[var][0]):
print("TABLE INDEX OUT OUT BOUNDS AT LINE %s" % self.stat[self.pc])
raise RuntimeError
return self.tables[var][dim1val-1][dim2val-1]
print("UNDEFINED VARIABLE %s AT LINE %s" % (var, self.stat[self.pc]))
raise RuntimeError
# Evaluate a relational expression
def releval(self,expr):
etype = expr[1]
lhs = self.eval(expr[2])
rhs = self.eval(expr[3])
if etype == '<':
if lhs < rhs: return 1
else: return 0
elif etype == '<=':
if lhs <= rhs: return 1
else: return 0
elif etype == '>':
if lhs > rhs: return 1
else: return 0
elif etype == '>=':
if lhs >= rhs: return 1
else: return 0
elif etype == '=':
if lhs == rhs: return 1
else: return 0
elif etype == '<>':
if lhs != rhs: return 1
else: return 0
# Assignment
def assign(self,target,value):
var, dim1, dim2 = target
if not dim1 and not dim2:
self.vars[var] = self.eval(value)
elif dim1 and not dim2:
# List assignment
dim1val = self.eval(dim1)
if not var in self.lists:
self.lists[var] = [0]*10
if dim1val > len(self.lists[var]):
print ("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
raise RuntimeError
self.lists[var][dim1val-1] = self.eval(value)
elif dim1 and dim2:
dim1val = self.eval(dim1)
dim2val = self.eval(dim2)
if not var in self.tables:
temp = [0]*10
v = []
for i in range(10): v.append(temp[:])
self.tables[var] = v
# Variable already exists
if dim1val > len(self.tables[var]) or dim2val > len(self.tables[var][0]):
print("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
raise RuntimeError
self.tables[var][dim1val-1][dim2val-1] = self.eval(value)
# Change the current line number
def goto(self,linenum):
if not linenum in self.prog:
print("UNDEFINED LINE NUMBER %d AT LINE %d" % (linenum, self.stat[self.pc]))
raise RuntimeError
self.pc = self.stat.index(linenum)
# Run it
def run(self):
self.vars = { } # All variables
self.lists = { } # List variables
self.tables = { } # Tables
self.loops = [ ] # Currently active loops
self.loopend= { } # Mapping saying where loops end
self.gosub = None # Gosub return point (if any)
self.error = 0 # Indicates program error
self.stat = list(self.prog) # Ordered list of all line numbers
self.stat.sort()
self.pc = 0 # Current program counter
# Processing prior to running
self.collect_data() # Collect all of the data statements
self.check_end()
self.check_loops()
if self.error: raise RuntimeError
while 1:
line = self.stat[self.pc]
instr = self.prog[line]
op = instr[0]
# END and STOP statements
if op == 'END' or op == 'STOP':
break # We're done
# GOTO statement
elif op == 'GOTO':
newline = instr[1]
self.goto(newline)
continue
# PRINT statement
elif op == 'PRINT':
plist = instr[1]
out = ""
for label,val in plist:
if out:
out += ' '*(15 - (len(out) % 15))
out += label
if val:
if label: out += " "
eval = self.eval(val)
out += str(eval)
sys.stdout.write(out)
end = instr[2]
if not (end == ',' or end == ';'):
sys.stdout.write("\n")
if end == ',': sys.stdout.write(" "*(15-(len(out) % 15)))
if end == ';': sys.stdout.write(" "*(3-(len(out) % 3)))
# LET statement
elif op == 'LET':
target = instr[1]
value = instr[2]
self.assign(target,value)
# READ statement
elif op == 'READ':
for target in instr[1]:
if self.dc < len(self.data):
value = ('NUM',self.data[self.dc])
self.assign(target,value)
self.dc += 1
else:
# No more data. Program ends
return
elif op == 'IF':
relop = instr[1]
newline = instr[2]
if (self.releval(relop)):
self.goto(newline)
continue
elif op == 'FOR':
loopvar = instr[1]
initval = instr[2]
finval = instr[3]
stepval = instr[4]
# Check to see if this is a new loop
if not self.loops or self.loops[-1][0] != self.pc:
# Looks like a new loop. Make the initial assignment
newvalue = initval
self.assign((loopvar,None,None),initval)
if not stepval: stepval = ('NUM',1)
stepval = self.eval(stepval) # Evaluate step here
self.loops.append((self.pc,stepval))
else:
# It's a repeat of the previous loop
# Update the value of the loop variable according to the step
stepval = ('NUM',self.loops[-1][1])
newvalue = ('BINOP','+',('VAR',(loopvar,None,None)),stepval)
if self.loops[-1][1] < 0: relop = '>='
else: relop = '<='
if not self.releval(('RELOP',relop,newvalue,finval)):
# Loop is done. Jump to the NEXT
self.pc = self.loopend[self.pc]
self.loops.pop()
else:
self.assign((loopvar,None,None),newvalue)
elif op == 'NEXT':
if not self.loops:
print("NEXT WITHOUT FOR AT LINE %s" % line)
return
nextvar = instr[1]
self.pc = self.loops[-1][0]
loopinst = self.prog[self.stat[self.pc]]
forvar = loopinst[1]
if nextvar != forvar:
print("NEXT DOESN'T MATCH FOR AT LINE %s" % line)
return
continue
elif op == 'GOSUB':
newline = instr[1]
if self.gosub:
print("ALREADY IN A SUBROUTINE AT LINE %s" % line)
return
self.gosub = self.stat[self.pc]
self.goto(newline)
continue
elif op == 'RETURN':
if not self.gosub:
print("RETURN WITHOUT A GOSUB AT LINE %s" % line)
return
self.goto(self.gosub)
self.gosub = None
elif op == 'FUNC':
fname = instr[1]
pname = instr[2]
expr = instr[3]
def eval_func(pvalue,name=pname,self=self,expr=expr):
self.assign((pname,None,None),pvalue)
return self.eval(expr)
self.functions[fname] = eval_func
elif op == 'DIM':
for vname,x,y in instr[1]:
if y == 0:
# Single dimension variable
self.lists[vname] = [0]*x
else:
# Double dimension variable
temp = [0]*y
v = []
for i in range(x):
v.append(temp[:])
self.tables[vname] = v
self.pc += 1
# Utility functions for program listing
def expr_str(self,expr):
etype = expr[0]
if etype == 'NUM': return str(expr[1])
elif etype == 'GROUP': return "(%s)" % self.expr_str(expr[1])
elif etype == 'UNARY':
if expr[1] == '-': return "-"+str(expr[2])
elif etype == 'BINOP':
return "%s %s %s" % (self.expr_str(expr[2]),expr[1],self.expr_str(expr[3]))
elif etype == 'VAR':
return self.var_str(expr[1])
def relexpr_str(self,expr):
return "%s %s %s" % (self.expr_str(expr[2]),expr[1],self.expr_str(expr[3]))
def var_str(self,var):
varname,dim1,dim2 = var
if not dim1 and not dim2: return varname
if dim1 and not dim2: return "%s(%s)" % (varname, self.expr_str(dim1))
return "%s(%s,%s)" % (varname, self.expr_str(dim1),self.expr_str(dim2))
# Create a program listing
def list(self):
stat = list(self.prog) # Ordered list of all line numbers
stat.sort()
for line in stat:
instr = self.prog[line]
op = instr[0]
if op in ['END','STOP','RETURN']:
print("%s %s" % (line, op))
continue
elif op == 'REM':
print("%s %s" % (line, instr[1]))
elif op == 'PRINT':
_out = "%s %s " % (line, op)
first = 1
for p in instr[1]:
if not first: _out += ", "
if p[0] and p[1]: _out += '"%s"%s' % (p[0],self.expr_str(p[1]))
elif p[1]: _out += self.expr_str(p[1])
else: _out += '"%s"' % (p[0],)
first = 0
if instr[2]: _out += instr[2]
print(_out)
elif op == 'LET':
print("%s LET %s = %s" % (line,self.var_str(instr[1]),self.expr_str(instr[2])))
elif op == 'READ':
_out = "%s READ " % line
first = 1
for r in instr[1]:
if not first: _out += ","
_out += self.var_str(r)
first = 0
print(_out)
elif op == 'IF':
print("%s IF %s THEN %d" % (line,self.relexpr_str(instr[1]),instr[2]))
elif op == 'GOTO' or op == 'GOSUB':
print("%s %s %s" % (line, op, instr[1]))
elif op == 'FOR':
_out = "%s FOR %s = %s TO %s" % (line,instr[1],self.expr_str(instr[2]),self.expr_str(instr[3]))
if instr[4]: _out += " STEP %s" % (self.expr_str(instr[4]))
print(_out)
elif op == 'NEXT':
print("%s NEXT %s" % (line, instr[1]))
elif op == 'FUNC':
print("%s DEF %s(%s) = %s" % (line,instr[1],instr[2],self.expr_str(instr[3])))
elif op == 'DIM':
_out = "%s DIM " % line
first = 1
for vname,x,y in instr[1]:
if not first: _out += ","
first = 0
if y == 0:
_out += "%s(%d)" % (vname,x)
else:
_out += "%s(%d,%d)" % (vname,x,y)
print(_out)
elif op == 'DATA':
_out = "%s DATA " % line
first = 1
for v in instr[1]:
if not first: _out += ","
first = 0
_out += v
print(_out)
# Erase the current program
def new(self):
self.prog = {}
# Insert statements
def add_statements(self,prog):
for line,stat in prog.items():
self.prog[line] = stat
# Delete a statement
def del_line(self,lineno):
try:
del self.prog[lineno]
except KeyError:
pass
| bsd-2-clause |
mou4e/zirconium | tools/telemetry/telemetry/core/platform/cros_interface_unittest.py | 8 | 6570 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(nduca): Rewrite some of these tests to use mocks instead of
# actually talking to the device. This would improve our coverage quite
# a bit.
import socket
import tempfile
import unittest
from telemetry.core import forwarders
from telemetry.core.forwarders import cros_forwarder
from telemetry.core.platform import cros_interface
from telemetry import decorators
from telemetry.unittest_util import options_for_unittests
class CrOSInterfaceTest(unittest.TestCase):
def _GetCRI(self):
remote = options_for_unittests.GetCopy().cros_remote
remote_ssh_port = options_for_unittests.GetCopy().cros_remote_ssh_port
return cros_interface.CrOSInterface(
remote, remote_ssh_port,
options_for_unittests.GetCopy().cros_ssh_identity)
@decorators.Enabled('cros-chrome')
def testPushContents(self):
with self._GetCRI() as cri:
cri.RunCmdOnDevice(['rm', '-rf', '/tmp/testPushContents'])
cri.PushContents('hello world', '/tmp/testPushContents')
contents = cri.GetFileContents('/tmp/testPushContents')
self.assertEquals(contents, 'hello world')
@decorators.Enabled('cros-chrome')
def testExists(self):
with self._GetCRI() as cri:
self.assertTrue(cri.FileExistsOnDevice('/proc/cpuinfo'))
self.assertTrue(cri.FileExistsOnDevice('/etc/passwd'))
self.assertFalse(cri.FileExistsOnDevice('/etc/sdlfsdjflskfjsflj'))
@decorators.Enabled('linux')
def testExistsLocal(self):
with cros_interface.CrOSInterface() as cri:
self.assertTrue(cri.FileExistsOnDevice('/proc/cpuinfo'))
self.assertTrue(cri.FileExistsOnDevice('/etc/passwd'))
self.assertFalse(cri.FileExistsOnDevice('/etc/sdlfsdjflskfjsflj'))
@decorators.Enabled('cros-chrome')
def testGetFileContents(self): # pylint: disable=R0201
with self._GetCRI() as cri:
hosts = cri.GetFileContents('/etc/lsb-release')
self.assertTrue('CHROMEOS' in hosts)
@decorators.Enabled('cros-chrome')
def testGetFileContentsNonExistent(self):
with self._GetCRI() as cri:
f = tempfile.NamedTemporaryFile()
cri.PushContents('testGetFileNonExistent', f.name)
cri.RmRF(f.name)
self.assertRaises(
OSError,
lambda: cri.GetFileContents(f.name))
@decorators.Enabled('cros-chrome')
def testGetFile(self): # pylint: disable=R0201
with self._GetCRI() as cri:
f = tempfile.NamedTemporaryFile()
cri.GetFile('/etc/lsb-release', f.name)
with open(f.name, 'r') as f2:
res = f2.read()
self.assertTrue('CHROMEOS' in res)
@decorators.Enabled('cros-chrome')
def testGetFileNonExistent(self):
with self._GetCRI() as cri:
f = tempfile.NamedTemporaryFile()
cri.PushContents('testGetFileNonExistent', f.name)
cri.RmRF(f.name)
self.assertRaises(
OSError,
lambda: cri.GetFile(f.name))
@decorators.Enabled('cros-chrome')
def testIsServiceRunning(self):
with self._GetCRI() as cri:
self.assertTrue(cri.IsServiceRunning('openssh-server'))
@decorators.Enabled('linux')
def testIsServiceRunningLocal(self):
with cros_interface.CrOSInterface() as cri:
self.assertTrue(cri.IsServiceRunning('dbus'))
@decorators.Enabled('cros-chrome')
def testGetRemotePortAndIsHTTPServerRunningOnPort(self):
with self._GetCRI() as cri:
# Create local server.
sock = socket.socket()
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.listen(0)
# Get remote port and ensure that it was unused.
remote_port = cri.GetRemotePort()
self.assertFalse(cri.IsHTTPServerRunningOnPort(remote_port))
# Forward local server's port to remote device's remote_port.
forwarder = cros_forwarder.CrOsForwarderFactory(cri).Create(
forwarders.PortPairs(http=forwarders.PortPair(port, remote_port),
https=None, dns=None))
# At this point, remote device should be able to connect to local server.
self.assertTrue(cri.IsHTTPServerRunningOnPort(remote_port))
# Next remote port shouldn't be the same as remote_port, since remote_port
# is now in use.
self.assertTrue(cri.GetRemotePort() != remote_port)
# Close forwarder and local server ports.
forwarder.Close()
sock.close()
# Device should no longer be able to connect to remote_port since it is no
# longer in use.
self.assertFalse(cri.IsHTTPServerRunningOnPort(remote_port))
@decorators.Enabled('cros-chrome')
def testGetRemotePortReservedPorts(self):
with self._GetCRI() as cri:
# Should return 2 separate ports even though the first one isn't
# technically being used yet.
remote_port_1 = cri.GetRemotePort()
remote_port_2 = cri.GetRemotePort()
self.assertTrue(remote_port_1 != remote_port_2)
@decorators.Enabled('cros-chrome')
def testTakeScreenShot(self):
with self._GetCRI() as cri:
def _Cleanup():
cri.RmRF('/var/log/screenshots/test-prefix*')
_Cleanup()
cri.TakeScreenShot('test-prefix')
self.assertTrue(cri.FileExistsOnDevice(
'/var/log/screenshots/test-prefix-0.png'))
_Cleanup()
# TODO(tengs): It would be best if we can filter this test and other tests
# that need to be run locally based on the platform of the system browser.
@decorators.Enabled('linux')
def testEscapeCmdArguments(self):
"""Commands and their arguments that are executed through the cros
    interface should follow bash syntax. This test needs to run both remotely
and locally on the device to check for consistency.
"""
options = options_for_unittests.GetCopy()
with cros_interface.CrOSInterface(
options.cros_remote, options.cros_remote_ssh_port,
options.cros_ssh_identity) as cri:
# Check arguments with no special characters
stdout, _ = cri.RunCmdOnDevice(['echo', '--arg1=value1', '--arg2=value2',
'--arg3="value3"'])
assert stdout.strip() == '--arg1=value1 --arg2=value2 --arg3=value3'
# Check argument with special characters escaped
stdout, _ = cri.RunCmdOnDevice(['echo', '--arg=A\\; echo \\"B\\"'])
assert stdout.strip() == '--arg=A; echo "B"'
# Check argument with special characters in quotes
stdout, _ = cri.RunCmdOnDevice(['echo', "--arg='$HOME;;$PATH'"])
assert stdout.strip() == "--arg=$HOME;;$PATH"
| bsd-3-clause |
Pencroff/ai-hackathon-2017 | Backend/venv/lib/python3.6/site-packages/setuptools/command/test.py | 130 | 8816 | import os
import operator
import sys
import contextlib
import itertools
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils import log
from unittest import TestLoader
import six
from six.moves import map, filter
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module, pattern=None):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty(object):
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self.fget(obj)
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
('test-suite=', 's',
"Test suite to run (e.g. 'some_module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite and self.test_module:
msg = "You may specify a module or a suite, but not both"
raise DistutilsOptionError(msg)
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
@NonDataProperty
def test_args(self):
return list(self._test_args())
def _test_args(self):
if self.verbose:
yield '--verbose'
if self.test_suite:
yield self.test_suite
def with_project_on_sys_path(self, func):
"""
Backward compatibility for project_on_sys_path context.
"""
with self.project_on_sys_path():
func()
@contextlib.contextmanager
def project_on_sys_path(self, include_dists=[]):
with_2to3 = six.PY3 and getattr(self.distribution, 'use_2to3', False)
if with_2to3:
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
project_path = normalize_path(ei_cmd.egg_base)
sys.path.insert(0, project_path)
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
with self.paths_on_pythonpath([project_path]):
yield
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
@staticmethod
@contextlib.contextmanager
def paths_on_pythonpath(paths):
"""
Add the indicated paths to the head of the PYTHONPATH environment
variable so that subprocesses will also see the packages at
these paths.
Do this in a context that restores the value on exit.
"""
nothing = object()
orig_pythonpath = os.environ.get('PYTHONPATH', nothing)
current_pythonpath = os.environ.get('PYTHONPATH', '')
try:
prefix = os.pathsep.join(paths)
to_join = filter(None, [prefix, current_pythonpath])
new_path = os.pathsep.join(to_join)
if new_path:
os.environ['PYTHONPATH'] = new_path
yield
finally:
if orig_pythonpath is nothing:
os.environ.pop('PYTHONPATH', None)
else:
os.environ['PYTHONPATH'] = orig_pythonpath
@staticmethod
def install_dists(dist):
"""
Install the requirements indicated by self.distribution and
return an iterable of the dists that were built.
"""
ir_d = dist.fetch_build_eggs(dist.install_requires or [])
tr_d = dist.fetch_build_eggs(dist.tests_require or [])
return itertools.chain(ir_d, tr_d)
def run(self):
installed_dists = self.install_dists(self.distribution)
cmd = ' '.join(self._argv)
if self.dry_run:
self.announce('skipping "%s" (dry run)' % cmd)
return
self.announce('running "%s"' % cmd)
paths = map(operator.attrgetter('location'), installed_dists)
with self.paths_on_pythonpath(paths):
with self.project_on_sys_path():
self.run_tests()
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if six.PY3 and getattr(self.distribution, 'use_2to3', False):
module = self.test_suite.split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
exit_kwarg = {} if sys.version_info < (2, 7) else {"exit": False}
test = unittest_main(
None, None, self._argv,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
**exit_kwarg
)
if not test.result.wasSuccessful():
msg = 'Test failed: %s' % test.result
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
@property
def _argv(self):
return ['unittest'] + self.test_args
@staticmethod
def _resolve_as_ep(val):
"""
        Load the indicated attribute value, called, as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.resolve()()
| mit |
explora26/zephyr | scripts/west_commands/sign.py | 1 | 7203 | # Copyright (c) 2018 Foundries.io
#
# SPDX-License-Identifier: Apache-2.0
import abc
import argparse
import os
import subprocess
from west import cmake
from west import log
from west.build import is_zephyr_build
from west.util import quote_sh_list
from runners.core import BuildConfiguration
from zephyr_ext_common import find_build_dir, Forceable, \
BUILD_DIR_DESCRIPTION, cached_runner_config
SIGN_DESCRIPTION = '''\
This command automates some of the drudgery of creating signed Zephyr
binaries for chain-loading by a bootloader.
In the simplest usage, run this from your build directory:
west sign -t your_tool -- ARGS_FOR_YOUR_TOOL
Assuming your binary was properly built for processing and handling by
tool "your_tool", this creates zephyr.signed.bin and zephyr.signed.hex
files (if supported by "your_tool") which are ready for use by your
bootloader. The "ARGS_FOR_YOUR_TOOL" value can be any additional
arguments you want to pass to the tool, such as the location of a
signing key, a version identifier, etc.
See tool-specific help below for details.'''
SIGN_EPILOG = '''\
imgtool
-------
Currently, MCUboot's 'imgtool' tool is supported. To build a signed
binary you can load with MCUboot using imgtool, run this from your
build directory:
west sign -t imgtool -- --key YOUR_SIGNING_KEY.pem
The image header size, alignment, and slot sizes are determined from
the build directory using board information and the device tree. A
default version number of 0.0.0+0 is used (which can be overridden by
passing "--version x.y.z+w" after "--key"). As shown above, extra
arguments after a '--' are passed to imgtool directly.'''
class ToggleAction(argparse.Action):
def __call__(self, parser, args, ignored, option):
setattr(args, self.dest, not option.startswith('--no-'))
class Sign(Forceable):
def __init__(self):
super(Sign, self).__init__(
'sign',
# Keep this in sync with the string in west-commands.yml.
'sign a Zephyr binary for bootloader chain-loading',
SIGN_DESCRIPTION,
accepts_unknown_args=False)
def do_add_parser(self, parser_adder):
parser = parser_adder.add_parser(
self.name,
epilog=SIGN_EPILOG,
help=self.help,
formatter_class=argparse.RawDescriptionHelpFormatter,
description=self.description)
parser.add_argument('-d', '--build-dir', help=BUILD_DIR_DESCRIPTION)
self.add_force_arg(parser)
# general options
group = parser.add_argument_group('tool control options')
group.add_argument('-t', '--tool', choices=['imgtool'],
help='image signing tool name')
group.add_argument('-p', '--tool-path', default='imgtool',
help='''path to the tool itself, if needed''')
group.add_argument('tool_args', nargs='*', metavar='tool_opt',
help='extra option(s) to pass to the signing tool')
# bin file options
group = parser.add_argument_group('binary (.bin) file options')
group.add_argument('--bin', '--no-bin', dest='gen_bin', nargs=0,
action=ToggleAction,
help='''produce a signed .bin file?
(default: yes, if supported)''')
group.add_argument('-B', '--sbin', metavar='BIN',
default='zephyr.signed.bin',
help='''signed .bin file name
(default: zephyr.signed.bin)''')
# hex file options
group = parser.add_argument_group('Intel HEX (.hex) file options')
group.add_argument('--hex', '--no-hex', dest='gen_hex', nargs=0,
action=ToggleAction,
help='''produce a signed .hex file?
(default: yes, if supported)''')
group.add_argument('-H', '--shex', metavar='HEX',
default='zephyr.signed.hex',
help='''signed .hex file name
(default: zephyr.signed.hex)''')
# defaults for hex/bin generation
parser.set_defaults(gen_bin=True, gen_hex=True)
return parser
def do_run(self, args, ignored):
if not (args.gen_bin or args.gen_hex):
return
self.check_force(os.path.isdir(args.build_dir),
'no such build directory {}'.format(args.build_dir))
self.check_force(is_zephyr_build(args.build_dir),
"build directory {} doesn't look like a Zephyr build "
'directory'.format(args.build_dir))
if args.tool == 'imgtool':
signer = ImgtoolSigner()
# (Add support for other signers here in elif blocks)
else:
raise RuntimeError("can't happen")
# Provide the build directory if not given, and defer to the signer.
args.build_dir = find_build_dir(args.build_dir)
signer.sign(args)
class Signer(abc.ABC):
'''Common abstract superclass for signers.
To add support for a new tool, subclass this and add support for
it in the Sign.do_run() method.'''
@abc.abstractmethod
def sign(self, args):
'''Abstract method to perform a signature; subclasses must implement.
:param args: parsed arguments from Sign command
'''
class ImgtoolSigner(Signer):
def sign(self, args):
cache = cmake.CMakeCache.from_build_dir(args.build_dir)
runner_config = cached_runner_config(args.build_dir, cache)
bcfg = BuildConfiguration(args.build_dir)
# Build a signed .bin
if args.gen_bin and runner_config.bin_file:
sign_bin = self.sign_cmd(args, bcfg, runner_config.bin_file,
args.sbin)
log.dbg(quote_sh_list(sign_bin))
subprocess.check_call(sign_bin)
# Build a signed .hex
if args.gen_hex and runner_config.hex_file:
sign_hex = self.sign_cmd(args, bcfg, runner_config.hex_file,
args.shex)
log.dbg(quote_sh_list(sign_hex))
subprocess.check_call(sign_hex)
def sign_cmd(self, args, bcfg, infile, outfile):
align = str(bcfg['DT_FLASH_WRITE_BLOCK_SIZE'])
vtoff = str(bcfg['CONFIG_TEXT_SECTION_OFFSET'])
slot_size = str(bcfg['DT_FLASH_AREA_IMAGE_0_SIZE'])
sign_command = [args.tool_path or 'imgtool',
'sign',
'--align', align,
'--header-size', vtoff,
'--slot-size', slot_size,
# We provide a default --version in case the
# user is just messing around and doesn't want
# to set one. It will be overridden if there is
# a --version in args.tool_args.
'--version', '0.0.0+0',
infile,
outfile]
sign_command.extend(args.tool_args)
return sign_command
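# Illustrative sketch (added for clarity; not part of the upstream module):
# for a board whose build reports a flash write block size of 8, a text
# section offset of 512 and an image slot size of 393216, sign_cmd() above
# builds a command equivalent to running:
#
#   imgtool sign --align 8 --header-size 512 --slot-size 393216 \
#       --version 0.0.0+0 zephyr.bin zephyr.signed.bin --key my_key.pem
#
# The numeric values and the key file name are example assumptions only.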
| apache-2.0 |
alisidd/tensorflow | tensorflow/python/kernel_tests/softmax_op_test.py | 85 | 7224 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SoftmaxOp and LogSoftmaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class SoftmaxTest(test.TestCase):
def _npSoftmax(self, features, dim=-1, log=False):
if dim == -1:
dim = len(features.shape) - 1
one_only_on_dim = list(features.shape)
one_only_on_dim[dim] = 1
e = np.exp(features - np.reshape(
np.amax(
features, axis=dim), one_only_on_dim))
softmax = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim)
if log:
return np.log(softmax)
else:
return softmax
def _testSoftmax(self, np_features, dim=-1, log=False, use_gpu=False):
# A previous version of the code checked the op name rather than the op type
# to distinguish between log and non-log. Use an arbitrary name to catch
# this bug in future.
name = "arbitrary"
np_softmax = self._npSoftmax(np_features, dim=dim, log=log)
with self.test_session(use_gpu=use_gpu):
if log:
tf_softmax = nn_ops.log_softmax(np_features, dim=dim, name=name)
else:
tf_softmax = nn_ops.softmax(np_features, dim=dim, name=name)
out = tf_softmax.eval()
self.assertAllCloseAccordingToType(np_softmax, out)
self.assertShapeEqual(np_softmax, tf_softmax)
if not log:
# Bonus check: the softmaxes should add to one in dimension dim.
sum_along_dim = np.sum(out, axis=dim)
self.assertAllCloseAccordingToType(
np.ones(sum_along_dim.shape), sum_along_dim)
def _testAll(self, features):
self._testSoftmax(features, use_gpu=False)
self._testSoftmax(features, log=True, use_gpu=False)
self._testSoftmax(features, use_gpu=True)
self._testSoftmax(features, log=True, use_gpu=True)
self._testOverflow(use_gpu=True)
def testNpSoftmax(self):
features = [[1., 1., 1., 1.], [1., 2., 3., 4.]]
# Batch 0: All exps are 1. The expected result is
# Softmaxes = [0.25, 0.25, 0.25, 0.25]
# LogSoftmaxes = [-1.386294, -1.386294, -1.386294, -1.386294]
#
# Batch 1:
# exps = [1., 2.718, 7.389, 20.085]
# sum = 31.192
# Softmaxes = exps / sum = [0.0320586, 0.08714432, 0.23688282, 0.64391426]
# LogSoftmaxes = [-3.44019 , -2.44019 , -1.44019 , -0.44019]
np_sm = self._npSoftmax(np.array(features))
self.assertAllClose(
np.array([[0.25, 0.25, 0.25, 0.25],
[0.0320586, 0.08714432, 0.23688282, 0.64391426]]),
np_sm,
rtol=1.e-5,
atol=1.e-5)
np_lsm = self._npSoftmax(np.array(features), log=True)
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[-3.4401897, -2.4401897, -1.4401897, -0.4401897]]),
np_lsm,
rtol=1.e-5,
atol=1.e-5)
def _testOverflow(self, use_gpu=False):
if use_gpu:
type = np.float32
else:
type = np.float64
max = np.finfo(type).max
features = np.array([[1., 1., 1., 1.], [max, 1., 2., 3.]]).astype(type)
with self.test_session(use_gpu=use_gpu):
tf_log_softmax = nn_ops.log_softmax(features)
out = tf_log_softmax.eval()
self.assertAllClose(
np.array([[-1.386294, -1.386294, -1.386294, -1.386294],
[0, -max, -max, -max]]),
out,
rtol=1.e-5,
atol=1.e-5)
def testFloat(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32))
def testHalf(self):
self._testAll(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16))
def testDouble(self):
self._testSoftmax(
np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64))
self._testOverflow()
def test1DTensorAsInput(self):
self._testSoftmax(
np.array([3., 2., 3., 9.]).astype(np.float64), use_gpu=False)
self._testOverflow(use_gpu=False)
def test3DTensorAsInput(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongFirstDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=0,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testAlongSecondDimension(self):
self._testSoftmax(
np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32),
dim=1,
use_gpu=False)
self._testOverflow(use_gpu=False)
def testShapeInference(self):
op = nn_ops.softmax([[[1., 1., 1., 1.], [1., 2., 3., 4.]],
[[2., 3., 4., 5.], [6., 7., 8., 9.]],
[[5., 4., 3., 2.], [1., 2., 3., 4.]]])
self.assertEqual([3, 2, 4], op.get_shape())
def testEmptyInput(self):
with self.test_session():
x = constant_op.constant([[]], shape=[0, 3])
self.assertEqual(0, array_ops.size(x).eval())
# reshape would raise if logits is empty
with self.assertRaises(errors_impl.InvalidArgumentError):
nn_ops.softmax(x, dim=0).eval()
def testDimTooLarge(self):
with self.test_session():
with self.assertRaises(errors_impl.InvalidArgumentError):
nn_ops.softmax([1., 2., 3., 4.], dim=100).eval()
def testLargeDims(self):
# Make sure that we properly handle large inputs. See
# https://github.com/tensorflow/tensorflow/issues/4425 for details
for dims in [129, 256]:
ones = np.random.rand(dims, dims).astype(np.float32)
np_softmax = self._npSoftmax(ones)
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu) as sess:
x = array_ops.placeholder(dtypes.float32)
y = nn_ops.softmax(x)
tf_softmax = sess.run(y, feed_dict={x: ones})
self.assertAllClose(tf_softmax, np_softmax)
if __name__ == "__main__":
test.main()
| apache-2.0 |
hortonworks/hortonworks-sandbox | desktop/core/ext-py/ctypes-1.0.2/ctypes/test/test_returnfuncptrs.py | 66 | 1449 | import unittest
from ctypes import *
import _ctypes_test
class ReturnFuncPtrTestCase(unittest.TestCase):
def test_with_prototype(self):
# The _ctypes_test shared lib/dll exports quite a few functions for testing.
# The get_strchr function returns a *pointer* to the C strchr function.
dll = CDLL(_ctypes_test.__file__)
get_strchr = dll.get_strchr
get_strchr.restype = CFUNCTYPE(c_char_p, c_char_p, c_char)
strchr = get_strchr()
self.failUnlessEqual(strchr("abcdef", "b"), "bcdef")
self.failUnlessEqual(strchr("abcdef", "x"), None)
self.assertRaises(ArgumentError, strchr, "abcdef", 3)
self.assertRaises(TypeError, strchr, "abcdef")
def test_without_prototype(self):
dll = CDLL(_ctypes_test.__file__)
get_strchr = dll.get_strchr
# the default 'c_int' would not work on systems where sizeof(int) != sizeof(void *)
get_strchr.restype = c_void_p
addr = get_strchr()
# _CFuncPtr instances are now callable with an integer argument
# which denotes a function address:
strchr = CFUNCTYPE(c_char_p, c_char_p, c_char)(addr)
self.failUnlessEqual(strchr("abcdef", "b"), "bcdef")
self.failUnlessEqual(strchr("abcdef", "x"), None)
self.assertRaises(ArgumentError, strchr, "abcdef", 3)
self.assertRaises(TypeError, strchr, "abcdef")
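# Illustrative sketch (added for clarity; not part of the upstream test file):
# the same "callable from a raw address" pattern works for any C function.
# This assumes a POSIX system where CDLL(None) exposes the C library.
def _example_funcptr_from_address():
    libc = CDLL(None)
    addr = cast(libc.abs, c_void_p).value   # raw address of libc's abs()
    my_abs = CFUNCTYPE(c_int, c_int)(addr)  # rebuild a callable from it
    return my_abs(-5)                       # -> 5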
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
josh-willis/pycbc | pycbc/inference/sampler/multinest.py | 4 | 15466 | # Copyright (C) 2018 Daniel Finstad
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This modules provides classes and functions for using the Multinest sampler
packages for parameter estimation.
"""
from __future__ import absolute_import
import logging
import sys
import numpy
from pycbc.inference.io import (MultinestFile, validate_checkpoint_files)
from pycbc.distributions import read_constraints_from_config
from pycbc.pool import is_main_process
from pycbc.transforms import apply_transforms
from .base import (BaseSampler, setup_output)
from .base_mcmc import get_optional_arg_from_config
#
# =============================================================================
#
# Samplers
#
# =============================================================================
#
class MultinestSampler(BaseSampler):
"""This class is used to construct a nested sampler from
the Multinest package.
Parameters
----------
model : model
A model from ``pycbc.inference.models``.
nlivepoints : int
Number of live points to use in sampler.
"""
name = "multinest"
_io = MultinestFile
def __init__(self, model, nlivepoints, checkpoint_interval=1000,
importance_nested_sampling=False,
evidence_tolerance=0.1, sampling_efficiency=0.01,
constraints=None):
try:
loglevel = logging.getLogger().getEffectiveLevel()
logging.getLogger().setLevel(logging.WARNING)
from pymultinest import Analyzer, run
self.run_multinest = run
self.analyzer = Analyzer
logging.getLogger().setLevel(loglevel)
except ImportError:
raise ImportError("pymultinest is not installed.")
super(MultinestSampler, self).__init__(model)
self._constraints = constraints
self._nlivepoints = nlivepoints
self._ndim = len(model.variable_params)
self._random_state = numpy.random.get_state()
self._checkpoint_interval = checkpoint_interval
self._ztol = evidence_tolerance
self._eff = sampling_efficiency
self._ins = importance_nested_sampling
self._samples = None
self._itercount = None
self._logz = None
self._dlogz = None
self._importance_logz = None
self._importance_dlogz = None
self.is_main_process = is_main_process()
@property
def io(self):
return self._io
@property
def niterations(self):
"""Get the current number of iterations.
"""
itercount = self._itercount
if itercount is None:
itercount = 0
return itercount
@property
def checkpoint_interval(self):
"""Get the number of iterations between checkpoints.
"""
return self._checkpoint_interval
@property
def nlivepoints(self):
"""Get the number of live points used in sampling.
"""
return self._nlivepoints
@property
def logz(self):
"""Get the current estimate of the log evidence.
"""
return self._logz
@property
def dlogz(self):
"""Get the current error estimate of the log evidence.
"""
return self._dlogz
@property
def importance_logz(self):
"""Get the current importance weighted estimate of the log
evidence.
"""
return self._importance_logz
@property
def importance_dlogz(self):
"""Get the current error estimate of the importance
weighted log evidence.
"""
return self._importance_dlogz
@property
def samples(self):
"""A dict mapping ``variable_params`` to arrays of samples currently
in memory.
"""
samples_dict = {p: self._samples[:, i] for i, p in
enumerate(self.model.variable_params)}
return samples_dict
@property
def model_stats(self):
"""A dict mapping the model's ``default_stats`` to arrays of values.
"""
stats = []
for sample in self._samples:
params = dict(zip(self.model.variable_params, sample))
if self.model.sampling_transforms is not None:
params = self.model.sampling_transforms.apply(params)
self.model.update(**params)
self.model.logposterior
stats.append(self.model.get_current_stats())
stats = numpy.array(stats)
return {s: stats[:, i] for i, s in enumerate(self.model.default_stats)}
def get_posterior_samples(self):
"""Read posterior samples from ASCII output file created by
multinest.
"""
post_file = self.backup_file[:-9]+'-post_equal_weights.dat'
return numpy.loadtxt(post_file, ndmin=2)
def check_if_finished(self):
"""Estimate remaining evidence to see if desired evidence-tolerance
stopping criterion has been reached.
"""
resume_file = self.backup_file[:-9] + '-resume.dat'
current_vol, _, _ = numpy.loadtxt(
resume_file, skiprows=6, unpack=True)
maxloglike = max(self.get_posterior_samples()[:, -1])
logz_remain = numpy.exp(maxloglike +
numpy.log(current_vol) - self.logz)
logging.info("Estimate of remaining logZ is %s", logz_remain)
done = logz_remain < self._ztol
return done
def set_initial_conditions(self, initial_distribution=None,
samples_file=None):
"""Sets the initial starting point for the sampler.
If a starting samples file is provided, will also load the random
state from it.
"""
# use samples file to set the state of the sampler
if samples_file is not None:
self.set_state_from_file(samples_file)
def resume_from_checkpoint(self):
"""Resume sampler from checkpoint
"""
pass
def set_state_from_file(self, filename):
"""Sets the state of the sampler back to the instance saved in a file.
"""
with self.io(filename, 'r') as f_p:
rstate = f_p.read_random_state()
# set the numpy random state
numpy.random.set_state(rstate)
# set sampler's generator to the same state
self._random_state = rstate
def loglikelihood(self, cube, *extra_args):
"""Log likelihood evaluator that gets passed to multinest.
"""
params = {p: v for p, v in zip(self.model.variable_params, cube)}
# apply transforms
if self.model.sampling_transforms is not None:
params = self.model.sampling_transforms.apply(params)
if self.model.waveform_transforms is not None:
params = apply_transforms(params, self.model.waveform_transforms)
# apply constraints
if (self._constraints is not None and
not all([c(params) for c in self._constraints])):
return -numpy.inf
self.model.update(**params)
return self.model.loglikelihood
def transform_prior(self, cube, *extra_args):
"""Transforms the unit hypercube that multinest makes its draws
from, into the prior space defined in the config file.
"""
dict_cube = dict(zip(self.model.variable_params, cube))
inv = self.model.prior_distribution.cdfinv(**dict_cube)
for i, param in enumerate(self.model.variable_params):
cube[i] = inv[param]
return cube
def run(self):
"""Runs the sampler until the specified evidence tolerance
is reached.
"""
if self.new_checkpoint:
self._itercount = 0
else:
self.set_initial_conditions(samples_file=self.checkpoint_file)
with self.io(self.checkpoint_file, "r") as f_p:
self._itercount = f_p.niterations
outputfiles_basename = self.backup_file[:-9] + '-'
analyzer = self.analyzer(self._ndim,
outputfiles_basename=outputfiles_basename)
iterinterval = self.checkpoint_interval
done = False
while not done:
logging.info("Running sampler for %s to %s iterations",
self.niterations, self.niterations + iterinterval)
# run multinest
self.run_multinest(self.loglikelihood, self.transform_prior,
self._ndim, n_live_points=self.nlivepoints,
evidence_tolerance=self._ztol,
sampling_efficiency=self._eff,
importance_nested_sampling=self._ins,
max_iter=iterinterval,
n_iter_before_update=iterinterval,
seed=numpy.random.randint(0, 1e6),
outputfiles_basename=outputfiles_basename,
multimodal=False, verbose=True)
# parse results from multinest output files
nest_stats = analyzer.get_mode_stats()
self._logz = nest_stats["nested sampling global log-evidence"]
self._dlogz = nest_stats[
"nested sampling global log-evidence error"]
if self._ins:
self._importance_logz = nest_stats[
"nested importance sampling global log-evidence"]
self._importance_dlogz = nest_stats[
"nested importance sampling global log-evidence error"]
self._samples = self.get_posterior_samples()[:, :-1]
logging.info("Have %s posterior samples", self._samples.shape[0])
# update the itercounter
self._itercount += iterinterval
# make sure there's at least 1 posterior sample
if self._samples.shape[0] == 0:
continue
# dump the current results
if self.is_main_process:
self.checkpoint()
# check if we're finished
done = self.check_if_finished()
if not self.is_main_process:
sys.exit()
def write_results(self, filename):
"""Writes samples, model stats, acceptance fraction, and random state
to the given file.
Parameters
-----------
filename : str
The file to write to. The file is opened using the ``io`` class
in an append state.
"""
with self.io(filename, 'a') as f_p:
# write samples
f_p.write_samples(self.samples, self.model.variable_params)
# write stats
f_p.write_samples(self.model_stats)
# write evidence
f_p.write_logevidence(self.logz, self.dlogz,
self.importance_logz,
self.importance_dlogz)
# write random state (use default numpy.random_state)
f_p.write_random_state()
def checkpoint(self):
"""Dumps current samples to the checkpoint file."""
logging.info("Writing samples to files")
for f_n in [self.checkpoint_file, self.backup_file]:
self.write_results(f_n)
with self.io(f_n, "a") as f_p:
f_p.write_niterations(self.niterations)
logging.info("Validating checkpoint and backup files")
checkpoint_valid = validate_checkpoint_files(
self.checkpoint_file, self.backup_file, check_nsamples=False)
if not checkpoint_valid:
raise IOError("error writing to checkpoint file")
def setup_output(self, output_file):
"""Sets up the sampler's checkpoint and output files.
The checkpoint file has the same name as the output file, but with
``.checkpoint`` appended to the name. A backup file will also be
created.
Parameters
----------
output_file : str
Name of the output file.
"""
if self.is_main_process:
setup_output(self, output_file)
else:
# child processes just store filenames
checkpoint_file = output_file + '.checkpoint'
backup_file = output_file + '.bkup'
self.checkpoint_file = checkpoint_file
self.backup_file = backup_file
self.checkpoint_valid = True
self.new_checkpoint = True
def finalize(self):
"""All data is written by the last checkpoint in the run method, so
this just passes."""
pass
@classmethod
def from_config(cls, cp, model, output_file=None, nprocesses=1,
use_mpi=False):
"""Loads the sampler from the given config file."""
section = "sampler"
# check name
assert cp.get(section, "name") == cls.name, (
"name in section [sampler] must match mine")
# get the number of live points to use
nlivepoints = int(cp.get(section, "nlivepoints"))
# get the checkpoint interval, if it's specified
checkpoint = get_optional_arg_from_config(
cp, section, 'checkpoint-interval', dtype=int)
# get the evidence tolerance, if specified
ztol = get_optional_arg_from_config(cp, section, 'evidence-tolerance',
dtype=float)
# get the sampling efficiency, if specified
eff = get_optional_arg_from_config(cp, section, 'sampling-efficiency',
dtype=float)
# get importance nested sampling setting, if specified
ins = get_optional_arg_from_config(cp, section,
'importance-nested-sampling',
dtype=bool)
# get constraints since we can't use the joint prior distribution
constraints = read_constraints_from_config(cp)
# build optional kwarg dict
kwarg_names = ['evidence_tolerance', 'sampling_efficiency',
'importance_nested_sampling',
'checkpoint_interval']
optional_kwargs = {k: v for k, v in
zip(kwarg_names, [ztol, eff, ins, checkpoint]) if
v is not None}
obj = cls(model, nlivepoints, constraints=constraints,
**optional_kwargs)
obj.setup_output(output_file)
return obj
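# Illustrative sketch (added for clarity; not part of the upstream module):
# a minimal configuration section that from_config() above would accept.
# The numeric values are example assumptions only.
#
#   [sampler]
#   name = multinest
#   nlivepoints = 2000
#   checkpoint-interval = 1000
#   evidence-tolerance = 0.1
#   sampling-efficiency = 0.01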
| gpl-3.0 |
florentx/OpenUpgrade | addons/hr_payroll_account/__openerp__.py | 120 | 1776 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Payroll Accounting',
'version': '1.0',
'category': 'Human Resources',
'description': """
Generic Payroll system Integrated with Accounting.
==================================================
* Expense Encoding
* Payment Encoding
* Company Contribution Management
""",
'author':'OpenERP SA',
'website':'http://www.openerp.com',
'images': ['images/hr_employee_payslip.jpeg'],
'depends': [
'hr_payroll',
'account',
'hr_expense'
],
'data': ['hr_payroll_account_view.xml'],
'demo': ['hr_payroll_account_demo.xml'],
'test': ['test/hr_payroll_account.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ilo10/scikit-learn | sklearn/dummy.py | 208 | 17370 | # Author: Mathieu Blondel <[email protected]>
# Arnaud Joly <[email protected]>
# Maheshakya Wijewardena <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
import scipy.sparse as sp
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .utils import check_random_state
from .utils.validation import check_array
from .utils.validation import check_consistent_length
from .utils.random import random_choice_csc
from .utils.stats import _weighted_percentile
from .utils.multiclass import class_distribution
class DummyClassifier(BaseEstimator, ClassifierMixin):
"""
DummyClassifier is a classifier that makes predictions using simple rules.
This classifier is useful as a simple baseline to compare with other
(real) classifiers. Do not use it for real problems.
Read more in the :ref:`User Guide <dummy_estimators>`.
Parameters
----------
strategy : str
Strategy to use to generate predictions.
* "stratified": generates predictions by respecting the training
set's class distribution.
* "most_frequent": always predicts the most frequent label in the
training set.
* "prior": always predicts the class that maximizes the class prior
(like "most_frequent") and ``predict_proba`` returns the class prior.
* "uniform": generates predictions uniformly at random.
* "constant": always predicts a constant label that is provided by
the user. This is useful for metrics that evaluate a non-majority
class
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use.
constant : int or str or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
classes_ : array or list of array of shape = [n_classes]
Class labels for each output.
n_classes_ : array or list of array of shape = [n_classes]
Number of label for each output.
class_prior_ : array or list of array of shape = [n_classes]
Probability of each class for each output.
n_outputs_ : int,
Number of outputs.
outputs_2d_ : bool,
True if the output at fit is 2d, else false.
sparse_output_ : bool,
True if the array returned from predict is to be in sparse CSC format.
Is automatically set to True if the input y is passed in sparse format.
"""
def __init__(self, strategy="stratified", random_state=None,
constant=None):
self.strategy = strategy
self.random_state = random_state
self.constant = constant
def fit(self, X, y, sample_weight=None):
"""Fit the random classifier.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("most_frequent", "stratified", "uniform",
"constant", "prior"):
raise ValueError("Unknown strategy type.")
if self.strategy == "uniform" and sp.issparse(y):
y = y.toarray()
warnings.warn('A local copy of the target data has been converted '
'to a numpy array. Predicting on sparse target data '
'with the uniform strategy would not save memory '
'and would be slower.',
UserWarning)
self.sparse_output_ = sp.issparse(y)
if not self.sparse_output_:
y = np.atleast_1d(y)
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if self.strategy == "constant":
if self.constant is None:
raise ValueError("Constant target value has to be specified "
"when the constant strategy is used.")
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError("Constant target value should have "
"shape (%d, 1)." % self.n_outputs_)
(self.classes_,
self.n_classes_,
self.class_prior_) = class_distribution(y, sample_weight)
if (self.strategy == "constant" and
any(constant[k] not in self.classes_[k]
for k in range(self.n_outputs_))):
# Checking in case of constant strategy if the constant
# provided by the user is in y.
raise ValueError("The constant target value must be "
"present in training data")
if self.n_outputs_ == 1 and not self.output_2d_:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
def predict(self, X):
"""Perform classification on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
# Compute probability only once
if self.strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
proba = [proba]
if self.sparse_output_:
class_prob = None
if self.strategy in ("most_frequent", "prior"):
classes_ = [np.array([cp.argmax()]) for cp in class_prior_]
elif self.strategy == "stratified":
class_prob = class_prior_
elif self.strategy == "uniform":
raise ValueError("Sparse target prediction is not "
"supported with the uniform strategy")
elif self.strategy == "constant":
classes_ = [np.array([c]) for c in constant]
y = random_choice_csc(n_samples, classes_, class_prob,
self.random_state)
else:
if self.strategy in ("most_frequent", "prior"):
y = np.tile([classes_[k][class_prior_[k].argmax()] for
k in range(self.n_outputs_)], [n_samples, 1])
elif self.strategy == "stratified":
y = np.vstack(classes_[k][proba[k].argmax(axis=1)] for
k in range(self.n_outputs_)).T
elif self.strategy == "uniform":
ret = [classes_[k][rs.randint(n_classes_[k], size=n_samples)]
for k in range(self.n_outputs_)]
y = np.vstack(ret).T
elif self.strategy == "constant":
y = np.tile(self.constant, (n_samples, 1))
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1 and not self.output_2d_:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in range(self.n_outputs_):
if self.strategy == "most_frequent":
ind = np.ones(n_samples, dtype=int) * class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self.strategy == "prior":
out = np.ones((n_samples, 1)) * class_prior_[k]
elif self.strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
elif self.strategy == "uniform":
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self.strategy == "constant":
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1 and not self.output_2d_:
P = P[0]
return P
def predict_log_proba(self, X):
"""
Return log probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the log probability of the sample for each class in
the model, where classes are ordered arithmetically for each
output.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
return [np.log(p) for p in proba]
class DummyRegressor(BaseEstimator, RegressorMixin):
"""
DummyRegressor is a regressor that makes predictions using
simple rules.
This regressor is useful as a simple baseline to compare with other
(real) regressors. Do not use it for real problems.
Read more in the :ref:`User Guide <dummy_estimators>`.
Parameters
----------
strategy : str
Strategy to use to generate predictions.
* "mean": always predicts the mean of the training set
* "median": always predicts the median of the training set
* "quantile": always predicts a specified quantile of the training set,
provided with the quantile parameter.
* "constant": always predicts a constant value that is provided by
the user.
constant : int or float or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
quantile : float in [0.0, 1.0]
The quantile to predict using the "quantile" strategy. A quantile of
0.5 corresponds to the median, while 0.0 to the minimum and 1.0 to the
maximum.
Attributes
----------
constant_ : float or array of shape [n_outputs]
Mean or median or quantile of the training targets or constant value
given by the user.
n_outputs_ : int,
Number of outputs.
outputs_2d_ : bool,
True if the output at fit is 2d, else false.
"""
def __init__(self, strategy="mean", constant=None, quantile=None):
self.strategy = strategy
self.constant = constant
self.quantile = quantile
def fit(self, X, y, sample_weight=None):
"""Fit the random regressor.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("mean", "median", "quantile", "constant"):
raise ValueError("Unknown strategy type: %s, expected "
"'mean', 'median', 'quantile' or 'constant'"
% self.strategy)
y = check_array(y, ensure_2d=False)
if len(y) == 0:
raise ValueError("y must not be empty.")
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
check_consistent_length(X, y, sample_weight)
if self.strategy == "mean":
self.constant_ = np.average(y, axis=0, weights=sample_weight)
elif self.strategy == "median":
if sample_weight is None:
self.constant_ = np.median(y, axis=0)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=50.)
for k in range(self.n_outputs_)]
elif self.strategy == "quantile":
if self.quantile is None or not np.isscalar(self.quantile):
raise ValueError("Quantile must be a scalar in the range "
"[0.0, 1.0], but got %s." % self.quantile)
percentile = self.quantile * 100.0
if sample_weight is None:
self.constant_ = np.percentile(y, axis=0, q=percentile)
else:
self.constant_ = [_weighted_percentile(y[:, k], sample_weight,
percentile=percentile)
for k in range(self.n_outputs_)]
elif self.strategy == "constant":
if self.constant is None:
raise TypeError("Constant target value has to be specified "
"when the constant strategy is used.")
self.constant = check_array(self.constant,
accept_sparse=['csr', 'csc', 'coo'],
ensure_2d=False, ensure_min_samples=0)
if self.output_2d_ and self.constant.shape[0] != y.shape[1]:
raise ValueError(
"Constant target value should have "
"shape (%d, 1)." % y.shape[1])
self.constant_ = self.constant
self.constant_ = np.reshape(self.constant_, (1, -1))
return self
def predict(self, X):
"""
Perform classification on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "constant_"):
raise ValueError("DummyRegressor not fitted.")
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples = X.shape[0]
y = np.ones((n_samples, 1)) * self.constant_
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
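# Illustrative sketch (added for clarity; not part of the upstream module):
# a minimal baseline comparison using the estimators defined above. The toy
# data below is an example assumption only.
def _example_dummy_baseline():
    X = np.zeros((6, 1))                    # features are ignored by the dummies
    y = np.array([0, 0, 0, 1, 1, 2])
    clf = DummyClassifier(strategy="most_frequent").fit(X, y)
    reg = DummyRegressor(strategy="median").fit(X, y.astype(np.float64))
    return clf.predict(X), reg.predict(X)   # all zeros, all 0.5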
| bsd-3-clause |
rickerc/neutron_audit | neutron/plugins/linuxbridge/common/constants.py | 5 | 1569 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
FLAT_VLAN_ID = -1
LOCAL_VLAN_ID = -2
# Values for network_type
TYPE_FLAT = 'flat'
TYPE_VLAN = 'vlan'
TYPE_VXLAN = 'vxlan'
TYPE_LOCAL = 'local'
TYPE_NONE = 'none'
# Supported VXLAN features
VXLAN_NONE = 'not_supported'
VXLAN_MCAST = 'multicast_flooding'
VXLAN_UCAST = 'unicast_flooding'
# Corresponding minimal kernel versions requirements
MIN_VXLAN_KVER = {VXLAN_MCAST: '3.8', VXLAN_UCAST: '3.11'}
# TODO(rkukura): Eventually remove this function, which provides
# temporary backward compatibility with pre-Havana RPC and DB vlan_id
# encoding.
def interpret_vlan_id(vlan_id):
"""Return (network_type, segmentation_id) tuple for encoded vlan_id."""
if vlan_id == LOCAL_VLAN_ID:
return (TYPE_LOCAL, None)
elif vlan_id == FLAT_VLAN_ID:
return (TYPE_FLAT, None)
else:
return (TYPE_VLAN, vlan_id)
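# Illustrative sketch (added for clarity; not part of the upstream module):
# how the three encodings above map to (network_type, segmentation_id) tuples.
def _example_interpret_vlan_id():
    assert interpret_vlan_id(LOCAL_VLAN_ID) == (TYPE_LOCAL, None)
    assert interpret_vlan_id(FLAT_VLAN_ID) == (TYPE_FLAT, None)
    assert interpret_vlan_id(42) == (TYPE_VLAN, 42)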
| apache-2.0 |
Elico-Corp/odoo_OCB | addons/l10n_hr/__openerp__.py | 18 | 1702 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Author: Goran Kliska
# mail: goran.kliska(AT)slobodni-programi.hr
# Copyright (C) 2011- Slobodni programi d.o.o., Zagreb
# Contributions:
# Tomislav Bošnjaković, Storm Computers d.o.o. :
# - account types
{
"name": "Croatia - Accounting (RRIF 2012)",
"description": """
Croatian localisation.
======================
Author: Goran Kliska, Slobodni programi d.o.o., Zagreb
https://www.slobodni-programi.hr
Contributions:
Tomislav Bošnjaković, Storm Computers: account types
Ivan Vađić, Slobodni programi: account types
Description:
Croatian Chart of Accounts (RRIF ver.2012)
RRIF chart of accounts for entrepreneurs for 2012
Account types
Chart of accounts based on RRIF's plan, adapted by shortening names and adding analytic accounts
Tax groups according to the tax return
VAT form taxes
Other taxes
Basic fiscal positions
Data sources:
https://www.rrif.hr/dok/preuzimanje/rrif-rp2011.rar
https://www.rrif.hr/dok/preuzimanje/rrif-rp2012.rar
""",
"version": "13.0",
"author": "OpenERP Croatian Community",
'category': 'Localization',
"website": "https://code.launchpad.net/openobject-croatia",
'depends': [
'account',
],
'data': [
'data/account_chart_template.xml',
'data/account.account.type.csv',
'data/account.account.template.csv',
'data/account_chart_tag.xml',
'data/account.tax.template.csv',
'data/fiscal_position_template.xml',
'data/account_chart_template.yml',
],
"demo": [],
'test': [],
"active": False,
"installable": True,
}
| agpl-3.0 |
OPU-Surveillance-System/monitoring | master/scripts/planner/map_converter.py | 1 | 12283 | """
Convert the GUI's map into a grid.
"""
import utm
import numpy as np
import math
from shapely.geometry import Polygon, Point
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib import cm
from sys import path
from tqdm import tqdm
import pickle
import datetime
import time
import random
path.append("..")
import settings
import astar
def project_to_virtual(point):
"""
Project a given point into a virtual space aligned with east.
Keyword arguments:
point: Point expressed by Latitude/Longitude values
Output expressed by UTM values
"""
point = utm.from_latlon(point[0], point[1])
x = math.cos(settings.ANGLE) * (point[0] - settings.RP_UTM[0]) - math.sin(settings.ANGLE) * (point[1] - settings.RP_UTM[1]) + settings.RP_UTM[0]
y = math.sin(settings.ANGLE) * (point[0] - settings.RP_UTM[0]) + math.cos(settings.ANGLE) * (point[1] - settings.RP_UTM[1]) + settings.RP_UTM[1]
return x, y #Easting, Northing
def project_to_original(point):
"""
Project a given point into the real space.
Keyword arguments:
point: Point expressed by UTM values
Output expressed by Latitude/Longitude values
"""
x = math.cos(-settings.ANGLE) * (point[0] - settings.RP_UTM[0]) - math.sin(-settings.ANGLE) * (point[1] - settings.RP_UTM[1]) + settings.RP_UTM[0]
y = math.sin(-settings.ANGLE) * (point[0] - settings.RP_UTM[0]) + math.cos(-settings.ANGLE) * (point[1] - settings.RP_UTM[1]) + settings.RP_UTM[1]
point = utm.to_latlon(x, y, 53, "S")
return point[0], point[1] #Latitude, Longitude
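# Illustrative sketch (added for clarity; not part of the upstream module):
# the two projections above are inverses of each other (up to UTM rounding),
# so a point inside UTM zone 53 should survive a round trip. The coordinates
# below are example assumptions only.
def _example_projection_round_trip(lat=34.545, lon=135.506):
    x, y = project_to_virtual((lat, lon))
    lat2, lon2 = project_to_original((x, y))
    return abs(lat - lat2) < 1e-4 and abs(lon - lon2) < 1e-4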
class Mapper():
"""
Represent the environment as grids.
"""
def __init__(self, limits, starting_point, obstacles, default_targets):
"""
Instantiate a Mapper object.
Keyword arguments:
limits: environment boundaries
starting_point: drones' patrol starting point
obstacles: array of non admissible zones
"""
#Environment boundaries
self.limits = limits
self.projected_limits = self.project_limits()
#Coefficients for index calculations
self.X = self.projected_limits[0][0]
self.Y = self.projected_limits[2][0]
self.Z = self.Y - self.X
self.A = self.projected_limits[0][1]
self.B = self.projected_limits[2][1]
self.C = self.A - self.B
#Environment elements
self.starting_point = [self.latlong_to_index(s) for s in starting_point]
self.obstacles = obstacles
self.default_targets = [self.latlong_to_index(t) for t in default_targets]
#self.default_targets = self.get_random_target_points(50)
for s in self.starting_point:
self.default_targets.append(s)
self.world = self.create_world()
for d in self.default_targets:
self.world[d[1]][d[0]] = 3
for s in self.starting_point:
self.world[s[1]][s[0]] = 2
print("Computing shortest paths to default targets...")
self.paths = {(d1, d2):astar.astar(self.world, tuple(reversed(d1)), tuple(reversed(d2))) for d1 in tqdm(self.default_targets) for d2 in self.default_targets if d1 != d2}
for s in self.starting_point:
self.default_targets.remove(s)
#self.default_targets = self.default_targets[:-1] #Removing the starting point from target points list
print("Paths computed")
self.mapped_paths = np.copy(self.world)
for k in self.paths:
if self.paths[k][0]:
for c in self.paths[k][0]:
self.mapped_paths[c[0]][c[1]] = 4
for p in self.paths:
if self.paths[p][1] == 1:
print(p, self.paths[p])
#Environment uncertainty
self.uncertainty_grid = np.ones((settings.Y_SIZE, settings.X_SIZE))
creation_date = datetime.datetime.now()
self.last_visit = [[creation_date for x in range(settings.X_SIZE)] for y in range(settings.Y_SIZE)]
#time.sleep(1)
#self.update_uncertainty_grid()
#self.plot_uncertainty_grid()
#self.plot_world_default_targets()
def get_random_target_points(self, num):
"""
"""
i = 0
proj_obs = [[self.latlong_to_index(o) for o in obs] for obs in self.obstacles]
poly_obs = [Polygon(o) for o in proj_obs]
random_target_points = []
while i < num:
is_inadmissible = True
while is_inadmissible:
x = random.randint(0, settings.X_SIZE - 1)
y = random.randint(0, settings.Y_SIZE - 1)
if not self.is_non_admissible((x, y), poly_obs):
is_inadmissible = False
random_target_points.append((x, y))
i += 1
return random_target_points
def project_limits(self):
"""
Represent the environment's limits into the projected space and rectangularize the environment.
"""
#TODO: Find a better way to have a rectangular representation of the map (think security)
top_left = project_to_virtual(self.limits[0])
top_left = list(top_left)
bottom_left = project_to_virtual(self.limits[1])
bottom_left = list(bottom_left)
bottom_right = project_to_virtual(self.limits[2])
bottom_right = list(bottom_right)
top_right = project_to_virtual(self.limits[3])
top_right = list(top_right)
top_right[1] = top_left[1]
top_right[0] = bottom_right[0]
bottom_left[0] = top_left[0]
bottom_left[1] = bottom_right[1]
return top_left, bottom_left, bottom_right, top_right
def latlong_to_index(self, point):
"""
Convert a given geographical point (expressed by latitude and longitude values) in a world's index.
Keyword arguments:
point: point in latitude/longitude
"""
x, y = project_to_virtual(point)
x = int(((x - self.X) / self.Z) * settings.X_SIZE)
y = int(((self.A - y) / self.C) * settings.Y_SIZE)
return x, y
def index_to_latlong(self, point):
"""
Convert a given world's index (expressed by x and y values) in latitude and longitude values.
Keyword arguments:
point: point index
"""
x = point[0]
y = point[1]
x = ((x * self.Z) / settings.X_SIZE) + self.X
y = self.A - ((y * self.C) / settings.Y_SIZE)
lat, long = project_to_original((x, y))
return lat, long
def is_non_admissible(self, point, obs_poly):
"""
Check if a given point is out of campus limits or is an obstacle.
"""
check = False
x = point[0]
y = point[1]
pnt_poly = Point(x, y)
for p in obs_poly:
if pnt_poly.intersects(p) == True:
check = True
break
return check
def create_world(self):
"""
Create a grid representing the given environment.
Grid values:
0: admissible cell
1: non admissible cell
"""
print("Creating world")
world = np.zeros((settings.Y_SIZE, settings.X_SIZE))
proj_obs = [[self.latlong_to_index(o) for o in obs] for obs in self.obstacles]
poly_obs = [Polygon(o) for o in proj_obs]
for j in tqdm(range(0, settings.Y_SIZE)):
for i in range(0, settings.X_SIZE):
if self.is_non_admissible((i, j), poly_obs):
world[j][i] = 1
else:
world[j][i] = 0
print("World created")
return world
def convert_plan(self, plan, nb_drone):
"""
Convert the given plan's points into latitude/longitude points.
Keyword arguments:
plan: A plan of paths
nb_drone: Number of drones
"""
converted_plan = list(plan)
for d in range(nb_drone):
for p in range(len(plan[d])):
for pt in range(len(plan[d][p])):
converted_plan[d][p][pt] = self.index_to_latlong(plan[d][p][pt])
return converted_plan
def update_visit_history(self, visit_list):
"""
Update the visit's register.
Keyword arguments:
visit_list: A dictionary associating dates of visit to points (index).
"""
print("Updating visit history")
for visited_point in visit_list:
self.last_visit[visited_point[1]][visited_point[0]] = visit_list[visited_point]
def update_uncertainty_grid(self):
"""
Update the uncertainty level.
"""
timeshot = datetime.datetime.now()
print("Updating uncertainty grid")
for y in tqdm(range(0, settings.Y_SIZE)):
for x in range(0, settings.X_SIZE):
diff = timeshot - self.last_visit[y][x]
self.uncertainty_grid[y][x] = 1 - math.exp(settings.LAMBDA * diff.seconds)
#self.uncertainty_grid[y][x] = random.random()
#self.uncertainty_grid[0][0] = 0.01
#self.uncertainty_grid[10][10] = 0.39
def plot_world_default_targets(self, show=True):
"""
Plot default targets.
"""
print("Ploting default targets")
fig, ax = plt.subplots()
cmap = colors.ListedColormap(['white', 'black', 'red', 'orange'])
ax.imshow(self.world, interpolation="none", cmap=cmap)
for t in self.default_targets:
circle1 = plt.Circle((t[0], t[1]), 10, color='grey')
ax.add_artist(circle1)
ax.scatter(self.starting_point[0][0], self.starting_point[0][1], marker="*", s=30)
save = True
if show:
plt.show()
save = False
if save:
#plt.savefig('data/plot/world/map_grid_' + str(settings.X_SIZE) + 'x' + str(settings.Y_SIZE) + '.png', dpi=800)
plt.savefig('data/plot/world/map_grid_target_points' + str(settings.X_SIZE) + 'x' + str(settings.Y_SIZE) + '.png')
def plot_world(self, show=True):
"""
Plot the environment.
"""
print("Ploting world")
cmap = colors.ListedColormap(['white', 'black', 'red', 'orange'])
plt.imshow(self.world, interpolation="none", cmap=cmap)
save = True
if show:
plt.show()
save = False
if save:
#plt.savefig('data/plot/world/map_grid_' + str(settings.X_SIZE) + 'x' + str(settings.Y_SIZE) + '.png', dpi=800)
plt.savefig('data/plot/world/map_grid_' + str(settings.X_SIZE) + 'x' + str(settings.Y_SIZE) + '.png')
def plot_paths(self, show=True):
"""
Plot the computed paths over the environment.
"""
print("Ploting paths")
fig, ax = plt.subplots()
cmap = colors.ListedColormap(['white', 'black', 'red', 'orange', 'blue'])
cax = ax.imshow(self.mapped_paths, interpolation="none", cmap=cmap)
for t in self.default_targets:
circ = plt.Circle((t[0], t[1]), radius=2, color='red')
ax.add_patch(circ)
save = True
if show:
plt.show()
save = False
if save:
#plt.savefig('data/plot/paths/map_grid_' + str(settings.X_SIZE) + 'x' + str(settings.Y_SIZE) + '.png', dpi=800)
plt.savefig('data/plot/paths/map_grid_' + str(settings.X_SIZE) + 'x' + str(settings.Y_SIZE) + '.png')
def plot_uncertainty_grid(self):
"""
Plot the uncertainty level.
"""
print("Ploting uncertainty grid")
i,j = np.unravel_index(self.uncertainty_grid.argmax(), self.uncertainty_grid.shape)
max_proba = self.uncertainty_grid[i, j]
i,j = np.unravel_index(self.uncertainty_grid.argmin(), self.uncertainty_grid.shape)
min_proba = self.uncertainty_grid[i, j]
middle_proba = max_proba / 2
fig, ax = plt.subplots()
cax = ax.imshow(self.uncertainty_grid, interpolation="Nearest", cmap=cm.Greys)
ax.set_title('Uncertainty Grid')
cbar = fig.colorbar(cax, ticks=[min_proba, middle_proba, max_proba])
cbar.ax.set_yticklabels([str(int(min_proba * 100)) + '%', str(int(middle_proba * 100)) + '%', str(int(max_proba * 100)) + '%'])
plt.show()
| mit |
XiaodunServerGroup/medicalmooc | lms/djangoapps/wechat/features/login.py | 28 | 1604 | #pylint: disable=C0111
#pylint: disable=W0621
from lettuce import step, world
from django.contrib.auth.models import User
@step('I am an unactivated user$')
def i_am_an_unactivated_user(step):
user_is_an_unactivated_user('robot')
@step('I am an activated user$')
def i_am_an_activated_user(step):
user_is_an_activated_user('robot')
@step('I submit my credentials on the login form')
def i_submit_my_credentials_on_the_login_form(step):
fill_in_the_login_form('email', '[email protected]')
fill_in_the_login_form('password', 'test')
def submit_login_form():
login_form = world.browser.find_by_css('form#login-form')
login_form.find_by_name('submit').click()
world.retry_on_exception(submit_login_form)
@step(u'I should see the login error message "([^"]*)"$')
def i_should_see_the_login_error_message(step, msg):
login_error_div = world.browser.find_by_css('.submission-error.is-shown')
assert (msg in login_error_div.text)
@step(u'click the dropdown arrow$')
def click_the_dropdown(step):
world.css_click('.dropdown')
#### helper functions
def user_is_an_unactivated_user(uname):
u = User.objects.get(username=uname)
u.is_active = False
u.save()
def user_is_an_activated_user(uname):
u = User.objects.get(username=uname)
u.is_active = True
u.save()
def fill_in_the_login_form(field, value):
def fill_login_form():
login_form = world.browser.find_by_css('form#login-form')
form_field = login_form.find_by_name(field)
form_field.fill(value)
world.retry_on_exception(fill_login_form)
| agpl-3.0 |
fengthedroid/s2protocol | protocol28272.py | 21 | 27047 | # Copyright (c) 2013 Blizzard Entertainment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from decoders import *
# Decoding instructions for each protocol type.
typeinfos = [
('_int',[(0,7)]), #0
('_int',[(0,4)]), #1
('_int',[(0,5)]), #2
('_int',[(0,6)]), #3
('_int',[(0,14)]), #4
('_int',[(0,22)]), #5
('_int',[(0,32)]), #6
('_choice',[(0,2),{0:('m_uint6',3),1:('m_uint14',4),2:('m_uint22',5),3:('m_uint32',6)}]), #7
('_struct',[[('m_userId',2,-1)]]), #8
('_blob',[(0,8)]), #9
('_int',[(0,8)]), #10
('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',6,4),('m_baseBuild',6,5)]]), #11
('_int',[(0,3)]), #12
('_bool',[]), #13
('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',6,3),('m_useScaledTime',13,4)]]), #14
('_fourcc',[]), #15
('_blob',[(0,7)]), #16
('_int',[(0,64)]), #17
('_struct',[[('m_region',10,0),('m_programId',15,1),('m_realm',6,2),('m_name',16,3),('m_id',17,4)]]), #18
('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), #19
('_int',[(0,2)]), #20
('_optional',[10]), #21
('_struct',[[('m_name',9,0),('m_toon',18,1),('m_race',9,2),('m_color',19,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',20,7),('m_result',20,8),('m_workingSetSlotId',21,9)]]), #22
('_array',[(0,5),22]), #23
('_optional',[23]), #24
('_blob',[(0,10)]), #25
('_blob',[(0,11)]), #26
('_struct',[[('m_file',26,0)]]), #27
('_optional',[13]), #28
('_int',[(-9223372036854775808,64)]), #29
('_blob',[(0,12)]), #30
('_blob',[(40,0)]), #31
('_array',[(0,6),31]), #32
('_optional',[32]), #33
('_array',[(0,6),26]), #34
('_optional',[34]), #35
('_struct',[[('m_playerList',24,0),('m_title',25,1),('m_difficulty',9,2),('m_thumbnail',27,3),('m_isBlizzardMap',13,4),('m_restartAsTransitionMap',28,16),('m_timeUTC',29,5),('m_timeLocalOffset',29,6),('m_description',30,7),('m_imageFilePath',26,8),('m_campaignIndex',10,15),('m_mapFileName',26,9),('m_cacheHandles',33,10),('m_miniSave',13,11),('m_gameSpeed',12,12),('m_defaultDifficulty',3,13),('m_modPaths',35,14)]]), #36
('_optional',[9]), #37
('_optional',[31]), #38
('_optional',[6]), #39
('_struct',[[('m_race',21,-1)]]), #40
('_struct',[[('m_team',21,-1)]]), #41
('_struct',[[('m_name',9,-13),('m_clanTag',37,-12),('m_clanLogo',38,-11),('m_highestLeague',21,-10),('m_combinedRaceLevels',39,-9),('m_randomSeed',6,-8),('m_racePreference',40,-7),('m_teamPreference',41,-6),('m_testMap',13,-5),('m_testAuto',13,-4),('m_examine',13,-3),('m_customInterface',13,-2),('m_observe',20,-1)]]), #42
('_array',[(0,5),42]), #43
('_struct',[[('m_lockTeams',13,-12),('m_teamsTogether',13,-11),('m_advancedSharedControl',13,-10),('m_randomRaces',13,-9),('m_battleNet',13,-8),('m_amm',13,-7),('m_competitive',13,-6),('m_noVictoryOrDefeat',13,-5),('m_fog',20,-4),('m_observers',20,-3),('m_userDifficulty',20,-2),('m_clientDebugFlags',17,-1)]]), #44
('_int',[(1,4)]), #45
('_int',[(1,8)]), #46
('_bitarray',[(0,6)]), #47
('_bitarray',[(0,8)]), #48
('_bitarray',[(0,2)]), #49
('_bitarray',[(0,7)]), #50
('_struct',[[('m_allowedColors',47,-6),('m_allowedRaces',48,-5),('m_allowedDifficulty',47,-4),('m_allowedControls',48,-3),('m_allowedObserveTypes',49,-2),('m_allowedAIBuilds',50,-1)]]), #51
('_array',[(0,5),51]), #52
('_struct',[[('m_randomValue',6,-26),('m_gameCacheName',25,-25),('m_gameOptions',44,-24),('m_gameSpeed',12,-23),('m_gameType',12,-22),('m_maxUsers',2,-21),('m_maxObservers',2,-20),('m_maxPlayers',2,-19),('m_maxTeams',45,-18),('m_maxColors',3,-17),('m_maxRaces',46,-16),('m_maxControls',10,-15),('m_mapSizeX',10,-14),('m_mapSizeY',10,-13),('m_mapFileSyncChecksum',6,-12),('m_mapFileName',26,-11),('m_mapAuthorName',9,-10),('m_modFileSyncChecksum',6,-9),('m_slotDescriptions',52,-8),('m_defaultDifficulty',3,-7),('m_defaultAIBuild',0,-6),('m_cacheHandles',32,-5),('m_hasExtensionMod',13,-4),('m_isBlizzardMap',13,-3),('m_isPremadeFFA',13,-2),('m_isCoopMode',13,-1)]]), #53
('_optional',[1]), #54
('_optional',[2]), #55
('_struct',[[('m_color',55,-1)]]), #56
('_array',[(0,6),6]), #57
('_array',[(0,9),6]), #58
('_struct',[[('m_control',10,-13),('m_userId',54,-12),('m_teamId',1,-11),('m_colorPref',56,-10),('m_racePref',40,-9),('m_difficulty',3,-8),('m_aiBuild',0,-7),('m_handicap',0,-6),('m_observe',20,-5),('m_workingSetSlotId',21,-4),('m_rewards',57,-3),('m_toonHandle',16,-2),('m_licenses',58,-1)]]), #59
('_array',[(0,5),59]), #60
('_struct',[[('m_phase',12,-10),('m_maxUsers',2,-9),('m_maxObservers',2,-8),('m_slots',60,-7),('m_randomSeed',6,-6),('m_hostUserId',54,-5),('m_isSinglePlayer',13,-4),('m_gameDuration',6,-3),('m_defaultDifficulty',3,-2),('m_defaultAIBuild',0,-1)]]), #61
('_struct',[[('m_userInitialData',43,-3),('m_gameDescription',53,-2),('m_lobbyState',61,-1)]]), #62
('_struct',[[('m_syncLobbyState',62,-1)]]), #63
('_struct',[[('m_name',16,-1)]]), #64
('_blob',[(0,6)]), #65
('_struct',[[('m_name',65,-1)]]), #66
('_struct',[[('m_name',65,-3),('m_type',6,-2),('m_data',16,-1)]]), #67
('_struct',[[('m_type',6,-3),('m_name',65,-2),('m_data',30,-1)]]), #68
('_array',[(0,5),10]), #69
('_struct',[[('m_signature',69,-2),('m_toonHandle',16,-1)]]), #70
('_struct',[[('m_gameFullyDownloaded',13,-8),('m_developmentCheatsEnabled',13,-7),('m_multiplayerCheatsEnabled',13,-6),('m_syncChecksummingEnabled',13,-5),('m_isMapToMapTransition',13,-4),('m_startingRally',13,-3),('m_debugPauseEnabled',13,-2),('m_baseBuildNum',6,-1)]]), #71
('_struct',[[]]), #72
('_int',[(0,16)]), #73
('_struct',[[('x',73,-2),('y',73,-1)]]), #74
('_struct',[[('m_which',12,-2),('m_target',74,-1)]]), #75
('_struct',[[('m_fileName',26,-5),('m_automatic',13,-4),('m_overwrite',13,-3),('m_name',9,-2),('m_description',25,-1)]]), #76
('_int',[(-2147483648,32)]), #77
('_struct',[[('x',77,-2),('y',77,-1)]]), #78
('_struct',[[('m_point',78,-4),('m_time',77,-3),('m_verb',25,-2),('m_arguments',25,-1)]]), #79
('_struct',[[('m_data',79,-1)]]), #80
('_int',[(0,20)]), #81
('_struct',[[('m_abilLink',73,-3),('m_abilCmdIndex',2,-2),('m_abilCmdData',21,-1)]]), #82
('_optional',[82]), #83
('_null',[]), #84
('_struct',[[('x',81,-3),('y',81,-2),('z',77,-1)]]), #85
('_struct',[[('m_targetUnitFlags',10,-7),('m_timer',10,-6),('m_tag',6,-5),('m_snapshotUnitLink',73,-4),('m_snapshotControlPlayerId',54,-3),('m_snapshotUpkeepPlayerId',54,-2),('m_snapshotPoint',85,-1)]]), #86
('_choice',[(0,2),{0:('None',84),1:('TargetPoint',85),2:('TargetUnit',86),3:('Data',6)}]), #87
('_struct',[[('m_cmdFlags',81,-4),('m_abil',83,-3),('m_data',87,-2),('m_otherUnit',39,-1)]]), #88
('_int',[(0,9)]), #89
('_bitarray',[(0,9)]), #90
('_array',[(0,9),89]), #91
('_choice',[(0,2),{0:('None',84),1:('Mask',90),2:('OneIndices',91),3:('ZeroIndices',91)}]), #92
('_struct',[[('m_unitLink',73,-4),('m_subgroupPriority',10,-3),('m_intraSubgroupPriority',10,-2),('m_count',89,-1)]]), #93
('_array',[(0,9),93]), #94
('_struct',[[('m_subgroupIndex',89,-4),('m_removeMask',92,-3),('m_addSubgroups',94,-2),('m_addUnitTags',58,-1)]]), #95
('_struct',[[('m_controlGroupId',1,-2),('m_delta',95,-1)]]), #96
('_struct',[[('m_controlGroupIndex',1,-3),('m_controlGroupUpdate',20,-2),('m_mask',92,-1)]]), #97
('_struct',[[('m_count',89,-6),('m_subgroupCount',89,-5),('m_activeSubgroupIndex',89,-4),('m_unitTagsChecksum',6,-3),('m_subgroupIndicesChecksum',6,-2),('m_subgroupsChecksum',6,-1)]]), #98
('_struct',[[('m_controlGroupId',1,-2),('m_selectionSyncData',98,-1)]]), #99
('_array',[(0,3),77]), #100
('_struct',[[('m_recipientId',1,-2),('m_resources',100,-1)]]), #101
('_struct',[[('m_chatMessage',25,-1)]]), #102
('_int',[(-128,8)]), #103
('_struct',[[('x',77,-3),('y',77,-2),('z',77,-1)]]), #104
('_struct',[[('m_beacon',103,-9),('m_ally',103,-8),('m_flags',103,-7),('m_build',103,-6),('m_targetUnitTag',6,-5),('m_targetUnitSnapshotUnitLink',73,-4),('m_targetUnitSnapshotUpkeepPlayerId',103,-3),('m_targetUnitSnapshotControlPlayerId',103,-2),('m_targetPoint',104,-1)]]), #105
('_struct',[[('m_speed',12,-1)]]), #106
('_struct',[[('m_delta',103,-1)]]), #107
('_struct',[[('m_point',78,-3),('m_unit',6,-2),('m_pingedMinimap',13,-1)]]), #108
('_struct',[[('m_verb',25,-2),('m_arguments',25,-1)]]), #109
('_struct',[[('m_alliance',6,-2),('m_control',6,-1)]]), #110
('_struct',[[('m_unitTag',6,-1)]]), #111
('_struct',[[('m_unitTag',6,-2),('m_flags',10,-1)]]), #112
('_struct',[[('m_conversationId',77,-2),('m_replyId',77,-1)]]), #113
('_optional',[16]), #114
('_struct',[[('m_gameUserId',1,-6),('m_observe',20,-5),('m_name',9,-4),('m_toonHandle',114,-3),('m_clanTag',37,-2),('m_clanLogo',38,-1)]]), #115
('_array',[(0,5),115]), #116
('_int',[(0,1)]), #117
('_struct',[[('m_userInfos',116,-2),('m_method',117,-1)]]), #118
('_struct',[[('m_purchaseItemId',77,-1)]]), #119
('_struct',[[('m_difficultyLevel',77,-1)]]), #120
('_choice',[(0,3),{0:('None',84),1:('Checked',13),2:('ValueChanged',6),3:('SelectionChanged',77),4:('TextChanged',26),5:('MouseButton',6)}]), #121
('_struct',[[('m_controlId',77,-3),('m_eventType',77,-2),('m_eventData',121,-1)]]), #122
('_struct',[[('m_soundHash',6,-2),('m_length',6,-1)]]), #123
('_array',[(0,7),6]), #124
('_struct',[[('m_soundHash',124,-2),('m_length',124,-1)]]), #125
('_struct',[[('m_syncInfo',125,-1)]]), #126
('_struct',[[('m_sound',6,-1)]]), #127
('_struct',[[('m_transmissionId',77,-2),('m_thread',6,-1)]]), #128
('_struct',[[('m_transmissionId',77,-1)]]), #129
('_optional',[74]), #130
('_optional',[73]), #131
('_optional',[103]), #132
('_struct',[[('m_target',130,-5),('m_distance',131,-4),('m_pitch',131,-3),('m_yaw',131,-2),('m_reason',132,-1)]]), #133
('_struct',[[('m_skipType',117,-1)]]), #134
('_int',[(0,11)]), #135
('_struct',[[('x',135,-2),('y',135,-1)]]), #136
('_struct',[[('m_button',6,-5),('m_down',13,-4),('m_posUI',136,-3),('m_posWorld',85,-2),('m_flags',103,-1)]]), #137
('_struct',[[('m_posUI',136,-3),('m_posWorld',85,-2),('m_flags',103,-1)]]), #138
('_struct',[[('m_achievementLink',73,-1)]]), #139
('_struct',[[('m_abilLink',73,-3),('m_abilCmdIndex',2,-2),('m_state',103,-1)]]), #140
('_struct',[[('m_soundtrack',6,-1)]]), #141
('_struct',[[('m_planetId',77,-1)]]), #142
('_struct',[[('m_key',103,-2),('m_flags',103,-1)]]), #143
('_struct',[[('m_resources',100,-1)]]), #144
('_struct',[[('m_fulfillRequestId',77,-1)]]), #145
('_struct',[[('m_cancelRequestId',77,-1)]]), #146
('_struct',[[('m_researchItemId',77,-1)]]), #147
('_struct',[[('m_mercenaryId',77,-1)]]), #148
('_struct',[[('m_battleReportId',77,-2),('m_difficultyLevel',77,-1)]]), #149
('_struct',[[('m_battleReportId',77,-1)]]), #150
('_int',[(0,19)]), #151
('_struct',[[('m_decrementMs',151,-1)]]), #152
('_struct',[[('m_portraitId',77,-1)]]), #153
('_struct',[[('m_functionName',16,-1)]]), #154
('_struct',[[('m_result',77,-1)]]), #155
('_struct',[[('m_gameMenuItemIndex',77,-1)]]), #156
('_struct',[[('m_purchaseCategoryId',77,-1)]]), #157
('_struct',[[('m_button',73,-1)]]), #158
('_struct',[[('m_cutsceneId',77,-2),('m_bookmarkName',16,-1)]]), #159
('_struct',[[('m_cutsceneId',77,-1)]]), #160
('_struct',[[('m_cutsceneId',77,-3),('m_conversationLine',16,-2),('m_altConversationLine',16,-1)]]), #161
('_struct',[[('m_cutsceneId',77,-2),('m_conversationLine',16,-1)]]), #162
('_struct',[[('m_observe',20,-5),('m_name',9,-4),('m_toonHandle',114,-3),('m_clanTag',37,-2),('m_clanLogo',38,-1)]]), #163
('_struct',[[('m_recipient',12,-2),('m_string',26,-1)]]), #164
('_struct',[[('m_recipient',12,-2),('m_point',78,-1)]]), #165
('_struct',[[('m_progress',77,-1)]]), #166
('_struct',[[('m_scoreValueMineralsCurrent',77,0),('m_scoreValueVespeneCurrent',77,1),('m_scoreValueMineralsCollectionRate',77,2),('m_scoreValueVespeneCollectionRate',77,3),('m_scoreValueWorkersActiveCount',77,4),('m_scoreValueMineralsUsedInProgressArmy',77,5),('m_scoreValueMineralsUsedInProgressEconomy',77,6),('m_scoreValueMineralsUsedInProgressTechnology',77,7),('m_scoreValueVespeneUsedInProgressArmy',77,8),('m_scoreValueVespeneUsedInProgressEconomy',77,9),('m_scoreValueVespeneUsedInProgressTechnology',77,10),('m_scoreValueMineralsUsedCurrentArmy',77,11),('m_scoreValueMineralsUsedCurrentEconomy',77,12),('m_scoreValueMineralsUsedCurrentTechnology',77,13),('m_scoreValueVespeneUsedCurrentArmy',77,14),('m_scoreValueVespeneUsedCurrentEconomy',77,15),('m_scoreValueVespeneUsedCurrentTechnology',77,16),('m_scoreValueMineralsLostArmy',77,17),('m_scoreValueMineralsLostEconomy',77,18),('m_scoreValueMineralsLostTechnology',77,19),('m_scoreValueVespeneLostArmy',77,20),('m_scoreValueVespeneLostEconomy',77,21),('m_scoreValueVespeneLostTechnology',77,22),('m_scoreValueMineralsKilledArmy',77,23),('m_scoreValueMineralsKilledEconomy',77,24),('m_scoreValueMineralsKilledTechnology',77,25),('m_scoreValueVespeneKilledArmy',77,26),('m_scoreValueVespeneKilledEconomy',77,27),('m_scoreValueVespeneKilledTechnology',77,28),('m_scoreValueFoodUsed',77,29),('m_scoreValueFoodMade',77,30),('m_scoreValueMineralsUsedActiveForces',77,31),('m_scoreValueVespeneUsedActiveForces',77,32),('m_scoreValueMineralsFriendlyFireArmy',77,33),('m_scoreValueMineralsFriendlyFireEconomy',77,34),('m_scoreValueMineralsFriendlyFireTechnology',77,35),('m_scoreValueVespeneFriendlyFireArmy',77,36),('m_scoreValueVespeneFriendlyFireEconomy',77,37),('m_scoreValueVespeneFriendlyFireTechnology',77,38)]]), #167
('_struct',[[('m_playerId',1,0),('m_stats',167,1)]]), #168
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',25,2),('m_controlPlayerId',1,3),('m_upkeepPlayerId',1,4),('m_x',10,5),('m_y',10,6)]]), #169
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_killerPlayerId',54,2),('m_x',10,3),('m_y',10,4),('m_killerUnitTagIndex',39,5),('m_killerUnitTagRecycle',39,6)]]), #170
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_controlPlayerId',1,2),('m_upkeepPlayerId',1,3)]]), #171
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',25,2)]]), #172
('_struct',[[('m_playerId',1,0),('m_upgradeTypeName',25,1),('m_count',77,2)]]), #173
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1)]]), #174
('_array',[(0,10),77]), #175
('_struct',[[('m_firstUnitIndex',6,0),('m_items',175,1)]]), #176
('_struct',[[('m_playerId',1,0),('m_type',6,1),('m_userId',39,2),('m_slotId',39,3)]]), #177
]
# Map from protocol NNet.Game.*Event eventid to (typeid, name)
game_event_types = {
5: (72, 'NNet.Game.SUserFinishedLoadingSyncEvent'),
7: (71, 'NNet.Game.SUserOptionsEvent'),
9: (64, 'NNet.Game.SBankFileEvent'),
10: (66, 'NNet.Game.SBankSectionEvent'),
11: (67, 'NNet.Game.SBankKeyEvent'),
12: (68, 'NNet.Game.SBankValueEvent'),
13: (70, 'NNet.Game.SBankSignatureEvent'),
14: (75, 'NNet.Game.SCameraSaveEvent'),
21: (76, 'NNet.Game.SSaveGameEvent'),
22: (72, 'NNet.Game.SSaveGameDoneEvent'),
23: (72, 'NNet.Game.SLoadGameDoneEvent'),
26: (80, 'NNet.Game.SGameCheatEvent'),
27: (88, 'NNet.Game.SCmdEvent'),
28: (96, 'NNet.Game.SSelectionDeltaEvent'),
29: (97, 'NNet.Game.SControlGroupUpdateEvent'),
30: (99, 'NNet.Game.SSelectionSyncCheckEvent'),
31: (101, 'NNet.Game.SResourceTradeEvent'),
32: (102, 'NNet.Game.STriggerChatMessageEvent'),
33: (105, 'NNet.Game.SAICommunicateEvent'),
34: (106, 'NNet.Game.SSetAbsoluteGameSpeedEvent'),
35: (107, 'NNet.Game.SAddAbsoluteGameSpeedEvent'),
36: (108, 'NNet.Game.STriggerPingEvent'),
37: (109, 'NNet.Game.SBroadcastCheatEvent'),
38: (110, 'NNet.Game.SAllianceEvent'),
39: (111, 'NNet.Game.SUnitClickEvent'),
40: (112, 'NNet.Game.SUnitHighlightEvent'),
41: (113, 'NNet.Game.STriggerReplySelectedEvent'),
43: (118, 'NNet.Game.SHijackReplayGameEvent'),
44: (72, 'NNet.Game.STriggerSkippedEvent'),
45: (123, 'NNet.Game.STriggerSoundLengthQueryEvent'),
46: (127, 'NNet.Game.STriggerSoundOffsetEvent'),
47: (128, 'NNet.Game.STriggerTransmissionOffsetEvent'),
48: (129, 'NNet.Game.STriggerTransmissionCompleteEvent'),
49: (133, 'NNet.Game.SCameraUpdateEvent'),
50: (72, 'NNet.Game.STriggerAbortMissionEvent'),
51: (119, 'NNet.Game.STriggerPurchaseMadeEvent'),
52: (72, 'NNet.Game.STriggerPurchaseExitEvent'),
53: (120, 'NNet.Game.STriggerPlanetMissionLaunchedEvent'),
54: (72, 'NNet.Game.STriggerPlanetPanelCanceledEvent'),
55: (122, 'NNet.Game.STriggerDialogControlEvent'),
56: (126, 'NNet.Game.STriggerSoundLengthSyncEvent'),
57: (134, 'NNet.Game.STriggerConversationSkippedEvent'),
58: (137, 'NNet.Game.STriggerMouseClickedEvent'),
59: (138, 'NNet.Game.STriggerMouseMovedEvent'),
60: (139, 'NNet.Game.SAchievementAwardedEvent'),
62: (140, 'NNet.Game.STriggerTargetModeUpdateEvent'),
63: (72, 'NNet.Game.STriggerPlanetPanelReplayEvent'),
64: (141, 'NNet.Game.STriggerSoundtrackDoneEvent'),
65: (142, 'NNet.Game.STriggerPlanetMissionSelectedEvent'),
66: (143, 'NNet.Game.STriggerKeyPressedEvent'),
67: (154, 'NNet.Game.STriggerMovieFunctionEvent'),
68: (72, 'NNet.Game.STriggerPlanetPanelBirthCompleteEvent'),
69: (72, 'NNet.Game.STriggerPlanetPanelDeathCompleteEvent'),
70: (144, 'NNet.Game.SResourceRequestEvent'),
71: (145, 'NNet.Game.SResourceRequestFulfillEvent'),
72: (146, 'NNet.Game.SResourceRequestCancelEvent'),
73: (72, 'NNet.Game.STriggerResearchPanelExitEvent'),
74: (72, 'NNet.Game.STriggerResearchPanelPurchaseEvent'),
75: (147, 'NNet.Game.STriggerResearchPanelSelectionChangedEvent'),
77: (72, 'NNet.Game.STriggerMercenaryPanelExitEvent'),
78: (72, 'NNet.Game.STriggerMercenaryPanelPurchaseEvent'),
79: (148, 'NNet.Game.STriggerMercenaryPanelSelectionChangedEvent'),
80: (72, 'NNet.Game.STriggerVictoryPanelExitEvent'),
81: (72, 'NNet.Game.STriggerBattleReportPanelExitEvent'),
82: (149, 'NNet.Game.STriggerBattleReportPanelPlayMissionEvent'),
83: (150, 'NNet.Game.STriggerBattleReportPanelPlaySceneEvent'),
84: (150, 'NNet.Game.STriggerBattleReportPanelSelectionChangedEvent'),
85: (120, 'NNet.Game.STriggerVictoryPanelPlayMissionAgainEvent'),
86: (72, 'NNet.Game.STriggerMovieStartedEvent'),
87: (72, 'NNet.Game.STriggerMovieFinishedEvent'),
88: (152, 'NNet.Game.SDecrementGameTimeRemainingEvent'),
89: (153, 'NNet.Game.STriggerPortraitLoadedEvent'),
90: (155, 'NNet.Game.STriggerCustomDialogDismissedEvent'),
91: (156, 'NNet.Game.STriggerGameMenuItemSelectedEvent'),
93: (119, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseItemChangedEvent'),
94: (157, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseCategoryChangedEvent'),
95: (158, 'NNet.Game.STriggerButtonPressedEvent'),
96: (72, 'NNet.Game.STriggerGameCreditsFinishedEvent'),
97: (159, 'NNet.Game.STriggerCutsceneBookmarkFiredEvent'),
98: (160, 'NNet.Game.STriggerCutsceneEndSceneFiredEvent'),
99: (161, 'NNet.Game.STriggerCutsceneConversationLineEvent'),
100: (162, 'NNet.Game.STriggerCutsceneConversationLineMissingEvent'),
101: (72, 'NNet.Game.SGameUserLeaveEvent'),
102: (163, 'NNet.Game.SGameUserJoinEvent'),
}
# The typeid of the NNet.Game.EEventId enum.
game_eventid_typeid = 0
# Map from protocol NNet.Game.*Message eventid to (typeid, name)
message_event_types = {
0: (164, 'NNet.Game.SChatMessage'),
1: (165, 'NNet.Game.SPingMessage'),
2: (166, 'NNet.Game.SLoadingProgressMessage'),
3: (72, 'NNet.Game.SServerPingMessage'),
}
# The typeid of the NNet.Game.EMessageId enum.
message_eventid_typeid = 1
# Map from protocol NNet.Replay.Tracker.*Event eventid to (typeid, name)
tracker_event_types = {
0: (168, 'NNet.Replay.Tracker.SPlayerStatsEvent'),
1: (169, 'NNet.Replay.Tracker.SUnitBornEvent'),
2: (170, 'NNet.Replay.Tracker.SUnitDiedEvent'),
3: (171, 'NNet.Replay.Tracker.SUnitOwnerChangeEvent'),
4: (172, 'NNet.Replay.Tracker.SUnitTypeChangeEvent'),
5: (173, 'NNet.Replay.Tracker.SUpgradeEvent'),
6: (169, 'NNet.Replay.Tracker.SUnitInitEvent'),
7: (174, 'NNet.Replay.Tracker.SUnitDoneEvent'),
8: (176, 'NNet.Replay.Tracker.SUnitPositionsEvent'),
9: (177, 'NNet.Replay.Tracker.SPlayerSetupEvent'),
}
# The typeid of the NNet.Replay.Tracker.EEventId enum.
tracker_eventid_typeid = 2
# The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas).
svaruint32_typeid = 7
# The typeid of NNet.Replay.SGameUserId (the type used to encode player ids).
replay_userid_typeid = 8
# The typeid of NNet.Replay.SHeader (the type used to store replay game version and length).
replay_header_typeid = 14
# The typeid of NNet.Game.SDetails (the type used to store overall replay details).
game_details_typeid = 36
# The typeid of NNet.Replay.SInitData (the type used to store the initial lobby).
replay_initdata_typeid = 63
def _varuint32_value(value):
# Returns the numeric value from a SVarUint32 instance.
for k,v in value.iteritems():
return v
return 0
def _decode_event_stream(decoder, eventid_typeid, event_types, decode_user_id):
# Decodes events prefixed with a gameloop and possibly userid
gameloop = 0
while not decoder.done():
start_bits = decoder.used_bits()
# decode the gameloop delta before each event
delta = _varuint32_value(decoder.instance(svaruint32_typeid))
gameloop += delta
# decode the userid before each event
if decode_user_id:
userid = decoder.instance(replay_userid_typeid)
# decode the event id
eventid = decoder.instance(eventid_typeid)
typeid, typename = event_types.get(eventid, (None, None))
if typeid is None:
raise CorruptedError('eventid(%d) at %s' % (eventid, decoder))
# decode the event struct instance
event = decoder.instance(typeid)
event['_event'] = typename
event['_eventid'] = eventid
# insert gameloop and userid
event['_gameloop'] = gameloop
if decode_user_id:
event['_userid'] = userid
# the next event is byte aligned
decoder.byte_align()
# insert bits used in stream
event['_bits'] = decoder.used_bits() - start_bits
yield event
def decode_replay_game_events(contents):
"""Decodes and yields each game event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
game_eventid_typeid,
game_event_types,
decode_user_id=True):
yield event
def decode_replay_message_events(contents):
"""Decodes and yields each message event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
message_eventid_typeid,
message_event_types,
decode_user_id=True):
yield event
def decode_replay_tracker_events(contents):
"""Decodes and yields each tracker event from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
tracker_eventid_typeid,
tracker_event_types,
decode_user_id=False):
yield event
def decode_replay_header(contents):
"""Decodes and return the replay header from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(replay_header_typeid)
def decode_replay_details(contents):
"""Decodes and returns the game details from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(game_details_typeid)
def decode_replay_initdata(contents):
"""Decodes and return the replay init data from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
return decoder.instance(replay_initdata_typeid)
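# Illustrative usage sketch (not part of the protocol definition): the raw
# sub-files are assumed to have been extracted from the .SC2Replay archive
# beforehand, e.g. with an MPQ reader such as mpyq.
#
#   archive = mpyq.MPQArchive('example.SC2Replay')        # hypothetical path
#   contents = archive.read_file('replay.tracker.events')
#   for event in decode_replay_tracker_events(contents):
#       print(event['_event'], event['_gameloop'])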
def decode_replay_attributes_events(contents):
"""Decodes and yields each attribute from the contents byte string."""
buffer = BitPackedBuffer(contents, 'little')
attributes = {}
if not buffer.done():
attributes['source'] = buffer.read_bits(8)
attributes['mapNamespace'] = buffer.read_bits(32)
count = buffer.read_bits(32)
attributes['scopes'] = {}
while not buffer.done():
value = {}
value['namespace'] = buffer.read_bits(32)
value['attrid'] = attrid = buffer.read_bits(32)
scope = buffer.read_bits(8)
value['value'] = buffer.read_aligned_bytes(4)[::-1].strip('\x00')
if not scope in attributes['scopes']:
attributes['scopes'][scope] = {}
if not attrid in attributes['scopes'][scope]:
attributes['scopes'][scope][attrid] = []
attributes['scopes'][scope][attrid].append(value)
return attributes
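# Helpers for the packed unit tags used by the tracker events above: the
# recycle counter occupies the low 18 bits and the unit index sits above it
# (masked to 14 bits on extraction), so for example
# unit_tag_index(unit_tag(5, 2)) == 5 and unit_tag_recycle(unit_tag(5, 2)) == 2.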
def unit_tag(unitTagIndex, unitTagRecycle):
return (unitTagIndex << 18) + unitTagRecycle
def unit_tag_index(unitTag):
return (unitTag >> 18) & 0x00003fff
def unit_tag_recycle(unitTag):
return (unitTag) & 0x0003ffff
| mit |
systers/hyperkitty | hyperkitty/tests/views/test_mailman.py | 1 | 2114 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016-2017 by the Free Software Foundation, Inc.
#
# This file is part of HyperKitty.
#
# HyperKitty is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# HyperKitty is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# HyperKitty. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aurelien Bompard <[email protected]>
#
from hyperkitty.utils import reverse
from django.contrib.sites.models import Site
from django_mailman3.models import MailDomain
from hyperkitty.tests.utils import TestCase
from hyperkitty.views.mailman import _get_url
class PrivateListTestCase(TestCase):
def test_get_url_no_msgid(self):
self.assertEqual(
_get_url("[email protected]"),
"https://example.com" +
reverse('hk_list_overview', args=["[email protected]"]))
def test_get_url_default_domain(self):
self.assertEqual(
_get_url("[email protected]", "<message-id>"),
"https://example.com" + reverse('hk_message_index', kwargs={
"mlist_fqdn": "[email protected]",
"message_id_hash": "3F32NJAOW2XVHJWKZ73T2EPICEIAB3LI"
}))
def test_get_url_with_domain(self):
site = Site.objects.create(name="Example", domain="lists.example.org")
MailDomain.objects.create(site=site, mail_domain="example.com")
self.assertEqual(
_get_url("[email protected]", "<message-id>"),
"https://lists.example.org" + reverse('hk_message_index', kwargs={
"mlist_fqdn": "[email protected]",
"message_id_hash": "3F32NJAOW2XVHJWKZ73T2EPICEIAB3LI"
}))
| gpl-3.0 |
indictranstech/erpnext | erpnext/patches/v7_0/make_guardian.py | 22 | 1269 | from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists("DocType", "Student"):
student_table_cols = frappe.db.get_table_columns("Student")
if "father_name" in student_table_cols:
# 'Schools' module changed to the 'Education'
# frappe.reload_doc("schools", "doctype", "student")
# frappe.reload_doc("schools", "doctype", "guardian")
# frappe.reload_doc("schools", "doctype", "guardian_interest")
frappe.reload_doc("education", "doctype", "student")
frappe.reload_doc("education", "doctype", "guardian")
frappe.reload_doc("education", "doctype", "guardian_interest")
frappe.reload_doc("hr", "doctype", "interest")
fields = ["name", "father_name", "mother_name"]
if "father_email_id" in student_table_cols:
fields += ["father_email_id", "mother_email_id"]
students = frappe.get_all("Student", fields)
for stud in students:
if stud.father_name:
make_guardian(stud.father_name, stud.name, stud.father_email_id)
if stud.mother_name:
make_guardian(stud.mother_name, stud.name, stud.mother_email_id)
def make_guardian(name, student, email=None):
frappe.get_doc({
'doctype': 'Guardian',
'guardian_name': name,
'email': email,
'student': student
}).insert()
| agpl-3.0 |
chazy/reviewboard | reviewboard/site/models.py | 1 | 3194 | #
# models.py -- Models for the "reviewboard.site" app.
#
# Copyright (c) 2010 David Trowbridge
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
class LocalSite(models.Model):
"""
A division within a Review Board installation.
This allows the creation of independent, isolated divisions within a given
server. Users can be designated as members of a LocalSite, and optionally
as admins (which allows them to manipulate the repositories, groups and
users in the site).
    Pretty much every other model in this module can be assigned to a single
LocalSite, at which point only members will be able to see or manipulate
these objects. Access control is performed at every level, and consistency
is enforced through a liberal sprinkling of assertions and unit tests.
"""
name = models.SlugField(_('name'), max_length=32, blank=False, unique=True)
users = models.ManyToManyField(User, blank=True,
related_name='local_site')
admins = models.ManyToManyField(User, blank=True,
related_name='local_site_admins')
def is_accessible_by(self, user):
"""Returns whether or not the user has access to this LocalSite.
This checks that the user is logged in, and that they're listed in the
'users' field.
"""
return (user.is_authenticated() and
self.users.filter(pk=user.pk).exists())
def is_mutable_by(self, user, perm='site.change_localsite'):
"""Returns whether or not a user can modify settings in a LocalSite.
This checks that the user is either staff with the proper permissions,
or that they're listed in the 'admins' field.
By default, this is checking whether the LocalSite itself can be
modified, but a different permission can be passed to check for
another object.
"""
return user.has_perm(perm) or self.admins.filter(pk=user.pk).exists()
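    # Illustrative only (not part of this module): callers typically guard
    # views with these helpers, e.g.
    #   if not local_site.is_accessible_by(request.user):
    #       raise PermissionDenied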
def __unicode__(self):
return self.name
| mit |
Bysmyyr/chromium-crosswalk | tools/telemetry/third_party/gsutilz/third_party/crcmod/python2/crcmod/_crcfunpy.py | 68 | 3073 | #-----------------------------------------------------------------------------
# Low level CRC functions for use by crcmod. This version is implemented in
# Python for a couple of reasons. 1) Provide a reference implementation.
# 2) Provide a version that can be used on systems where a C compiler is not
# available for building extension modules.
#
# Copyright (c) 2004 Raymond L. Buvel
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#-----------------------------------------------------------------------------
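# Each helper below performs one table-driven CRC pass: it takes the input
# string, the running CRC value and a 256-entry lookup table that crcmod builds
# from the chosen polynomial; the "r" variants are the reflected (bit-reversed)
# forms.  Minimal illustrative sketch using the public crcmod API rather than
# calling these helpers directly:
#
#   import crcmod
#   crc32_fun = crcmod.mkCrcFun(0x104c11db7, initCrc=0, xorOut=0xFFFFFFFF)
#   print(hex(crc32_fun('123456789')))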
def _crc8(data, crc, table):
crc = crc & 0xFF
for x in data:
crc = table[ord(x) ^ crc]
return crc
def _crc8r(data, crc, table):
crc = crc & 0xFF
for x in data:
crc = table[ord(x) ^ crc]
return crc
def _crc16(data, crc, table):
crc = crc & 0xFFFF
for x in data:
crc = table[ord(x) ^ ((crc>>8) & 0xFF)] ^ ((crc << 8) & 0xFF00)
return crc
def _crc16r(data, crc, table):
crc = crc & 0xFFFF
for x in data:
crc = table[ord(x) ^ (crc & 0xFF)] ^ (crc >> 8)
return crc
def _crc24(data, crc, table):
crc = crc & 0xFFFFFF
for x in data:
crc = table[ord(x) ^ (int(crc>>16) & 0xFF)] ^ ((crc << 8) & 0xFFFF00)
return crc
def _crc24r(data, crc, table):
crc = crc & 0xFFFFFF
for x in data:
crc = table[ord(x) ^ int(crc & 0xFF)] ^ (crc >> 8)
return crc
def _crc32(data, crc, table):
crc = crc & 0xFFFFFFFFL
for x in data:
crc = table[ord(x) ^ (int(crc>>24) & 0xFF)] ^ ((crc << 8) & 0xFFFFFF00L)
return crc
def _crc32r(data, crc, table):
crc = crc & 0xFFFFFFFFL
for x in data:
crc = table[ord(x) ^ int(crc & 0xFFL)] ^ (crc >> 8)
return crc
def _crc64(data, crc, table):
crc = crc & 0xFFFFFFFFFFFFFFFFL
for x in data:
crc = table[ord(x) ^ (int(crc>>56) & 0xFF)] ^ ((crc << 8) & 0xFFFFFFFFFFFFFF00L)
return crc
def _crc64r(data, crc, table):
crc = crc & 0xFFFFFFFFFFFFFFFFL
for x in data:
crc = table[ord(x) ^ int(crc & 0xFFL)] ^ (crc >> 8)
return crc
| bsd-3-clause |
dmoliveira/networkx | networkx/algorithms/bipartite/tests/test_edgelist.py | 31 | 7069 | """
Unit tests for bipartite edgelists.
"""
from nose.tools import assert_equal, assert_raises, assert_not_equal, raises
import io
import tempfile
import os
import networkx as nx
from networkx.testing import (assert_edges_equal, assert_nodes_equal,
assert_graphs_equal)
from networkx.algorithms import bipartite
class TestEdgelist:
def setUp(self):
self.G=nx.Graph(name="test")
e=[('a','b'),('b','c'),('c','d'),('d','e'),('e','f'),('a','f')]
self.G.add_edges_from(e)
self.G.add_nodes_from(['a','c','e'],bipartite=0)
self.G.add_nodes_from(['b','d','f'],bipartite=1)
self.G.add_node('g',bipartite=0)
self.DG=nx.DiGraph(self.G)
self.MG=nx.MultiGraph()
self.MG.add_edges_from([(1,2),(1,2),(1,2)])
self.MG.add_node(1,bipartite=0)
self.MG.add_node(2,bipartite=1)
def test_read_edgelist_1(self):
s = b"""\
# comment line
1 2
# comment line
2 3
"""
bytesIO = io.BytesIO(s)
G = bipartite.read_edgelist(bytesIO,nodetype=int)
assert_edges_equal(G.edges(),[(1,2),(2,3)])
def test_read_edgelist_3(self):
s = b"""\
# comment line
1 2 {'weight':2.0}
# comment line
2 3 {'weight':3.0}
"""
bytesIO = io.BytesIO(s)
G = bipartite.read_edgelist(bytesIO,nodetype=int,data=False)
assert_edges_equal(G.edges(),[(1,2),(2,3)])
bytesIO = io.BytesIO(s)
G = bipartite.read_edgelist(bytesIO,nodetype=int,data=True)
assert_edges_equal(G.edges(data=True),
[(1,2,{'weight':2.0}),(2,3,{'weight':3.0})])
def test_write_edgelist_1(self):
fh=io.BytesIO()
G=nx.Graph()
G.add_edges_from([(1,2),(2,3)])
G.add_node(1,bipartite=0)
G.add_node(2,bipartite=1)
G.add_node(3,bipartite=0)
bipartite.write_edgelist(G,fh,data=False)
fh.seek(0)
assert_equal(fh.read(),b"1 2\n3 2\n")
def test_write_edgelist_2(self):
fh=io.BytesIO()
G=nx.Graph()
G.add_edges_from([(1,2),(2,3)])
G.add_node(1,bipartite=0)
G.add_node(2,bipartite=1)
G.add_node(3,bipartite=0)
bipartite.write_edgelist(G,fh,data=True)
fh.seek(0)
assert_equal(fh.read(),b"1 2 {}\n3 2 {}\n")
def test_write_edgelist_3(self):
fh=io.BytesIO()
G=nx.Graph()
G.add_edge(1,2,weight=2.0)
G.add_edge(2,3,weight=3.0)
G.add_node(1,bipartite=0)
G.add_node(2,bipartite=1)
G.add_node(3,bipartite=0)
bipartite.write_edgelist(G,fh,data=True)
fh.seek(0)
assert_equal(fh.read(),b"1 2 {'weight': 2.0}\n3 2 {'weight': 3.0}\n")
def test_write_edgelist_4(self):
fh=io.BytesIO()
G=nx.Graph()
G.add_edge(1,2,weight=2.0)
G.add_edge(2,3,weight=3.0)
G.add_node(1,bipartite=0)
G.add_node(2,bipartite=1)
G.add_node(3,bipartite=0)
bipartite.write_edgelist(G,fh,data=[('weight')])
fh.seek(0)
assert_equal(fh.read(),b"1 2 2.0\n3 2 3.0\n")
def test_unicode(self):
G = nx.Graph()
try: # Python 3.x
name1 = chr(2344) + chr(123) + chr(6543)
name2 = chr(5543) + chr(1543) + chr(324)
except ValueError: # Python 2.6+
name1 = unichr(2344) + unichr(123) + unichr(6543)
name2 = unichr(5543) + unichr(1543) + unichr(324)
G.add_edge(name1, 'Radiohead', attr_dict={name2: 3})
G.add_node(name1,bipartite=0)
G.add_node('Radiohead',bipartite=1)
fd, fname = tempfile.mkstemp()
bipartite.write_edgelist(G, fname)
H = bipartite.read_edgelist(fname)
assert_graphs_equal(G, H)
os.close(fd)
os.unlink(fname)
def test_latin1_error(self):
G = nx.Graph()
try: # Python 3.x
name1 = chr(2344) + chr(123) + chr(6543)
name2 = chr(5543) + chr(1543) + chr(324)
except ValueError: # Python 2.6+
name1 = unichr(2344) + unichr(123) + unichr(6543)
name2 = unichr(5543) + unichr(1543) + unichr(324)
G.add_edge(name1, 'Radiohead', attr_dict={name2: 3})
G.add_node(name1,bipartite=0)
G.add_node('Radiohead',bipartite=1)
fd, fname = tempfile.mkstemp()
assert_raises(UnicodeEncodeError,
bipartite.write_edgelist,
G, fname, encoding = 'latin-1')
os.close(fd)
os.unlink(fname)
def test_latin1(self):
G = nx.Graph()
try: # Python 3.x
blurb = chr(1245) # just to trigger the exception
name1 = 'Bj' + chr(246) + 'rk'
name2 = chr(220) + 'ber'
except ValueError: # Python 2.6+
name1 = 'Bj' + unichr(246) + 'rk'
name2 = unichr(220) + 'ber'
G.add_edge(name1, 'Radiohead', attr_dict={name2: 3})
G.add_node(name1,bipartite=0)
G.add_node('Radiohead',bipartite=1)
fd, fname = tempfile.mkstemp()
bipartite.write_edgelist(G, fname, encoding = 'latin-1')
H = bipartite.read_edgelist(fname, encoding = 'latin-1')
assert_graphs_equal(G, H)
os.close(fd)
os.unlink(fname)
def test_edgelist_graph(self):
G=self.G
(fd,fname)=tempfile.mkstemp()
bipartite.write_edgelist(G,fname)
H=bipartite.read_edgelist(fname)
H2=bipartite.read_edgelist(fname)
assert_not_equal(H,H2) # they should be different graphs
G.remove_node('g') # isolated nodes are not written in edgelist
assert_nodes_equal(H.nodes(),G.nodes())
assert_edges_equal(H.edges(),G.edges())
os.close(fd)
os.unlink(fname)
def test_edgelist_integers(self):
G=nx.convert_node_labels_to_integers(self.G)
(fd,fname)=tempfile.mkstemp()
bipartite.write_edgelist(G,fname)
H=bipartite.read_edgelist(fname,nodetype=int)
# isolated nodes are not written in edgelist
G.remove_nodes_from(nx.isolates(G))
assert_nodes_equal(H.nodes(),G.nodes())
assert_edges_equal(H.edges(),G.edges())
os.close(fd)
os.unlink(fname)
def test_edgelist_multigraph(self):
G=self.MG
(fd,fname)=tempfile.mkstemp()
bipartite.write_edgelist(G,fname)
H=bipartite.read_edgelist(fname,nodetype=int,create_using=nx.MultiGraph())
H2=bipartite.read_edgelist(fname,nodetype=int,create_using=nx.MultiGraph())
assert_not_equal(H,H2) # they should be different graphs
assert_nodes_equal(H.nodes(),G.nodes())
assert_edges_equal(H.edges(),G.edges())
os.close(fd)
os.unlink(fname)
@raises(nx.NetworkXNotImplemented)
def test_digraph_fail(self):
bytesIO = io.BytesIO()
bipartite.write_edgelist(nx.DiGraph(),bytesIO)
@raises(AttributeError)
def test_attribute_fail(self):
G = nx.path_graph(4)
bytesIO = io.BytesIO()
bipartite.write_edgelist(G,bytesIO)
| bsd-3-clause |
sergio-incaser/odoo | openerp/addons/test_workflow/tests/test_workflow.py | 392 | 6232 | # -*- coding: utf-8 -*-
import openerp
from openerp import SUPERUSER_ID
from openerp.tests import common
class test_workflows(common.TransactionCase):
def check_activities(self, model_name, i, names):
""" Check that the record i has workitems in the given activity names.
"""
instance = self.registry('workflow.instance')
workitem = self.registry('workflow.workitem')
# Given the workflow instance associated to the record ...
instance_id = instance.search(
self.cr, SUPERUSER_ID,
[('res_type', '=', model_name), ('res_id', '=', i)])
self.assertTrue( instance_id, 'A workflow instance is expected.')
# ... get all its workitems ...
workitem_ids = workitem.search(
self.cr, SUPERUSER_ID,
[('inst_id', '=', instance_id[0])])
self.assertTrue(
workitem_ids,
'The workflow instance should have workitems.')
        # ... and check the activities they are in against the provided names.
workitem_records = workitem.browse(
self.cr, SUPERUSER_ID, workitem_ids)
self.assertEqual(
sorted([item.act_id.name for item in workitem_records]),
sorted(names))
def check_value(self, model_name, i, value):
""" Check that the record i has the given value.
"""
model = self.registry(model_name)
record = model.read(self.cr, SUPERUSER_ID, [i], ['value'])[0]
self.assertEqual(record['value'], value)
def test_workflow(self):
model = self.registry('test.workflow.model')
trigger = self.registry('test.workflow.trigger')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
# a -> b is just a signal.
model.signal_workflow(self.cr, SUPERUSER_ID, [i], 'a-b')
self.check_activities(model._name, i, ['b'])
# b -> c is a trigger (which is False),
# so we remain in the b activity.
model.trigger(self.cr, SUPERUSER_ID)
self.check_activities(model._name, i, ['b'])
# b -> c is a trigger (which is set to True).
        # so we go to c when the trigger is called.
trigger.write(self.cr, SUPERUSER_ID, [1], {'value': True})
model.trigger(self.cr, SUPERUSER_ID)
self.check_activities(model._name, i, ['c'])
self.assertEqual(
True,
True)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_a(self):
model = self.registry('test.workflow.model.a')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 0)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_b(self):
model = self.registry('test.workflow.model.b')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_c(self):
model = self.registry('test.workflow.model.c')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 0)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_d(self):
model = self.registry('test.workflow.model.d')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_e(self):
model = self.registry('test.workflow.model.e')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['b'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_f(self):
model = self.registry('test.workflow.model.f')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.signal_workflow(self.cr, SUPERUSER_ID, [i], 'a-b')
self.check_activities(model._name, i, ['b'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_g(self):
model = self.registry('test.workflow.model.g')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_h(self):
model = self.registry('test.workflow.model.h')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['b', 'c'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_i(self):
model = self.registry('test.workflow.model.i')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['b'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_j(self):
model = self.registry('test.workflow.model.j')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['a'])
self.check_value(model._name, i, 1)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_k(self):
model = self.registry('test.workflow.model.k')
i = model.create(self.cr, SUPERUSER_ID, {})
        # Non-deterministic: can be b or c
# self.check_activities(model._name, i, ['b'])
# self.check_activities(model._name, i, ['c'])
self.check_value(model._name, i, 2)
model.unlink(self.cr, SUPERUSER_ID, [i])
def test_workflow_l(self):
model = self.registry('test.workflow.model.l')
i = model.create(self.cr, SUPERUSER_ID, {})
self.check_activities(model._name, i, ['c', 'c', 'd'])
self.check_value(model._name, i, 3)
model.unlink(self.cr, SUPERUSER_ID, [i])
| agpl-3.0 |