repo_name (stringlengths 5-100) | path (stringlengths 4-299) | copies (stringclasses, 990 values) | size (stringlengths 4-7) | content (stringlengths 666-1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17-100) | line_max (int64, 7-1k) | alpha_frac (float64, 0.25-0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
Suite5/DataColibri | articles/migrations/0001_initial.py | 10 | 13440 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Tag'
db.create_table('articles_tag', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
))
db.send_create_signal('articles', ['Tag'])
# Adding model 'ArticleStatus'
db.create_table('articles_articlestatus', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('ordering', self.gf('django.db.models.fields.IntegerField')(default=0)),
('is_live', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('articles', ['ArticleStatus'])
# Adding model 'Article'
db.create_table('articles_article', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
('status', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['articles.ArticleStatus'])),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('keywords', self.gf('django.db.models.fields.TextField')(blank=True)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('markup', self.gf('django.db.models.fields.CharField')(default='h', max_length=1)),
('content', self.gf('django.db.models.fields.TextField')()),
('rendered_content', self.gf('django.db.models.fields.TextField')()),
('publish_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
('expiration_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('login_required', self.gf('django.db.models.fields.BooleanField')(default=False)),
('use_addthis_button', self.gf('django.db.models.fields.BooleanField')(default=True)),
('addthis_use_author', self.gf('django.db.models.fields.BooleanField')(default=True)),
('addthis_username', self.gf('django.db.models.fields.CharField')(default=None, max_length=50, blank=True)),
))
db.send_create_signal('articles', ['Article'])
# Adding M2M table for field sites on 'Article'
db.create_table('articles_article_sites', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('article', models.ForeignKey(orm['articles.article'], null=False)),
('site', models.ForeignKey(orm['sites.site'], null=False))
))
db.create_unique('articles_article_sites', ['article_id', 'site_id'])
# Adding M2M table for field tags on 'Article'
db.create_table('articles_article_tags', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('article', models.ForeignKey(orm['articles.article'], null=False)),
('tag', models.ForeignKey(orm['articles.tag'], null=False))
))
db.create_unique('articles_article_tags', ['article_id', 'tag_id'])
# Adding M2M table for field followup_for on 'Article'
db.create_table('articles_article_followup_for', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_article', models.ForeignKey(orm['articles.article'], null=False)),
('to_article', models.ForeignKey(orm['articles.article'], null=False))
))
db.create_unique('articles_article_followup_for', ['from_article_id', 'to_article_id'])
# Adding M2M table for field related_articles on 'Article'
db.create_table('articles_article_related_articles', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('from_article', models.ForeignKey(orm['articles.article'], null=False)),
('to_article', models.ForeignKey(orm['articles.article'], null=False))
))
db.create_unique('articles_article_related_articles', ['from_article_id', 'to_article_id'])
# Adding model 'Attachment'
db.create_table('articles_attachment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(related_name='attachments', to=orm['articles.Article'])),
('attachment', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('caption', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
))
db.send_create_signal('articles', ['Attachment'])
def backwards(self, orm):
# Deleting model 'Tag'
db.delete_table('articles_tag')
# Deleting model 'ArticleStatus'
db.delete_table('articles_articlestatus')
# Deleting model 'Article'
db.delete_table('articles_article')
# Removing M2M table for field sites on 'Article'
db.delete_table('articles_article_sites')
# Removing M2M table for field tags on 'Article'
db.delete_table('articles_article_tags')
# Removing M2M table for field followup_for on 'Article'
db.delete_table('articles_article_followup_for')
# Removing M2M table for field related_articles on 'Article'
db.delete_table('articles_article_related_articles')
# Deleting model 'Attachment'
db.delete_table('articles_attachment')
models = {
'articles.article': {
'Meta': {'ordering': "('-publish_date', 'title')", 'object_name': 'Article'},
'addthis_use_author': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'addthis_username': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '50', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followup_for': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followups'", 'blank': 'True', 'to': "orm['articles.Article']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'markup': ('django.db.models.fields.CharField', [], {'default': "'h'", 'max_length': '1'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'related_articles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_articles_rel_+'", 'blank': 'True', 'to': "orm['articles.Article']"}),
'rendered_content': ('django.db.models.fields.TextField', [], {}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sites.Site']", 'symmetrical': 'False', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['articles.ArticleStatus']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['articles.Tag']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'use_addthis_button': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'articles.articlestatus': {
'Meta': {'ordering': "('ordering', 'name')", 'object_name': 'ArticleStatus'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_live': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'articles.attachment': {
'Meta': {'ordering': "('-article', 'id')", 'object_name': 'Attachment'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attachments'", 'to': "orm['articles.Article']"}),
'attachment': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'articles.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['articles']
| mit | 8,652,920,357,814,581,000 | 64.242718 | 191 | 0.580952 | false |
pombreda/fs-googledrive | test_googledrivefs.py | 1 | 3955 | # -*- coding: utf-8 -*-
from __future__ import (print_function, division,
absolute_import, unicode_literals)
import unittest
from mock import Mock
from pytest import fixture
from oauth2client.client import OAuth2Credentials
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from fs.tests import FSTestCases
from googledrivefs import GoogleDriveFS
client_config = {
'auth_uri': 'https://accounts.google.com/o/oauth2/auth',
'client_id': '105537897616-oqt2bc3ffgi3l2bd07o1s3feq68ga5m7'
'.apps.googleusercontent.com',
'client_secret': 'sC6ZXdmHf_qXR0bQ0XaLvfSp',
'redirect_uri': 'urn:ietf:wg:oauth:2.0:oob',
'revoke_uri': None,
'token_uri': 'https://accounts.google.com/o/oauth2/token'}
credentials = '{"_module": "oauth2client.client", "token_expiry": "2014-06-07T17:04:26Z", "access_token": "ya29.KgBLjqMlBwNydhoAAACKi7Trb4b3VyN4LZX5JHHTz9wdUeAOqupcFn65q9p0kA", "token_uri": "https://accounts.google.com/o/oauth2/token", "invalid": false, "token_response": {"access_token": "ya29.KgBLjqMlBwNydhoAAACKi7Trb4b3VyN4LZX5JHHTz9wdUeAOqupcFn65q9p0kA", "token_type": "Bearer", "expires_in": 3600, "refresh_token": "1/1Ani7Ovt_KmBPaQxbyc4ZGvhTHMNu4gwVdPiBR8_8BQ"}, "client_id": "105537897616-oqt2bc3ffgi3l2bd07o1s3feq68ga5m7.apps.googleusercontent.com", "id_token": null, "client_secret": "sC6ZXdmHf_qXR0bQ0XaLvfSp", "revoke_uri": "https://accounts.google.com/o/oauth2/revoke", "_class": "OAuth2Credentials", "refresh_token": "1/1Ani7Ovt_KmBPaQxbyc4ZGvhTHMNu4gwVdPiBR8_8BQ", "user_agent": null}'
def cleanup_googledrive(fs):
"""Remove all files and folders from Google Drive"""
for entry in fs.listdir(files_only=True):
fs.remove(entry)
for entry in fs.listdir(dirs_only=True):
fs.removedir(entry, force=True)
fs.client.auth.service.files().emptyTrash().execute()
class TestGoogleDriveFS():
@fixture
def fs(self):
gauth = GoogleAuth()
gauth.credentials = OAuth2Credentials.from_json(credentials)
gauth.client_config = client_config
gauth.settings["client_config_backend"] = "settings"
drive = GoogleDrive(gauth)
return GoogleDriveFS(drive)
def test_map_ids_to_paths(self, fs):
# Arrange
file_list = [
{'parents': [{'id': '0B_lkT', 'isRoot': True}],
'id': '1APq7o', 'title': 'file_at_root.txt'},
{'parents': [{'id': '0B_lkT', 'isRoot': True}],
'id': '1xp13X', 'title': 'folder_at_root'},
{'parents': [{'id': '1xp13X', 'isRoot': False}],
'id': '13PuVd', 'title': 'file1_in_folder.txt'},
{'parents': [{'id': '1xp13X', 'isRoot': False}],
'id': '1ovGwK', 'title': 'file2_in_folder.txt'},
{'parents': [{'id': '1xp13X', 'isRoot': False}],
'id': '0Ap6n5', 'title': 'folder_in_folder'},
]
fs.client.ListFile = Mock()
fs.client.ListFile.return_value.GetList.return_value = file_list
# Act
ids = fs._map_ids_to_paths()
# Assert
assert ids['/file_at_root.txt'] == '1APq7o'
assert ids['/folder_at_root'] == '1xp13X'
assert ids['/folder_at_root/file1_in_folder.txt'] == '13PuVd'
assert ids['/folder_at_root/file2_in_folder.txt'] == '1ovGwK'
assert ids['/folder_at_root/folder_in_folder'] == '0Ap6n5'
class TestExternalGoogleDriveFS(unittest.TestCase, FSTestCases):
"""This will test the GoogleDriveFS implementation against the
base tests defined in PyFilesystem"""
def setUp(self):
gauth = GoogleAuth()
gauth.credentials = OAuth2Credentials.from_json(credentials)
gauth.client_config = client_config
gauth.settings["client_config_backend"] = "settings"
drive = GoogleDrive(gauth)
self.fs = GoogleDriveFS(drive)
def tearDown(self):
cleanup_googledrive(self.fs)
self.fs.close()
| gpl-2.0 | -7,381,709,161,170,457,000 | 44.988372 | 801 | 0.647788 | false |
vaygr/ansible | lib/ansible/module_utils/k8s/raw.py | 30 | 8686 | #
# Copyright 2018 Red Hat | Ansible
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
import copy
from ansible.module_utils.k8s.helper import COMMON_ARG_SPEC, AUTH_ARG_SPEC, OPENSHIFT_ARG_SPEC
from ansible.module_utils.k8s.common import KubernetesAnsibleModule, OpenShiftAnsibleModuleMixin, to_snake
try:
from openshift.helper.exceptions import KubernetesException
except ImportError:
# Exception handled in common
pass
class KubernetesRawModule(KubernetesAnsibleModule):
def __init__(self, *args, **kwargs):
mutually_exclusive = [
('resource_definition', 'src'),
]
KubernetesAnsibleModule.__init__(self, *args,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True,
**kwargs)
self.kind = self.params.pop('kind')
self.api_version = self.params.pop('api_version')
self.resource_definition = self.params.pop('resource_definition')
self.src = self.params.pop('src')
if self.src:
self.resource_definition = self.load_resource_definition(self.src)
if self.resource_definition:
self.api_version = self.resource_definition.get('apiVersion')
self.kind = self.resource_definition.get('kind')
self.api_version = self.api_version.lower()
self.kind = to_snake(self.kind)
if not self.api_version:
self.fail_json(
msg=("Error: no api_version specified. Use the api_version parameter, or provide it as part of a ",
"resource_definition.")
)
if not self.kind:
self.fail_json(
msg="Error: no kind specified. Use the kind parameter, or provide it as part of a resource_definition"
)
self.helper = self.get_helper(self.api_version, self.kind)
@property
def argspec(self):
argspec = copy.deepcopy(COMMON_ARG_SPEC)
argspec.update(copy.deepcopy(AUTH_ARG_SPEC))
return argspec
def execute_module(self):
if self.resource_definition:
resource_params = self.resource_to_parameters(self.resource_definition)
self.params.update(resource_params)
self.authenticate()
state = self.params.pop('state', None)
force = self.params.pop('force', False)
name = self.params.get('name')
namespace = self.params.get('namespace')
existing = None
self.remove_aliases()
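        # Summary of the branches below: state=absent deletes the object if it
        # exists; state=present creates it when missing, replaces it when
        # force=True, and otherwise patches only if the desired and existing
        # objects actually differ.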
return_attributes = dict(changed=False, result=dict())
if self.helper.base_model_name_snake.endswith('list'):
k8s_obj = self._read(name, namespace)
return_attributes['result'] = k8s_obj.to_dict()
self.exit_json(**return_attributes)
try:
existing = self.helper.get_object(name, namespace)
except KubernetesException as exc:
self.fail_json(msg='Failed to retrieve requested object: {0}'.format(exc.message),
error=exc.value.get('status'))
if state == 'absent':
if not existing:
# The object already does not exist
self.exit_json(**return_attributes)
else:
# Delete the object
if not self.check_mode:
try:
self.helper.delete_object(name, namespace)
except KubernetesException as exc:
self.fail_json(msg="Failed to delete object: {0}".format(exc.message),
error=exc.value.get('status'))
return_attributes['changed'] = True
self.exit_json(**return_attributes)
else:
if not existing:
k8s_obj = self._create(namespace)
return_attributes['result'] = k8s_obj.to_dict()
return_attributes['changed'] = True
self.exit_json(**return_attributes)
if existing and force:
k8s_obj = None
request_body = self.helper.request_body_from_params(self.params)
if not self.check_mode:
try:
k8s_obj = self.helper.replace_object(name, namespace, body=request_body)
except KubernetesException as exc:
self.fail_json(msg="Failed to replace object: {0}".format(exc.message),
error=exc.value.get('status'))
return_attributes['result'] = k8s_obj.to_dict()
return_attributes['changed'] = True
self.exit_json(**return_attributes)
# Check if existing object should be patched
k8s_obj = copy.deepcopy(existing)
try:
self.helper.object_from_params(self.params, obj=k8s_obj)
except KubernetesException as exc:
self.fail_json(msg="Failed to patch object: {0}".format(exc.message))
match, diff = self.helper.objects_match(self.helper.fix_serialization(existing), k8s_obj)
if match:
return_attributes['result'] = existing.to_dict()
self.exit_json(**return_attributes)
# Differences exist between the existing obj and requested params
if not self.check_mode:
try:
k8s_obj = self.helper.patch_object(name, namespace, k8s_obj)
except KubernetesException as exc:
self.fail_json(msg="Failed to patch object: {0}".format(exc.message))
return_attributes['result'] = k8s_obj.to_dict()
return_attributes['changed'] = True
self.exit_json(**return_attributes)
def _create(self, namespace):
request_body = None
k8s_obj = None
try:
request_body = self.helper.request_body_from_params(self.params)
except KubernetesException as exc:
self.fail_json(msg="Failed to create object: {0}".format(exc.message))
if not self.check_mode:
try:
k8s_obj = self.helper.create_object(namespace, body=request_body)
except KubernetesException as exc:
self.fail_json(msg="Failed to create object: {0}".format(exc.message),
error=exc.value.get('status'))
return k8s_obj
def _read(self, name, namespace):
k8s_obj = None
try:
k8s_obj = self.helper.get_object(name, namespace)
except KubernetesException as exc:
self.fail_json(msg='Failed to retrieve requested object',
error=exc.value.get('status'))
return k8s_obj
class OpenShiftRawModule(OpenShiftAnsibleModuleMixin, KubernetesRawModule):
@property
def argspec(self):
args = super(OpenShiftRawModule, self).argspec
args.update(copy.deepcopy(OPENSHIFT_ARG_SPEC))
return args
def _create(self, namespace):
if self.kind.lower() == 'project':
return self._create_project()
return KubernetesRawModule._create(self, namespace)
def _create_project(self):
new_obj = None
k8s_obj = None
try:
new_obj = self.helper.object_from_params(self.params)
except KubernetesException as exc:
self.fail_json(msg="Failed to create object: {0}".format(exc.message))
try:
k8s_obj = self.helper.create_project(metadata=new_obj.metadata,
display_name=self.params.get('display_name'),
description=self.params.get('description'))
except KubernetesException as exc:
self.fail_json(msg='Failed to retrieve requested object',
error=exc.value.get('status'))
return k8s_obj
| gpl-3.0 | 6,600,243,783,054,078,000 | 40.165877 | 118 | 0.586116 | false |
ubc/edx-platform | lms/lib/comment_client/user.py | 144 | 6343 | from .utils import merge_dict, perform_request, CommentClientRequestError
import models
import settings
class User(models.Model):
accessible_fields = [
'username', 'follower_ids', 'upvoted_ids', 'downvoted_ids',
'id', 'external_id', 'subscribed_user_ids', 'children', 'course_id',
'group_id', 'subscribed_thread_ids', 'subscribed_commentable_ids',
'subscribed_course_ids', 'threads_count', 'comments_count',
'default_sort_key'
]
updatable_fields = ['username', 'external_id', 'default_sort_key']
initializable_fields = updatable_fields
metric_tag_fields = ['course_id']
base_url = "{prefix}/users".format(prefix=settings.PREFIX)
default_retrieve_params = {'complete': True}
type = 'user'
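    # This model proxies the comments service REST API; for example,
    # User.from_django_user(request.user).follow(thread) issues a POST to
    # {prefix}/users/{id}/subscriptions (see _url_for_subscription below).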
@classmethod
def from_django_user(cls, user):
return cls(id=str(user.id),
external_id=str(user.id),
username=user.username)
def follow(self, source):
params = {'source_type': source.type, 'source_id': source.id}
response = perform_request(
'post',
_url_for_subscription(self.id),
params,
metric_action='user.follow',
metric_tags=self._metric_tags + ['target.type:{}'.format(source.type)],
)
def unfollow(self, source):
params = {'source_type': source.type, 'source_id': source.id}
response = perform_request(
'delete',
_url_for_subscription(self.id),
params,
metric_action='user.unfollow',
metric_tags=self._metric_tags + ['target.type:{}'.format(source.type)],
)
def vote(self, voteable, value):
if voteable.type == 'thread':
url = _url_for_vote_thread(voteable.id)
elif voteable.type == 'comment':
url = _url_for_vote_comment(voteable.id)
else:
raise CommentClientRequestError("Can only vote / unvote for threads or comments")
params = {'user_id': self.id, 'value': value}
response = perform_request(
'put',
url,
params,
metric_action='user.vote',
metric_tags=self._metric_tags + ['target.type:{}'.format(voteable.type)],
)
voteable._update_from_response(response)
def unvote(self, voteable):
if voteable.type == 'thread':
url = _url_for_vote_thread(voteable.id)
elif voteable.type == 'comment':
url = _url_for_vote_comment(voteable.id)
else:
raise CommentClientRequestError("Can only vote / unvote for threads or comments")
params = {'user_id': self.id}
response = perform_request(
'delete',
url,
params,
metric_action='user.unvote',
metric_tags=self._metric_tags + ['target.type:{}'.format(voteable.type)],
)
voteable._update_from_response(response)
def active_threads(self, query_params={}):
if not self.course_id:
raise CommentClientRequestError("Must provide course_id when retrieving active threads for the user")
url = _url_for_user_active_threads(self.id)
params = {'course_id': self.course_id.to_deprecated_string()}
params = merge_dict(params, query_params)
response = perform_request(
'get',
url,
params,
metric_action='user.active_threads',
metric_tags=self._metric_tags,
paged_results=True,
)
return response.get('collection', []), response.get('page', 1), response.get('num_pages', 1)
def subscribed_threads(self, query_params={}):
if not self.course_id:
raise CommentClientRequestError("Must provide course_id when retrieving subscribed threads for the user")
url = _url_for_user_subscribed_threads(self.id)
params = {'course_id': self.course_id.to_deprecated_string()}
params = merge_dict(params, query_params)
response = perform_request(
'get',
url,
params,
metric_action='user.subscribed_threads',
metric_tags=self._metric_tags,
paged_results=True
)
return response.get('collection', []), response.get('page', 1), response.get('num_pages', 1)
def _retrieve(self, *args, **kwargs):
url = self.url(action='get', params=self.attributes)
retrieve_params = self.default_retrieve_params.copy()
retrieve_params.update(kwargs)
if self.attributes.get('course_id'):
retrieve_params['course_id'] = self.course_id.to_deprecated_string()
if self.attributes.get('group_id'):
retrieve_params['group_id'] = self.group_id
try:
response = perform_request(
'get',
url,
retrieve_params,
metric_action='model.retrieve',
metric_tags=self._metric_tags,
)
except CommentClientRequestError as e:
if e.status_code == 404:
# attempt to gracefully recover from a previous failure
# to sync this user to the comments service.
self.save()
response = perform_request(
'get',
url,
retrieve_params,
metric_action='model.retrieve',
metric_tags=self._metric_tags,
)
else:
raise
self._update_from_response(response)
def _url_for_vote_comment(comment_id):
return "{prefix}/comments/{comment_id}/votes".format(prefix=settings.PREFIX, comment_id=comment_id)
def _url_for_vote_thread(thread_id):
return "{prefix}/threads/{thread_id}/votes".format(prefix=settings.PREFIX, thread_id=thread_id)
def _url_for_subscription(user_id):
return "{prefix}/users/{user_id}/subscriptions".format(prefix=settings.PREFIX, user_id=user_id)
def _url_for_user_active_threads(user_id):
return "{prefix}/users/{user_id}/active_threads".format(prefix=settings.PREFIX, user_id=user_id)
def _url_for_user_subscribed_threads(user_id):
return "{prefix}/users/{user_id}/subscribed_threads".format(prefix=settings.PREFIX, user_id=user_id)
| agpl-3.0 | 6,615,430,159,723,928,000 | 36.755952 | 117 | 0.583478 | false |
morucci/repoxplorer | repoxplorer/auth/__init__.py | 1 | 7152 | # Copyright 2019, Matthieu Huin
# Copyright 2019, Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various authentication engines supported by RepoXplorer."""
import base64
import json
import jwt
from urllib.parse import urljoin
import requests
from pecan import conf
from repoxplorer.exceptions import UnauthorizedException
from repoxplorer import index
from repoxplorer.index import users
class BaseAuthEngine(object):
"""The base auth engine class."""
def is_configured(self) -> bool:
"""Activate the users REST endpoint if authentication is configured."""
return False
def authorize(self, request, uid=None) -> str:
"""Make sure the authenticated user is allowed an action."""
raise UnauthorizedException("Not implemented")
def provision_user(self, request) -> None:
"""If needed, the user can be provisioned based on the user info passed
by the Identity Provider."""
return
class CAuthEngine(BaseAuthEngine):
"""Cauth relies on Apache + mod_auth_authtkt to set a Remote-User header.
User provisioning is done out of the band by Cauth itself, calling the
PUT endpoint on the users API."""
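    # Illustrative sketch (not part of the original module): a request that
    # Apache has already authenticated arrives with the user id in a header,
    # e.g.
    #
    #   engine = CAuthEngine()
    #   # request.headers: {'Remote-User': 'jdoe'}   (set upstream by Apache)
    #   engine.authorize(request, uid='jdoe')        # returns 'jdoe'
    #
    # The user name 'jdoe' is a hypothetical example value.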
def is_configured(self):
return conf.get('users_endpoint', False)
def authorize(self, request, uid=None):
"""Make sure the request is authorized.
Returns the authorized user's uid or raises if unauthorized."""
if not request.remote_user:
request.remote_user = request.headers.get('Remote-User')
if not request.remote_user:
request.remote_user = request.headers.get('X-Remote-User')
if request.remote_user == '(null)':
if request.headers.get('Authorization'):
auth_header = request.headers.get('Authorization').split()[1]
                # b64decode returns bytes on Python 3, so decode before splitting
                request.remote_user = base64.b64decode(
                    auth_header).decode('utf-8').split(':')[0]
if (request.remote_user == "admin" and
request.headers.get('Admin-Token')):
sent_admin_token = request.headers.get('Admin-Token')
# If remote-user is admin and an admin-token is passed
# authorized if the token is correct
if sent_admin_token == conf.get('admin_token'):
return 'admin'
else:
# If uid targeted by the request is the same
# as the requester then authorize
if uid and uid == request.remote_user:
return uid
if uid and uid != request.remote_user:
raise UnauthorizedException("Admin action only")
raise UnauthorizedException("unauthorized")
class OpenIDConnectEngine(BaseAuthEngine):
"""Expects a Bearer token sent through the 'Authorization' header.
The token is verified against a JWK, pulled from the well-known
configuration of the OIDC provider.
The claims will be used to provision users if authorization is
successful."""
config = conf.get('oidc', {})
def is_configured(self):
return self.config.get('issuer_url', False)
def _get_issuer_info(self):
issuer_url = self.config.get('issuer_url')
verify_ssl = self.config.get('verify_ssl', True)
issuer_info = requests.get(
urljoin(issuer_url, '.well-known/openid-configuration'),
verify=verify_ssl)
if issuer_info.status_code > 399:
raise UnauthorizedException(
"Cannot fetch OpenID provider's configuration")
return issuer_info.json()
def _get_signing_key(self, jwks_uri, key_id):
verify_ssl = self.config.get('verify_ssl', True)
certs = requests.get(jwks_uri, verify=verify_ssl)
if certs.status_code > 399:
raise UnauthorizedException("Cannot fetch JWKS")
for k in certs.json()['keys']:
if k['kid'] == key_id:
return (jwt.algorithms.RSAAlgorithm.from_jwk(json.dumps(k)),
k['alg'])
raise UnauthorizedException("Key %s not found" % key_id)
def _get_raw_token(self, request):
if request.headers.get('Authorization', None) is None:
raise UnauthorizedException('Missing "Authorization" header')
auth_header = request.headers.get('Authorization', None)
if not auth_header.lower().startswith('bearer '):
raise UnauthorizedException('Invalid "Authorization" header')
token = auth_header[len('bearer '):]
return token
def authorize(self, request, uid=None):
token = self._get_raw_token(request)
issuer_info = self._get_issuer_info()
unverified_headers = jwt.get_unverified_header(token)
key_id = unverified_headers.get('kid', None)
if key_id is None:
raise UnauthorizedException("Missing key id in token")
jwks_uri = issuer_info.get('jwks_uri')
if jwks_uri is None:
raise UnauthorizedException("Missing JWKS URI in config")
key, algo = self._get_signing_key(jwks_uri, key_id)
try:
claims = jwt.decode(token, key, algorithms=algo,
issuer=issuer_info['issuer'],
audience=self.config['audience'])
except Exception as e:
raise UnauthorizedException('Invalid access token: %s' % e)
if claims['preferred_username'] == self.config.get('admin_username',
'admin'):
return 'admin'
if uid and uid == claims['preferred_username']:
return uid
if uid and uid != claims['preferred_username']:
raise UnauthorizedException("Only the admin ")
raise UnauthorizedException('unauthorized')
def provision_user(self, request):
raw_token = self._get_raw_token(request)
# verified before so it's totally okay
claims = jwt.decode(raw_token, verify=False)
# TODO assuming the presence of claims, but a specific scope might be
# needed.
# These are expected to be standard though, see
# https://openid.net/specs/openid-connect-core-1_0.html#StandardClaims
email = claims['email']
uid = claims['preferred_username']
name = claims['name']
_users = users.Users(index.Connector(index_suffix='users'))
u = _users.get(uid)
infos = {'uid': uid,
'name': name,
'default-email': email,
'emails': [{'email': email}]}
if u:
_users.update(infos)
else:
_users.create(infos)
| apache-2.0 | -2,327,107,811,699,455,000 | 39.868571 | 79 | 0.623043 | false |
ajose01/rethinkdb | test/interface/metadata_persistence.py | 29 | 2500 | #!/usr/bin/env python
# Copyright 2010-2014 RethinkDB, all rights reserved.
from __future__ import print_function
import sys, os, time
startTime = time.time()
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, scenario_common, utils, vcoptparse
op = vcoptparse.OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
_, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv))
r = utils.import_python_driver()
dbName, tableName = utils.get_test_db_table()
beforeMetaData = None
afterMetaData = None
files = None
# == start first instance of server
print("Starting server (%.2fs)" % (time.time() - startTime))
with driver.Process(console_output=True, output_folder='.', command_prefix=command_prefix, extra_options=serve_options, wait_until_ready=True) as server:
files = server.files
print("Establishing ReQL connection (%.2fs)" % (time.time() - startTime))
conn = r.connect(host=server.host, port=server.driver_port)
print("Creating db/table %s/%s (%.2fs)" % (dbName, tableName, time.time() - startTime))
if dbName not in r.db_list().run(conn):
r.db_create(dbName).run(conn)
if tableName in r.db(dbName).table_list().run(conn):
r.db(dbName).table_drop(tableName).run(conn)
r.db(dbName).table_create(tableName).run(conn)
print("Collecting metadata for first run (%.2fs)" % (time.time() - startTime))
beforeMetaData = r.db('rethinkdb').table('server_config').get(server.uuid).run(conn)
print("Shutting down server (%.2fs)" % (time.time() - startTime))
print("Restarting server with same files (%.2fs)" % (time.time() - startTime))
with driver.Process(files=files, console_output=True, command_prefix=command_prefix, extra_options=serve_options, wait_until_ready=True) as server:
print("Establishing second ReQL connection (%.2fs)" % (time.time() - startTime))
conn = r.connect(host=server.host, port=server.driver_port)
print("Collecting metadata for second run (%.2fs)" % (time.time() - startTime))
afterMetaData = r.db('rethinkdb').table('server_config').get(server.uuid).run(conn)
assert afterMetaData == beforeMetaData, "The server metadata did not match between runs:\n%s\nvs.\n%s" % (str(beforeMetaData), str(afterMetaData))
print("Cleaning up (%.2fs)" % (time.time() - startTime))
print("Done. (%.2fs)" % (time.time() - startTime))
| agpl-3.0 | 9,140,369,993,927,767,000 | 38.0625 | 153 | 0.6836 | false |
sebrandon1/neutron | neutron/tests/unit/agent/linux/test_ip_link_support.py | 38 | 7364 | # Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.agent.linux import ip_link_support as ip_link
from neutron.tests import base
class TestIpLinkSupport(base.BaseTestCase):
IP_LINK_HELP = """Usage: ip link add [link DEV] [ name ] NAME
[ txqueuelen PACKETS ]
[ address LLADDR ]
[ broadcast LLADDR ]
[ mtu MTU ] [index IDX ]
[ numtxqueues QUEUE_COUNT ]
[ numrxqueues QUEUE_COUNT ]
type TYPE [ ARGS ]
ip link delete DEV type TYPE [ ARGS ]
ip link set { dev DEVICE | group DEVGROUP } [ { up | down } ]
[ arp { on | off } ]
[ dynamic { on | off } ]
[ multicast { on | off } ]
[ allmulticast { on | off } ]
[ promisc { on | off } ]
[ trailers { on | off } ]
[ txqueuelen PACKETS ]
[ name NEWNAME ]
[ address LLADDR ]
[ broadcast LLADDR ]
[ mtu MTU ]
[ netns PID ]
[ netns NAME ]
[ alias NAME ]
[ vf NUM [ mac LLADDR ]
[ vlan VLANID [ qos VLAN-QOS ] ]
[ rate TXRATE ] ]
[ spoofchk { on | off} ] ]
[ state { auto | enable | disable} ] ]
[ master DEVICE ]
[ nomaster ]
ip link show [ DEVICE | group GROUP ] [up]
TYPE := { vlan | veth | vcan | dummy | ifb | macvlan | macvtap |
can | bridge | bond | ipoib | ip6tnl | ipip | sit |
vxlan | gre | gretap | ip6gre | ip6gretap | vti }
"""
IP_LINK_HELP_NO_STATE = """Usage: ip link add link DEV [ name ] NAME
[ txqueuelen PACKETS ]
[ address LLADDR ]
[ broadcast LLADDR ]
[ mtu MTU ]
type TYPE [ ARGS ]
ip link delete DEV type TYPE [ ARGS ]
ip link set DEVICE [ { up | down } ]
[ arp { on | off } ]
[ dynamic { on | off } ]
[ multicast { on | off } ]
[ allmulticast { on | off } ]
[ promisc { on | off } ]
[ trailers { on | off } ]
[ txqueuelen PACKETS ]
[ name NEWNAME ]
[ address LLADDR ]
[ broadcast LLADDR ]
[ mtu MTU ]
[ netns PID ]
[ alias NAME ]
[ vf NUM [ mac LLADDR ]
[ vlan VLANID [ qos VLAN-QOS ] ]
[ rate TXRATE ] ]
ip link show [ DEVICE ]
TYPE := { vlan | veth | vcan | dummy | ifb | macvlan | can }
"""
IP_LINK_HELP_NO_SPOOFCHK = IP_LINK_HELP_NO_STATE
IP_LINK_HELP_NO_VF = """Usage: ip link set DEVICE { up | down |
arp { on | off } |
dynamic { on | off } |
multicast { on | off } |
allmulticast { on | off } |
promisc { on | off } |
trailers { on | off } |
txqueuelen PACKETS |
name NEWNAME |
address LLADDR | broadcast LLADDR |
mtu MTU }
ip link show [ DEVICE ]
"""
def _test_capability(self, capability, subcapability=None,
expected=True, stdout="", stderr=""):
with mock.patch("neutron.agent.linux.utils.execute") as mock_exec:
mock_exec.return_value = (stdout, stderr)
vf_section = ip_link.IpLinkSupport.get_vf_mgmt_section()
capable = ip_link.IpLinkSupport.vf_mgmt_capability_supported(
vf_section, capability, subcapability)
self.assertEqual(expected, capable)
mock_exec.assert_called_once_with(['ip', 'link', 'help'],
check_exit_code=False,
return_stderr=True,
log_fail_as_error=False)
def test_vf_mgmt(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
stderr=self.IP_LINK_HELP)
def test_execute_with_stdout(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
stdout=self.IP_LINK_HELP)
def test_vf_mgmt_no_state(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
expected=False,
stderr=self.IP_LINK_HELP_NO_STATE)
def test_vf_mgmt_no_spoofchk(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_SPOOFCHK,
expected=False,
stderr=self.IP_LINK_HELP_NO_SPOOFCHK)
def test_vf_mgmt_no_vf(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
expected=False,
stderr=self.IP_LINK_HELP_NO_VF)
def test_vf_mgmt_unknown_capability(self):
self._test_capability(
"state1",
expected=False,
stderr=self.IP_LINK_HELP)
def test_vf_mgmt_sub_capability(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_VLAN,
ip_link.IpLinkConstants.IP_LINK_SUB_CAPABILITY_QOS,
stderr=self.IP_LINK_HELP)
def test_vf_mgmt_sub_capability_mismatch(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_STATE,
ip_link.IpLinkConstants.IP_LINK_SUB_CAPABILITY_QOS,
expected=False,
stderr=self.IP_LINK_HELP)
def test_vf_mgmt_sub_capability_invalid(self):
self._test_capability(
ip_link.IpLinkConstants.IP_LINK_CAPABILITY_VLAN,
"qos1",
expected=False,
stderr=self.IP_LINK_HELP)
def test_vf_mgmt_error(self):
with mock.patch("neutron.agent.linux.utils.execute") as mock_exec:
mock_exec.side_effect = Exception()
self.assertRaises(
ip_link.UnsupportedIpLinkCommand,
ip_link.IpLinkSupport.get_vf_mgmt_section)
| apache-2.0 | -5,817,444,157,821,414,000 | 39.461538 | 74 | 0.479902 | false |
ATIX-AG/ansible | lib/ansible/modules/monitoring/datadog_monitor.py | 25 | 13345 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Sebastian Kornehl <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: datadog_monitor
short_description: Manages Datadog monitors
description:
- "Manages monitors within Datadog"
- "Options like described on http://docs.datadoghq.com/api/"
version_added: "2.0"
author: "Sebastian Kornehl (@skornehl)"
requirements: [datadog]
options:
api_key:
description: ["Your DataDog API key."]
required: true
app_key:
description: ["Your DataDog app key."]
required: true
state:
description: ["The designated state of the monitor."]
required: true
choices: ['present', 'absent', 'mute', 'unmute']
tags:
description: ["A list of tags to associate with your monitor when creating or updating. This can help you categorize and filter monitors."]
version_added: "2.2"
type:
description:
- "The type of the monitor."
- The 'event alert'is available starting at Ansible 2.1
choices: ['metric alert', 'service check', 'event alert']
query:
description: ["The monitor query to notify on with syntax varying depending on what type of monitor you are creating."]
name:
description: ["The name of the alert."]
required: true
message:
description:
- A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same
'@username' notation as events. Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'.
silenced:
description: ["Dictionary of scopes to timestamps or None. Each scope will be muted until the given POSIX timestamp or forever if the value is None. "]
default: ""
notify_no_data:
description: ["A boolean indicating whether this monitor will notify when data stops reporting.."]
type: bool
default: 'no'
no_data_timeframe:
description:
- The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric
alerts or 2 minutes for service checks.
required: false
default: 2x timeframe for metric, 2 minutes for service
timeout_h:
description: ["The number of hours of the monitor not reporting data before it will automatically resolve from a triggered state."]
renotify_interval:
description:
- The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's
not resolved.
escalation_message:
description:
- A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval
is None
notify_audit:
description: ["A boolean indicating whether tagged users will be notified on changes to this monitor."]
type: bool
default: 'no'
thresholds:
description:
      - A dictionary of thresholds by status. This option is only available for service checks and metric alerts. Because each of them can have
        multiple thresholds, we don't define them directly in the query.
default: {'ok': 1, 'critical': 1, 'warning': 1}
locked:
description: ["A boolean indicating whether changes to this monitor should be restricted to the creator or admins."]
type: bool
default: 'no'
version_added: "2.2"
require_full_window:
description:
- A boolean indicating whether this monitor needs a full window of data before it's evaluated. We highly recommend you set this to False for
sparse metrics, otherwise some evaluations will be skipped.
version_added: "2.3"
new_host_delay:
description: ["A positive integer representing the number of seconds to wait before evaluating the monitor for new hosts.
This gives the host time to fully initialize."]
version_added: "2.4"
id:
description: ["The id of the alert. If set, will be used instead of the name to locate the alert."]
version_added: "2.3"
'''
EXAMPLES = '''
# Create a metric monitor
- datadog_monitor:
type: "metric alert"
name: "Test monitor"
state: "present"
query: "datadog.agent.up.over('host:host1').last(2).count_by_status()"
message: "Host [[host.name]] with IP [[host.ip]] is failing to report to datadog."
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Deletes a monitor
- datadog_monitor:
name: "Test monitor"
state: "absent"
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Mutes a monitor
- datadog_monitor:
name: "Test monitor"
state: "mute"
silenced: '{"*":None}'
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
# Unmutes a monitor
- datadog_monitor:
name: "Test monitor"
state: "unmute"
api_key: "9775a026f1ca7d1c6c5af9d94d9595a4"
app_key: "87ce4a24b5553d2e482ea8a8500e71b8ad4554ff"
'''
import traceback
# Import Datadog
try:
from datadog import initialize, api
HAS_DATADOG = True
except ImportError:
HAS_DATADOG = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def main():
module = AnsibleModule(
argument_spec=dict(
api_key=dict(required=True, no_log=True),
app_key=dict(required=True, no_log=True),
            state=dict(required=True, choices=['present', 'absent', 'mute', 'unmute']),
            type=dict(required=False, choices=['metric alert', 'service check', 'event alert']),
name=dict(required=True),
query=dict(required=False),
message=dict(required=False, default=None),
silenced=dict(required=False, default=None, type='dict'),
notify_no_data=dict(required=False, default=False, type='bool'),
no_data_timeframe=dict(required=False, default=None),
timeout_h=dict(required=False, default=None),
renotify_interval=dict(required=False, default=None),
escalation_message=dict(required=False, default=None),
notify_audit=dict(required=False, default=False, type='bool'),
thresholds=dict(required=False, type='dict', default=None),
tags=dict(required=False, type='list', default=None),
locked=dict(required=False, default=False, type='bool'),
require_full_window=dict(required=False, default=None, type='bool'),
new_host_delay=dict(required=False, default=None),
id=dict(required=False)
)
)
# Prepare Datadog
if not HAS_DATADOG:
module.fail_json(msg='datadogpy required for this module')
options = {
'api_key': module.params['api_key'],
'app_key': module.params['app_key']
}
initialize(**options)
# Check if api_key and app_key is correct or not
# if not, then fail here.
response = api.Monitor.get_all()
if isinstance(response, dict):
msg = response.get('errors', None)
if msg:
module.fail_json(msg="Failed to connect Datadog server using given app_key and api_key : {0}".format(msg[0]))
if module.params['state'] == 'present':
install_monitor(module)
elif module.params['state'] == 'absent':
delete_monitor(module)
elif module.params['state'] == 'mute':
mute_monitor(module)
elif module.params['state'] == 'unmute':
unmute_monitor(module)
def _fix_template_vars(message):
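    # Datadog message templates use double square brackets so that Jinja does
    # not expand them inside playbooks; e.g. "[[host.name]]" becomes
    # "{{host.name}}" before being sent to the API.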
if message:
return message.replace('[[', '{{').replace(']]', '}}')
return message
def _get_monitor(module):
if module.params['id'] is not None:
monitor = api.Monitor.get(module.params['id'])
if 'errors' in monitor:
module.fail_json(msg="Failed to retrieve monitor with id %s, errors are %s" % (module.params['id'], str(monitor['errors'])))
return monitor
else:
monitors = api.Monitor.get_all()
for monitor in monitors:
if monitor['name'] == module.params['name']:
return monitor
return {}
def _post_monitor(module, options):
try:
kwargs = dict(type=module.params['type'], query=module.params['query'],
name=module.params['name'], message=_fix_template_vars(module.params['message']),
options=options)
if module.params['tags'] is not None:
kwargs['tags'] = module.params['tags']
msg = api.Monitor.create(**kwargs)
if 'errors' in msg:
module.fail_json(msg=str(msg['errors']))
else:
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def _equal_dicts(a, b, ignore_keys):
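    # Compare two monitor dicts while ignoring volatile, server-managed keys
    # (e.g. 'modified'), so that a no-op update can report changed=False.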
ka = set(a).difference(ignore_keys)
kb = set(b).difference(ignore_keys)
return ka == kb and all(a[k] == b[k] for k in ka)
def _update_monitor(module, monitor, options):
try:
kwargs = dict(id=monitor['id'], query=module.params['query'],
name=module.params['name'], message=_fix_template_vars(module.params['message']),
options=options)
if module.params['tags'] is not None:
kwargs['tags'] = module.params['tags']
msg = api.Monitor.update(**kwargs)
if 'errors' in msg:
module.fail_json(msg=str(msg['errors']))
elif _equal_dicts(msg, monitor, ['creator', 'overall_state', 'modified', 'matching_downtimes', 'overall_state_modified']):
module.exit_json(changed=False, msg=msg)
else:
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def install_monitor(module):
options = {
"silenced": module.params['silenced'],
"notify_no_data": module.boolean(module.params['notify_no_data']),
"no_data_timeframe": module.params['no_data_timeframe'],
"timeout_h": module.params['timeout_h'],
"renotify_interval": module.params['renotify_interval'],
"escalation_message": module.params['escalation_message'],
"notify_audit": module.boolean(module.params['notify_audit']),
"locked": module.boolean(module.params['locked']),
"require_full_window": module.params['require_full_window'],
"new_host_delay": module.params['new_host_delay']
}
if module.params['type'] == "service check":
options["thresholds"] = module.params['thresholds'] or {'ok': 1, 'critical': 1, 'warning': 1}
if module.params['type'] == "metric alert" and module.params['thresholds'] is not None:
options["thresholds"] = module.params['thresholds']
monitor = _get_monitor(module)
if not monitor:
_post_monitor(module, options)
else:
_update_monitor(module, monitor, options)
def delete_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.exit_json(changed=False)
try:
msg = api.Monitor.delete(monitor['id'])
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def mute_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif monitor['options']['silenced']:
module.fail_json(msg="Monitor is already muted. Datadog does not allow to modify muted alerts, consider unmuting it first.")
elif (module.params['silenced'] is not None and len(set(monitor['options']['silenced']) ^ set(module.params['silenced'])) == 0):
module.exit_json(changed=False)
try:
if module.params['silenced'] is None or module.params['silenced'] == "":
msg = api.Monitor.mute(id=monitor['id'])
else:
msg = api.Monitor.mute(id=monitor['id'], silenced=module.params['silenced'])
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
def unmute_monitor(module):
monitor = _get_monitor(module)
if not monitor:
module.fail_json(msg="Monitor %s not found!" % module.params['name'])
elif not monitor['options']['silenced']:
module.exit_json(changed=False)
try:
msg = api.Monitor.unmute(monitor['id'])
module.exit_json(changed=True, msg=msg)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 | -2,195,777,915,135,692,300 | 38.717262 | 159 | 0.638291 | false |
Arafatk/sympy | sympy/polys/densebasic.py | 92 | 36015 | """Basic tools for dense recursive polynomials in ``K[x]`` or ``K[X]``. """
from __future__ import print_function, division
from sympy.core import igcd
from sympy import oo
from sympy.polys.monomials import monomial_min, monomial_div
from sympy.polys.orderings import monomial_key
from sympy.core.compatibility import range
import random
def poly_LC(f, K):
"""
Return leading coefficient of ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import poly_LC
>>> poly_LC([], ZZ)
0
>>> poly_LC([ZZ(1), ZZ(2), ZZ(3)], ZZ)
1
"""
if not f:
return K.zero
else:
return f[0]
def poly_TC(f, K):
"""
Return trailing coefficient of ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import poly_TC
>>> poly_TC([], ZZ)
0
>>> poly_TC([ZZ(1), ZZ(2), ZZ(3)], ZZ)
3
"""
if not f:
return K.zero
else:
return f[-1]
dup_LC = dmp_LC = poly_LC
dup_TC = dmp_TC = poly_TC
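# The univariate (dup_*) and multivariate (dmp_*) accessors share one
# implementation, since both only inspect the outermost list.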
def dmp_ground_LC(f, u, K):
"""
Return the ground leading coefficient.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_LC
>>> f = ZZ.map([[[1], [2, 3]]])
>>> dmp_ground_LC(f, 2, ZZ)
1
"""
while u:
f = dmp_LC(f, K)
u -= 1
return dup_LC(f, K)
def dmp_ground_TC(f, u, K):
"""
Return the ground trailing coefficient.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_TC
>>> f = ZZ.map([[[1], [2, 3]]])
>>> dmp_ground_TC(f, 2, ZZ)
3
"""
while u:
f = dmp_TC(f, K)
u -= 1
return dup_TC(f, K)
def dmp_true_LT(f, u, K):
"""
Return the leading term ``c * x_1**n_1 ... x_k**n_k``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_true_LT
>>> f = ZZ.map([[4], [2, 0], [3, 0, 0]])
>>> dmp_true_LT(f, 1, ZZ)
((2, 0), 4)
"""
monom = []
while u:
monom.append(len(f) - 1)
f, u = f[0], u - 1
if not f:
monom.append(0)
else:
monom.append(len(f) - 1)
return tuple(monom), dup_LC(f, K)
def dup_degree(f):
"""
Return the leading degree of ``f`` in ``K[x]``.
Note that the degree of 0 is negative infinity (the SymPy object -oo).
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_degree
>>> f = ZZ.map([1, 2, 0, 3])
>>> dup_degree(f)
3
"""
if not f:
return -oo
return len(f) - 1
def dmp_degree(f, u):
"""
Return the leading degree of ``f`` in ``x_0`` in ``K[X]``.
Note that the degree of 0 is negative infinity (the SymPy object -oo).
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree
>>> dmp_degree([[[]]], 2)
-oo
>>> f = ZZ.map([[2], [1, 2, 3]])
>>> dmp_degree(f, 1)
1
"""
if dmp_zero_p(f, u):
return -oo
else:
return len(f) - 1
def _rec_degree_in(g, v, i, j):
"""Recursive helper function for :func:`dmp_degree_in`."""
if i == j:
return dmp_degree(g, v)
v, i = v - 1, i + 1
return max([ _rec_degree_in(c, v, i, j) for c in g ])
def dmp_degree_in(f, j, u):
"""
Return the leading degree of ``f`` in ``x_j`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree_in
>>> f = ZZ.map([[2], [1, 2, 3]])
>>> dmp_degree_in(f, 0, 1)
1
>>> dmp_degree_in(f, 1, 1)
2
"""
if not j:
return dmp_degree(f, u)
if j < 0 or j > u:
raise IndexError("0 <= j <= %s expected, got %s" % (u, j))
return _rec_degree_in(f, u, 0, j)
def _rec_degree_list(g, v, i, degs):
"""Recursive helper for :func:`dmp_degree_list`."""
degs[i] = max(degs[i], dmp_degree(g, v))
if v > 0:
v, i = v - 1, i + 1
for c in g:
_rec_degree_list(c, v, i, degs)
def dmp_degree_list(f, u):
"""
Return a list of degrees of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_degree_list
>>> f = ZZ.map([[1], [1, 2, 3]])
>>> dmp_degree_list(f, 1)
(1, 2)
"""
degs = [-oo]*(u + 1)
_rec_degree_list(f, u, 0, degs)
return tuple(degs)
def dup_strip(f):
"""
Remove leading zeros from ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.densebasic import dup_strip
>>> dup_strip([0, 0, 1, 2, 3, 0])
[1, 2, 3, 0]
"""
if not f or f[0]:
return f
i = 0
for cf in f:
if cf:
break
else:
i += 1
return f[i:]
def dmp_strip(f, u):
"""
Remove leading zeros from ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_strip
>>> dmp_strip([[], [0, 1, 2], [1]], 1)
[[0, 1, 2], [1]]
"""
if not u:
return dup_strip(f)
if dmp_zero_p(f, u):
return f
i, v = 0, u - 1
for c in f:
if not dmp_zero_p(c, v):
break
else:
i += 1
if i == len(f):
return dmp_zero(u)
else:
return f[i:]
def _rec_validate(f, g, i, K):
"""Recursive helper for :func:`dmp_validate`."""
if type(g) is not list:
if K is not None and not K.of_type(g):
            raise TypeError("%s in %s is not of type %s" % (g, f, K.dtype))
return set([i - 1])
elif not g:
return set([i])
else:
j, levels = i + 1, set([])
for c in g:
levels |= _rec_validate(f, c, i + 1, K)
return levels
def _rec_strip(g, v):
"""Recursive helper for :func:`_rec_strip`."""
if not v:
return dup_strip(g)
w = v - 1
return dmp_strip([ _rec_strip(c, w) for c in g ], v)
def dmp_validate(f, K=None):
"""
Return the number of levels in ``f`` and recursively strip it.
Examples
========
>>> from sympy.polys.densebasic import dmp_validate
>>> dmp_validate([[], [0, 1, 2], [1]])
([[1, 2], [1]], 1)
>>> dmp_validate([[1], 1])
Traceback (most recent call last):
...
ValueError: invalid data structure for a multivariate polynomial
"""
levels = _rec_validate(f, f, 0, K)
u = levels.pop()
if not levels:
return _rec_strip(f, u), u
else:
raise ValueError(
"invalid data structure for a multivariate polynomial")
def dup_reverse(f):
"""
Compute ``x**n * f(1/x)``, i.e.: reverse ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_reverse
>>> f = ZZ.map([1, 2, 3, 0])
>>> dup_reverse(f)
[3, 2, 1]
"""
return dup_strip(list(reversed(f)))
def dup_copy(f):
"""
Create a new copy of a polynomial ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_copy
>>> f = ZZ.map([1, 2, 3, 0])
    >>> dup_copy(f)
[1, 2, 3, 0]
"""
return list(f)
def dmp_copy(f, u):
"""
Create a new copy of a polynomial ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_copy
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_copy(f, 1)
[[1], [1, 2]]
"""
if not u:
return list(f)
v = u - 1
return [ dmp_copy(c, v) for c in f ]
def dup_to_tuple(f):
"""
Convert `f` into a tuple.
This is needed for hashing. This is similar to dup_copy().
Examples
========
>>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.densebasic import dup_to_tuple
    >>> f = ZZ.map([1, 2, 3, 0])
    >>> dup_to_tuple(f)
    (1, 2, 3, 0)
"""
return tuple(f)
def dmp_to_tuple(f, u):
"""
Convert `f` into a nested tuple of tuples.
This is needed for hashing. This is similar to dmp_copy().
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_to_tuple
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_to_tuple(f, 1)
((1,), (1, 2))
"""
if not u:
return tuple(f)
v = u - 1
return tuple(dmp_to_tuple(c, v) for c in f)
def dup_normal(f, K):
"""
    Normalize a univariate polynomial in the given domain.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_normal
>>> dup_normal([0, 1.5, 2, 3], ZZ)
[1, 2, 3]
"""
return dup_strip([ K.normal(c) for c in f ])
def dmp_normal(f, u, K):
"""
Normalize a multivariate polynomial in the given domain.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_normal
>>> dmp_normal([[], [0, 1.5, 2]], 1, ZZ)
[[1, 2]]
"""
if not u:
return dup_normal(f, K)
v = u - 1
return dmp_strip([ dmp_normal(c, v, K) for c in f ], u)
def dup_convert(f, K0, K1):
"""
Convert the ground domain of ``f`` from ``K0`` to ``K1``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_convert
>>> R, x = ring("x", ZZ)
>>> dup_convert([R(1), R(2)], R.to_domain(), ZZ)
[1, 2]
>>> dup_convert([ZZ(1), ZZ(2)], ZZ, R.to_domain())
[1, 2]
"""
if K0 is not None and K0 == K1:
return f
else:
return dup_strip([ K1.convert(c, K0) for c in f ])
def dmp_convert(f, u, K0, K1):
"""
Convert the ground domain of ``f`` from ``K0`` to ``K1``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_convert
>>> R, x = ring("x", ZZ)
>>> dmp_convert([[R(1)], [R(2)]], 1, R.to_domain(), ZZ)
[[1], [2]]
>>> dmp_convert([[ZZ(1)], [ZZ(2)]], 1, ZZ, R.to_domain())
[[1], [2]]
"""
if not u:
return dup_convert(f, K0, K1)
if K0 is not None and K0 == K1:
return f
v = u - 1
return dmp_strip([ dmp_convert(c, v, K0, K1) for c in f ], u)
def dup_from_sympy(f, K):
"""
Convert the ground domain of ``f`` from SymPy to ``K``.
Examples
========
>>> from sympy import S
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_sympy
>>> dup_from_sympy([S(1), S(2)], ZZ) == [ZZ(1), ZZ(2)]
True
"""
return dup_strip([ K.from_sympy(c) for c in f ])
def dmp_from_sympy(f, u, K):
"""
Convert the ground domain of ``f`` from SymPy to ``K``.
Examples
========
>>> from sympy import S
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_from_sympy
>>> dmp_from_sympy([[S(1)], [S(2)]], 1, ZZ) == [[ZZ(1)], [ZZ(2)]]
True
"""
if not u:
return dup_from_sympy(f, K)
v = u - 1
return dmp_strip([ dmp_from_sympy(c, v, K) for c in f ], u)
def dup_nth(f, n, K):
"""
Return the ``n``-th coefficient of ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_nth
>>> f = ZZ.map([1, 2, 3])
>>> dup_nth(f, 0, ZZ)
3
>>> dup_nth(f, 4, ZZ)
0
"""
if n < 0:
raise IndexError("'n' must be non-negative, got %i" % n)
elif n >= len(f):
return K.zero
else:
return f[dup_degree(f) - n]
def dmp_nth(f, n, u, K):
"""
    Return the ``n``-th coefficient of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_nth
>>> f = ZZ.map([[1], [2], [3]])
>>> dmp_nth(f, 0, 1, ZZ)
[3]
>>> dmp_nth(f, 4, 1, ZZ)
[]
"""
if n < 0:
raise IndexError("'n' must be non-negative, got %i" % n)
elif n >= len(f):
return dmp_zero(u - 1)
else:
return f[dmp_degree(f, u) - n]
def dmp_ground_nth(f, N, u, K):
"""
    Return the ground ``N``-th coefficient of ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_ground_nth
>>> f = ZZ.map([[1], [2, 3]])
>>> dmp_ground_nth(f, (0, 1), 1, ZZ)
2
"""
v = u
for n in N:
if n < 0:
raise IndexError("`n` must be non-negative, got %i" % n)
elif n >= len(f):
return K.zero
else:
d = dmp_degree(f, v)
if d == -oo:
d = -1
f, v = f[d - n], v - 1
return f
def dmp_zero_p(f, u):
"""
Return ``True`` if ``f`` is zero in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_zero_p
>>> dmp_zero_p([[[[[]]]]], 4)
True
>>> dmp_zero_p([[[[[1]]]]], 4)
False
"""
while u:
if len(f) != 1:
return False
f = f[0]
u -= 1
return not f
def dmp_zero(u):
"""
Return a multivariate zero.
Examples
========
>>> from sympy.polys.densebasic import dmp_zero
>>> dmp_zero(4)
[[[[[]]]]]
"""
r = []
for i in range(u):
r = [r]
return r
def dmp_one_p(f, u, K):
"""
Return ``True`` if ``f`` is one in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_one_p
>>> dmp_one_p([[[ZZ(1)]]], 2, ZZ)
True
"""
return dmp_ground_p(f, K.one, u)
def dmp_one(u, K):
"""
Return a multivariate one over ``K``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_one
>>> dmp_one(2, ZZ)
[[[1]]]
"""
return dmp_ground(K.one, u)
def dmp_ground_p(f, c, u):
"""
    Return ``True`` if ``f`` is constant in ``K[X]``.
Examples
========
>>> from sympy.polys.densebasic import dmp_ground_p
>>> dmp_ground_p([[[3]]], 3, 2)
True
>>> dmp_ground_p([[[4]]], None, 2)
True
"""
if c is not None and not c:
return dmp_zero_p(f, u)
while u:
if len(f) != 1:
return False
f = f[0]
u -= 1
if c is None:
return len(f) <= 1
else:
return f == [c]
def dmp_ground(c, u):
"""
Return a multivariate constant.
Examples
========
>>> from sympy.polys.densebasic import dmp_ground
>>> dmp_ground(3, 5)
[[[[[[3]]]]]]
>>> dmp_ground(1, -1)
1
"""
if not c:
return dmp_zero(u)
for i in range(u + 1):
c = [c]
return c
def dmp_zeros(n, u, K):
"""
Return a list of multivariate zeros.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_zeros
>>> dmp_zeros(3, 2, ZZ)
[[[[]]], [[[]]], [[[]]]]
>>> dmp_zeros(3, -1, ZZ)
[0, 0, 0]
"""
if not n:
return []
if u < 0:
return [K.zero]*n
else:
return [ dmp_zero(u) for i in range(n) ]
def dmp_grounds(c, n, u):
"""
Return a list of multivariate constants.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_grounds
>>> dmp_grounds(ZZ(4), 3, 2)
[[[[4]]], [[[4]]], [[[4]]]]
>>> dmp_grounds(ZZ(4), 3, -1)
[4, 4, 4]
"""
if not n:
return []
if u < 0:
return [c]*n
else:
return [ dmp_ground(c, u) for i in range(n) ]
def dmp_negative_p(f, u, K):
"""
Return ``True`` if ``LC(f)`` is negative.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_negative_p
>>> dmp_negative_p([[ZZ(1)], [-ZZ(1)]], 1, ZZ)
False
>>> dmp_negative_p([[-ZZ(1)], [ZZ(1)]], 1, ZZ)
True
"""
return K.is_negative(dmp_ground_LC(f, u, K))
def dmp_positive_p(f, u, K):
"""
Return ``True`` if ``LC(f)`` is positive.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_positive_p
>>> dmp_positive_p([[ZZ(1)], [-ZZ(1)]], 1, ZZ)
True
>>> dmp_positive_p([[-ZZ(1)], [ZZ(1)]], 1, ZZ)
False
"""
return K.is_positive(dmp_ground_LC(f, u, K))
def dup_from_dict(f, K):
"""
Create a ``K[x]`` polynomial from a ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_dict
>>> dup_from_dict({(0,): ZZ(7), (2,): ZZ(5), (4,): ZZ(1)}, ZZ)
[1, 0, 5, 0, 7]
>>> dup_from_dict({}, ZZ)
[]
"""
if not f:
return []
n, h = max(f.keys()), []
if type(n) is int:
for k in range(n, -1, -1):
h.append(f.get(k, K.zero))
else:
(n,) = n
for k in range(n, -1, -1):
h.append(f.get((k,), K.zero))
return dup_strip(h)
def dup_from_raw_dict(f, K):
"""
Create a ``K[x]`` polynomial from a raw ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_from_raw_dict
>>> dup_from_raw_dict({0: ZZ(7), 2: ZZ(5), 4: ZZ(1)}, ZZ)
[1, 0, 5, 0, 7]
"""
if not f:
return []
n, h = max(f.keys()), []
for k in range(n, -1, -1):
h.append(f.get(k, K.zero))
return dup_strip(h)
def dmp_from_dict(f, u, K):
"""
Create a ``K[X]`` polynomial from a ``dict``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_from_dict
>>> dmp_from_dict({(0, 0): ZZ(3), (0, 1): ZZ(2), (2, 1): ZZ(1)}, 1, ZZ)
[[1, 0], [], [2, 3]]
>>> dmp_from_dict({}, 0, ZZ)
[]
"""
if not u:
return dup_from_dict(f, K)
if not f:
return dmp_zero(u)
coeffs = {}
for monom, coeff in f.items():
head, tail = monom[0], monom[1:]
if head in coeffs:
coeffs[head][tail] = coeff
else:
coeffs[head] = { tail: coeff }
n, v, h = max(coeffs.keys()), u - 1, []
for k in range(n, -1, -1):
coeff = coeffs.get(k)
if coeff is not None:
h.append(dmp_from_dict(coeff, v, K))
else:
h.append(dmp_zero(v))
return dmp_strip(h, u)
def dup_to_dict(f, K=None, zero=False):
"""
Convert ``K[x]`` polynomial to a ``dict``.
Examples
========
>>> from sympy.polys.densebasic import dup_to_dict
>>> dup_to_dict([1, 0, 5, 0, 7])
{(0,): 7, (2,): 5, (4,): 1}
>>> dup_to_dict([])
{}
"""
if not f and zero:
return {(0,): K.zero}
n, result = len(f) - 1, {}
for k in range(0, n + 1):
if f[n - k]:
result[(k,)] = f[n - k]
return result
def dup_to_raw_dict(f, K=None, zero=False):
"""
Convert a ``K[x]`` polynomial to a raw ``dict``.
Examples
========
>>> from sympy.polys.densebasic import dup_to_raw_dict
>>> dup_to_raw_dict([1, 0, 5, 0, 7])
{0: 7, 2: 5, 4: 1}
"""
if not f and zero:
return {0: K.zero}
n, result = len(f) - 1, {}
for k in range(0, n + 1):
if f[n - k]:
result[k] = f[n - k]
return result
def dmp_to_dict(f, u, K=None, zero=False):
"""
    Convert a ``K[X]`` polynomial to a ``dict``.
Examples
========
>>> from sympy.polys.densebasic import dmp_to_dict
>>> dmp_to_dict([[1, 0], [], [2, 3]], 1)
{(0, 0): 3, (0, 1): 2, (2, 1): 1}
>>> dmp_to_dict([], 0)
{}
"""
if not u:
return dup_to_dict(f, K, zero=zero)
if dmp_zero_p(f, u) and zero:
return {(0,)*(u + 1): K.zero}
n, v, result = dmp_degree(f, u), u - 1, {}
if n == -oo:
n = -1
for k in range(0, n + 1):
h = dmp_to_dict(f[n - k], v)
for exp, coeff in h.items():
result[(k,) + exp] = coeff
return result
def dmp_swap(f, i, j, u, K):
"""
Transform ``K[..x_i..x_j..]`` to ``K[..x_j..x_i..]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_swap
>>> f = ZZ.map([[[2], [1, 0]], []])
>>> dmp_swap(f, 0, 1, 2, ZZ)
[[[2], []], [[1, 0], []]]
>>> dmp_swap(f, 1, 2, 2, ZZ)
[[[1], [2, 0]], [[]]]
>>> dmp_swap(f, 0, 2, 2, ZZ)
[[[1, 0]], [[2, 0], []]]
"""
if i < 0 or j < 0 or i > u or j > u:
raise IndexError("0 <= i < j <= %s expected" % u)
elif i == j:
return f
F, H = dmp_to_dict(f, u), {}
for exp, coeff in F.items():
H[exp[:i] + (exp[j],) +
exp[i + 1:j] +
(exp[i],) + exp[j + 1:]] = coeff
return dmp_from_dict(H, u, K)
def dmp_permute(f, P, u, K):
"""
Return a polynomial in ``K[x_{P(1)},..,x_{P(n)}]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_permute
>>> f = ZZ.map([[[2], [1, 0]], []])
>>> dmp_permute(f, [1, 0, 2], 2, ZZ)
[[[2], []], [[1, 0], []]]
>>> dmp_permute(f, [1, 2, 0], 2, ZZ)
[[[1], []], [[2, 0], []]]
"""
F, H = dmp_to_dict(f, u), {}
for exp, coeff in F.items():
new_exp = [0]*len(exp)
for e, p in zip(exp, P):
new_exp[p] = e
H[tuple(new_exp)] = coeff
return dmp_from_dict(H, u, K)
def dmp_nest(f, l, K):
"""
Return a multivariate value nested ``l``-levels.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_nest
>>> dmp_nest([[ZZ(1)]], 2, ZZ)
[[[[1]]]]
"""
if not isinstance(f, list):
return dmp_ground(f, l)
for i in range(l):
f = [f]
return f
def dmp_raise(f, l, u, K):
"""
Return a multivariate polynomial raised ``l``-levels.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_raise
>>> f = ZZ.map([[], [1, 2]])
>>> dmp_raise(f, 2, 1, ZZ)
[[[[]]], [[[1]], [[2]]]]
"""
if not l:
return f
if not u:
if not f:
return dmp_zero(l)
k = l - 1
return [ dmp_ground(c, k) for c in f ]
v = u - 1
return [ dmp_raise(c, l, v, K) for c in f ]
def dup_deflate(f, K):
"""
Map ``x**m`` to ``y`` in a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_deflate
>>> f = ZZ.map([1, 0, 0, 1, 0, 0, 1])
>>> dup_deflate(f, ZZ)
(3, [1, 1, 1])
"""
if dup_degree(f) <= 0:
return 1, f
g = 0
for i in range(len(f)):
if not f[-i - 1]:
continue
g = igcd(g, i)
if g == 1:
return 1, f
return g, f[::g]
def dmp_deflate(f, u, K):
"""
Map ``x_i**m_i`` to ``y_i`` in a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_deflate
>>> f = ZZ.map([[1, 0, 0, 2], [], [3, 0, 0, 4]])
>>> dmp_deflate(f, 1, ZZ)
((2, 3), [[1, 2], [3, 4]])
"""
if dmp_zero_p(f, u):
return (1,)*(u + 1), f
F = dmp_to_dict(f, u)
B = [0]*(u + 1)
for M in F.keys():
for i, m in enumerate(M):
B[i] = igcd(B[i], m)
for i, b in enumerate(B):
if not b:
B[i] = 1
B = tuple(B)
if all(b == 1 for b in B):
return B, f
H = {}
for A, coeff in F.items():
N = [ a // b for a, b in zip(A, B) ]
H[tuple(N)] = coeff
return B, dmp_from_dict(H, u, K)
def dup_multi_deflate(polys, K):
"""
Map ``x**m`` to ``y`` in a set of polynomials in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_multi_deflate
>>> f = ZZ.map([1, 0, 2, 0, 3])
>>> g = ZZ.map([4, 0, 0])
>>> dup_multi_deflate((f, g), ZZ)
(2, ([1, 2, 3], [4, 0]))
"""
G = 0
for p in polys:
if dup_degree(p) <= 0:
return 1, polys
g = 0
for i in range(len(p)):
if not p[-i - 1]:
continue
g = igcd(g, i)
if g == 1:
return 1, polys
G = igcd(G, g)
return G, tuple([ p[::G] for p in polys ])
def dmp_multi_deflate(polys, u, K):
"""
Map ``x_i**m_i`` to ``y_i`` in a set of polynomials in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_multi_deflate
>>> f = ZZ.map([[1, 0, 0, 2], [], [3, 0, 0, 4]])
>>> g = ZZ.map([[1, 0, 2], [], [3, 0, 4]])
>>> dmp_multi_deflate((f, g), 1, ZZ)
((2, 1), ([[1, 0, 0, 2], [3, 0, 0, 4]], [[1, 0, 2], [3, 0, 4]]))
"""
if not u:
M, H = dup_multi_deflate(polys, K)
return (M,), H
F, B = [], [0]*(u + 1)
for p in polys:
f = dmp_to_dict(p, u)
if not dmp_zero_p(p, u):
for M in f.keys():
for i, m in enumerate(M):
B[i] = igcd(B[i], m)
F.append(f)
for i, b in enumerate(B):
if not b:
B[i] = 1
B = tuple(B)
if all(b == 1 for b in B):
return B, polys
H = []
for f in F:
h = {}
for A, coeff in f.items():
N = [ a // b for a, b in zip(A, B) ]
h[tuple(N)] = coeff
H.append(dmp_from_dict(h, u, K))
return B, tuple(H)
def dup_inflate(f, m, K):
"""
Map ``y`` to ``x**m`` in a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_inflate
>>> f = ZZ.map([1, 1, 1])
>>> dup_inflate(f, 3, ZZ)
[1, 0, 0, 1, 0, 0, 1]
"""
if m <= 0:
raise IndexError("'m' must be positive, got %s" % m)
if m == 1 or not f:
return f
result = [f[0]]
for coeff in f[1:]:
result.extend([K.zero]*(m - 1))
result.append(coeff)
return result
def _rec_inflate(g, M, v, i, K):
"""Recursive helper for :func:`dmp_inflate`."""
if not v:
return dup_inflate(g, M[i], K)
if M[i] <= 0:
raise IndexError("all M[i] must be positive, got %s" % M[i])
w, j = v - 1, i + 1
g = [ _rec_inflate(c, M, w, j, K) for c in g ]
result = [g[0]]
for coeff in g[1:]:
for _ in range(1, M[i]):
result.append(dmp_zero(w))
result.append(coeff)
return result
def dmp_inflate(f, M, u, K):
"""
Map ``y_i`` to ``x_i**k_i`` in a polynomial in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_inflate
>>> f = ZZ.map([[1, 2], [3, 4]])
>>> dmp_inflate(f, (2, 3), 1, ZZ)
[[1, 0, 0, 2], [], [3, 0, 0, 4]]
"""
if not u:
return dup_inflate(f, M[0], K)
if all(m == 1 for m in M):
return f
else:
return _rec_inflate(f, M, u, 0, K)
def dmp_exclude(f, u, K):
"""
Exclude useless levels from ``f``.
Return the levels excluded, the new excluded ``f``, and the new ``u``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_exclude
>>> f = ZZ.map([[[1]], [[1], [2]]])
>>> dmp_exclude(f, 2, ZZ)
([2], [[1], [1, 2]], 1)
"""
if not u or dmp_ground_p(f, None, u):
return [], f, u
J, F = [], dmp_to_dict(f, u)
for j in range(0, u + 1):
for monom in F.keys():
if monom[j]:
break
else:
J.append(j)
if not J:
return [], f, u
f = {}
for monom, coeff in F.items():
monom = list(monom)
for j in reversed(J):
del monom[j]
f[tuple(monom)] = coeff
u -= len(J)
return J, dmp_from_dict(f, u, K), u
def dmp_include(f, J, u, K):
"""
Include useless levels in ``f``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_include
>>> f = ZZ.map([[1], [1, 2]])
>>> dmp_include(f, [2], 1, ZZ)
[[[1]], [[1], [2]]]
"""
if not J:
return f
F, f = dmp_to_dict(f, u), {}
for monom, coeff in F.items():
monom = list(monom)
for j in J:
monom.insert(j, 0)
f[tuple(monom)] = coeff
u += len(J)
return dmp_from_dict(f, u, K)
def dmp_inject(f, u, K, front=False):
"""
Convert ``f`` from ``K[X][Y]`` to ``K[X,Y]``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_inject
>>> R, x,y = ring("x,y", ZZ)
>>> dmp_inject([R(1), x + 2], 0, R.to_domain())
([[[1]], [[1], [2]]], 2)
>>> dmp_inject([R(1), x + 2], 0, R.to_domain(), front=True)
([[[1]], [[1, 2]]], 2)
"""
f, h = dmp_to_dict(f, u), {}
v = K.ngens - 1
for f_monom, g in f.items():
g = g.to_dict()
for g_monom, c in g.items():
if front:
h[g_monom + f_monom] = c
else:
h[f_monom + g_monom] = c
w = u + v + 1
return dmp_from_dict(h, w, K.dom), w
def dmp_eject(f, u, K, front=False):
"""
Convert ``f`` from ``K[X,Y]`` to ``K[X][Y]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_eject
>>> dmp_eject([[[1]], [[1], [2]]], 2, ZZ['x', 'y'])
[1, x + 2]
"""
f, h = dmp_to_dict(f, u), {}
n = K.ngens
v = u - K.ngens + 1
for monom, c in f.items():
if front:
g_monom, f_monom = monom[:n], monom[n:]
else:
g_monom, f_monom = monom[-n:], monom[:-n]
if f_monom in h:
h[f_monom][g_monom] = c
else:
h[f_monom] = {g_monom: c}
for monom, c in h.items():
h[monom] = K(c)
return dmp_from_dict(h, v - 1, K)
def dup_terms_gcd(f, K):
"""
Remove GCD of terms from ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_terms_gcd
>>> f = ZZ.map([1, 0, 1, 0, 0])
>>> dup_terms_gcd(f, ZZ)
(2, [1, 0, 1])
"""
if dup_TC(f, K) or not f:
return 0, f
i = 0
for c in reversed(f):
if not c:
i += 1
else:
break
return i, f[:-i]
def dmp_terms_gcd(f, u, K):
"""
Remove GCD of terms from ``f`` in ``K[X]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_terms_gcd
>>> f = ZZ.map([[1, 0], [1, 0, 0], [], []])
>>> dmp_terms_gcd(f, 1, ZZ)
((2, 1), [[1], [1, 0]])
"""
if dmp_ground_TC(f, u, K) or dmp_zero_p(f, u):
return (0,)*(u + 1), f
F = dmp_to_dict(f, u)
G = monomial_min(*list(F.keys()))
if all(g == 0 for g in G):
return G, f
f = {}
for monom, coeff in F.items():
f[monomial_div(monom, G)] = coeff
return G, dmp_from_dict(f, u, K)
def _rec_list_terms(g, v, monom):
"""Recursive helper for :func:`dmp_list_terms`."""
d, terms = dmp_degree(g, v), []
if not v:
for i, c in enumerate(g):
if not c:
continue
terms.append((monom + (d - i,), c))
else:
w = v - 1
for i, c in enumerate(g):
terms.extend(_rec_list_terms(c, w, monom + (d - i,)))
return terms
def dmp_list_terms(f, u, K, order=None):
"""
    List all non-zero terms from ``f``, optionally sorted by the monomial order ``order``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_list_terms
>>> f = ZZ.map([[1, 1], [2, 3]])
>>> dmp_list_terms(f, 1, ZZ)
[((1, 1), 1), ((1, 0), 1), ((0, 1), 2), ((0, 0), 3)]
>>> dmp_list_terms(f, 1, ZZ, order='grevlex')
[((1, 1), 1), ((1, 0), 1), ((0, 1), 2), ((0, 0), 3)]
"""
def sort(terms, O):
return sorted(terms, key=lambda term: O(term[0]), reverse=True)
terms = _rec_list_terms(f, u, ())
if not terms:
return [((0,)*(u + 1), K.zero)]
if order is None:
return terms
else:
return sort(terms, monomial_key(order))
def dup_apply_pairs(f, g, h, args, K):
"""
Apply ``h`` to pairs of coefficients of ``f`` and ``g``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_apply_pairs
>>> h = lambda x, y, z: 2*x + y - z
>>> dup_apply_pairs([1, 2, 3], [3, 2, 1], h, (1,), ZZ)
[4, 5, 6]
"""
n, m = len(f), len(g)
if n != m:
if n > m:
g = [K.zero]*(n - m) + g
else:
f = [K.zero]*(m - n) + f
result = []
for a, b in zip(f, g):
result.append(h(a, b, *args))
return dup_strip(result)
def dmp_apply_pairs(f, g, h, args, u, K):
"""
Apply ``h`` to pairs of coefficients of ``f`` and ``g``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dmp_apply_pairs
>>> h = lambda x, y, z: 2*x + y - z
>>> dmp_apply_pairs([[1], [2, 3]], [[3], [2, 1]], h, (1,), 1, ZZ)
[[4], [5, 6]]
"""
if not u:
return dup_apply_pairs(f, g, h, args, K)
n, m, v = len(f), len(g), u - 1
if n != m:
if n > m:
g = dmp_zeros(n - m, v, K) + g
else:
f = dmp_zeros(m - n, v, K) + f
result = []
for a, b in zip(f, g):
result.append(dmp_apply_pairs(a, b, h, args, v, K))
return dmp_strip(result, u)
def dup_slice(f, m, n, K):
"""Take a continuous subsequence of terms of ``f`` in ``K[x]``. """
k = len(f)
if k >= m:
M = k - m
else:
M = 0
if k >= n:
N = k - n
else:
N = 0
f = f[N:M]
if not f:
return []
else:
return f + [K.zero]*m
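# Example for dup_slice (an illustrative sketch, not an original doctest):
# the slice keeps the terms whose degree lies in [m, n).
#
# >>> from sympy.polys.domains import ZZ
# >>> f = ZZ.map([1, 2, 3])      # x**2 + 2*x + 3
# >>> dup_slice(f, 1, 3, ZZ)     # terms of degree 1 and 2
# [1, 2, 0]
# >>> dup_slice(f, 0, 1, ZZ)     # only the constant term
# [3]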
def dmp_slice(f, m, n, u, K):
"""Take a continuous subsequence of terms of ``f`` in ``K[X]``. """
return dmp_slice_in(f, m, n, 0, u, K)
def dmp_slice_in(f, m, n, j, u, K):
"""Take a continuous subsequence of terms of ``f`` in ``x_j`` in ``K[X]``. """
if j < 0 or j > u:
raise IndexError("-%s <= j < %s expected, got %s" % (u, u, j))
if not u:
return dup_slice(f, m, n, K)
f, g = dmp_to_dict(f, u), {}
for monom, coeff in f.items():
k = monom[j]
if k < m or k >= n:
monom = monom[:j] + (0,) + monom[j + 1:]
if monom in g:
g[monom] += coeff
else:
g[monom] = coeff
return dmp_from_dict(g, u, K)
def dup_random(n, a, b, K):
"""
Return a polynomial of degree ``n`` with coefficients in ``[a, b]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_random
>>> dup_random(3, -10, 10, ZZ) #doctest: +SKIP
[-2, -8, 9, -4]
"""
f = [ K.convert(random.randint(a, b)) for _ in range(0, n + 1) ]
while not f[0]:
f[0] = K.convert(random.randint(a, b))
return f
| bsd-3-clause | -417,864,396,992,419,840 | 18.116242 | 82 | 0.468943 | false |
ALISCIFP/tensorflow-resnet-segmentation | convert_mhd2jpg_png_LUNA16.py | 1 | 3214 | #!/usr/bin/env python
# This script belongs to https://github.com/
# This script converts LUNA16 .mhd volumes into RGB JPEG slices and PNG segmentation masks.
__author__ = "Zengming Shen,Email:[email protected]"
import os,glob
import argparse
import numpy as np
import SimpleITK as sitk
from PIL import Image
import cv2
import scipy.misc
DATA_DIRECTORY = '/home/zack/Data/LUNA16/'
OUT_DIRECTORY = "/home/zack/Data/LUNA16/"
def mhd2ndarray(data_file):
itkimg = sitk.ReadImage(data_file)
img=sitk.GetArrayFromImage(itkimg)
img = np.transpose(img,(1,2,0))
return img
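# Usage sketch (illustrative; the file path below is hypothetical):
#
#     vol = mhd2ndarray('subset0/some_scan.mhd')
#     # vol.shape is (rows, cols, num_slices) after the transpose above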
def ndarry2jpg_png(data_file,out_dir,subsetIndex,flist):
data_path,fn = os.path.split(data_file)
# img_gt_file= data_path+"output/yes_lesion_no_rescale/seg/"+fn
img_gt_file = data_file.replace("subset"+str(subsetIndex),"output/yes_lesion_no_rescale/subset"+str(subsetIndex)+"/seg")
img = mhd2ndarray(data_file)
img_gt = mhd2ndarray(img_gt_file)
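    # Pad one slice of air (-3024 HU) on each side of the slice axis, so that
    # every original slice i has both neighbours for the 3-channel stack below.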
img_pad=np.lib.pad(img, ((0, 0),(0,0),(1,1)), 'constant', constant_values=(-3024, -3024))
# img_pos = img_pad-img_pad.min()
# img_pad = img_pos*(255.0/img_pos.max())
for i in xrange(0,img.shape[2]):
img3c = img_pad[:,:,i:i+3]
try:
scipy.misc.imsave(os.path.join(out_dir+"JPEGImages/subset"+str(subsetIndex),fn+"_"+str(i)+".jpg"), img3c)
except ValueError:
print fn
pass
# im = Image.fromarray(img3c)
# im.save(os.path.join(out_dir+"JPEGImages/subset"+str(subsetIndex),fn+"_"+str(i)+"_"+".jpg"))
cv2.imwrite(os.path.join(out_dir+"PNGImages/subset"+str(subsetIndex),fn+"_"+str(i)+".png"),img_gt[:,:,i])
flist.write("/JPEGImages/subset"+str(subsetIndex)+"/"+fn+"_"+str(i)+".jpg "+"/PNGImages/subset"+str(subsetIndex)+"/"+fn+"_"+str(i)+".png\n")
def convert(data_dir,out_dir):
ftrain = open(data_dir + "dataset/train.txt", 'a')
fval = open(data_dir + "dataset/val.txt", 'w')
for i in xrange(3,10):
print "converting subset "+str(i)
os.chdir(data_dir + "subset" + str(i))
if not os.path.exists(data_dir + "JPEGImages/subset" + str(i)):
os.mkdir(data_dir + "JPEGImages/subset" + str(i))
if not os.path.exists(data_dir + "PNGImages/subset" + str(i)):
os.mkdir(data_dir + "PNGImages/subset" + str(i))
for file in glob.glob("*.mhd"):
if i<8:
ndarry2jpg_png(os.path.join(data_dir + "subset" + str(i),file), out_dir, i,ftrain)
else:
ndarry2jpg_png(os.path.join(data_dir + "subset" + str(i),file), out_dir, i,fval)
ftrain.close()
fval.close()
print "done."
def main():
    parser = argparse.ArgumentParser(description="mhd to jpg-png file converter")
parser.add_argument("--data-dir", type=str, default=DATA_DIRECTORY,
help="Path to the directory containing the ILD dataset.")
parser.add_argument("--out-dir", type=str, default=OUT_DIRECTORY,
help="Path to the directory containing the ILD dataset in jpg and png format.")
args = parser.parse_args()
convert(args.data_dir,args.out_dir)
if __name__ == '__main__':
main()
| mit | -5,409,096,295,083,091,000 | 29.903846 | 148 | 0.615432 | false |
theguardian/KodiDB | cherrypy/_cpdispatch.py | 39 | 24149 | """CherryPy dispatchers.
A 'dispatcher' is the object which looks up the 'page handler' callable
and collects config for the current request based on the path_info, other
request attributes, and the application architecture. The core calls the
dispatcher as early as possible, passing it a 'path_info' argument.
The default dispatcher discovers the page handler by matching path_info
to a hierarchical arrangement of objects, starting at request.app.root.
"""
import string
import sys
import types
try:
classtype = (type, types.ClassType)
except AttributeError:
classtype = type
import cherrypy
from cherrypy._cpcompat import set
class PageHandler(object):
"""Callable which sets response.body."""
def __init__(self, callable, *args, **kwargs):
self.callable = callable
self.args = args
self.kwargs = kwargs
def __call__(self):
try:
return self.callable(*self.args, **self.kwargs)
except TypeError:
x = sys.exc_info()[1]
try:
test_callable_spec(self.callable, self.args, self.kwargs)
except cherrypy.HTTPError:
raise sys.exc_info()[1]
except:
raise x
raise
def test_callable_spec(callable, callable_args, callable_kwargs):
"""
Inspect callable and test to see if the given args are suitable for it.
When an error occurs during the handler's invoking stage there are 2
erroneous cases:
1. Too many parameters passed to a function which doesn't define
one of *args or **kwargs.
    2. Too few parameters are passed to the function.
There are 3 sources of parameters to a cherrypy handler.
1. query string parameters are passed as keyword parameters to the handler.
2. body parameters are also passed as keyword parameters.
3. when partial matching occurs, the final path atoms are passed as
positional args.
Both the query string and path atoms are part of the URI. If they are
incorrect, then a 404 Not Found should be raised. Conversely the body
parameters are part of the request; if they are invalid a 400 Bad Request.
"""
show_mismatched_params = getattr(
cherrypy.serving.request, 'show_mismatched_params', False)
try:
(args, varargs, varkw, defaults) = inspect.getargspec(callable)
except TypeError:
if isinstance(callable, object) and hasattr(callable, '__call__'):
(args, varargs, varkw, defaults) = inspect.getargspec(callable.__call__)
else:
# If it wasn't one of our own types, re-raise
# the original error
raise
if args and args[0] == 'self':
args = args[1:]
arg_usage = dict([(arg, 0,) for arg in args])
vararg_usage = 0
varkw_usage = 0
extra_kwargs = set()
for i, value in enumerate(callable_args):
try:
arg_usage[args[i]] += 1
except IndexError:
vararg_usage += 1
for key in callable_kwargs.keys():
try:
arg_usage[key] += 1
except KeyError:
varkw_usage += 1
extra_kwargs.add(key)
# figure out which args have defaults.
args_with_defaults = args[-len(defaults or []):]
for i, val in enumerate(defaults or []):
# Defaults take effect only when the arg hasn't been used yet.
if arg_usage[args_with_defaults[i]] == 0:
arg_usage[args_with_defaults[i]] += 1
missing_args = []
multiple_args = []
for key, usage in arg_usage.items():
if usage == 0:
missing_args.append(key)
elif usage > 1:
multiple_args.append(key)
if missing_args:
# In the case where the method allows body arguments
# there are 3 potential errors:
# 1. not enough query string parameters -> 404
# 2. not enough body parameters -> 400
# 3. not enough path parts (partial matches) -> 404
#
# We can't actually tell which case it is,
# so I'm raising a 404 because that covers 2/3 of the
# possibilities
#
# In the case where the method does not allow body
# arguments it's definitely a 404.
message = None
if show_mismatched_params:
message="Missing parameters: %s" % ",".join(missing_args)
raise cherrypy.HTTPError(404, message=message)
# the extra positional arguments come from the path - 404 Not Found
if not varargs and vararg_usage > 0:
raise cherrypy.HTTPError(404)
body_params = cherrypy.serving.request.body.params or {}
body_params = set(body_params.keys())
qs_params = set(callable_kwargs.keys()) - body_params
if multiple_args:
if qs_params.intersection(set(multiple_args)):
# If any of the multiple parameters came from the query string then
# it's a 404 Not Found
error = 404
else:
# Otherwise it's a 400 Bad Request
error = 400
message = None
if show_mismatched_params:
message="Multiple values for parameters: "\
"%s" % ",".join(multiple_args)
raise cherrypy.HTTPError(error, message=message)
if not varkw and varkw_usage > 0:
# If there were extra query string parameters, it's a 404 Not Found
extra_qs_params = set(qs_params).intersection(extra_kwargs)
if extra_qs_params:
message = None
if show_mismatched_params:
message="Unexpected query string "\
"parameters: %s" % ", ".join(extra_qs_params)
raise cherrypy.HTTPError(404, message=message)
    # If there were any extra body parameters, it's a 400 Bad Request
extra_body_params = set(body_params).intersection(extra_kwargs)
if extra_body_params:
message = None
if show_mismatched_params:
message="Unexpected body parameters: "\
"%s" % ", ".join(extra_body_params)
raise cherrypy.HTTPError(400, message=message)
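# Illustrative mapping of mismatches to HTTP errors (a sketch; the handler
# and URLs below are hypothetical, not from the original module):
#
#     def handler(a, b=1): ...
#
#     GET /handler/x/y/z  -> 404 (extra positional path atoms, no *args)
#     GET /handler?c=1    -> 404 (unexpected query string param, no **kwargs)
#     "a" supplied both as a path atom and in the POST body -> 400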
try:
import inspect
except ImportError:
test_callable_spec = lambda callable, args, kwargs: None
class LateParamPageHandler(PageHandler):
"""When passing cherrypy.request.params to the page handler, we do not
want to capture that dict too early; we want to give tools like the
decoding tool a chance to modify the params dict in-between the lookup
of the handler and the actual calling of the handler. This subclass
takes that into account, and allows request.params to be 'bound late'
(it's more complicated than that, but that's the effect).
"""
def _get_kwargs(self):
kwargs = cherrypy.serving.request.params.copy()
if self._kwargs:
kwargs.update(self._kwargs)
return kwargs
def _set_kwargs(self, kwargs):
self._kwargs = kwargs
kwargs = property(_get_kwargs, _set_kwargs,
doc='page handler kwargs (with '
'cherrypy.request.params copied in)')
if sys.version_info < (3, 0):
punctuation_to_underscores = string.maketrans(
string.punctuation, '_' * len(string.punctuation))
def validate_translator(t):
if not isinstance(t, str) or len(t) != 256:
raise ValueError("The translate argument must be a str of len 256.")
else:
punctuation_to_underscores = str.maketrans(
string.punctuation, '_' * len(string.punctuation))
def validate_translator(t):
if not isinstance(t, dict):
raise ValueError("The translate argument must be a dict.")
class Dispatcher(object):
"""CherryPy Dispatcher which walks a tree of objects to find a handler.
The tree is rooted at cherrypy.request.app.root, and each hierarchical
component in the path_info argument is matched to a corresponding nested
attribute of the root object. Matching handlers must have an 'exposed'
attribute which evaluates to True. The special method name "index"
matches a URI which ends in a slash ("/"). The special method name
"default" may match a portion of the path_info (but only when no longer
substring of the path_info matches some other object).
This is the default, built-in dispatcher for CherryPy.
"""
dispatch_method_name = '_cp_dispatch'
"""
The name of the dispatch method that nodes may optionally implement
to provide their own dynamic dispatch algorithm.
"""
def __init__(self, dispatch_method_name=None,
translate=punctuation_to_underscores):
validate_translator(translate)
self.translate = translate
if dispatch_method_name:
self.dispatch_method_name = dispatch_method_name
def __call__(self, path_info):
"""Set handler and config for the current request."""
request = cherrypy.serving.request
func, vpath = self.find_handler(path_info)
if func:
# Decode any leftover %2F in the virtual_path atoms.
vpath = [x.replace("%2F", "/") for x in vpath]
request.handler = LateParamPageHandler(func, *vpath)
else:
request.handler = cherrypy.NotFound()
def find_handler(self, path):
"""Return the appropriate page handler, plus any virtual path.
This will return two objects. The first will be a callable,
which can be used to generate page output. Any parameters from
the query string or request body will be sent to that callable
as keyword arguments.
The callable is found by traversing the application's tree,
starting from cherrypy.request.app.root, and matching path
components to successive objects in the tree. For example, the
URL "/path/to/handler" might return root.path.to.handler.
The second object returned will be a list of names which are
'virtual path' components: parts of the URL which are dynamic,
and were not used when looking up the handler.
These virtual path components are passed to the handler as
positional arguments.
"""
request = cherrypy.serving.request
app = request.app
root = app.root
dispatch_name = self.dispatch_method_name
# Get config for the root object/path.
fullpath = [x for x in path.strip('/').split('/') if x] + ['index']
fullpath_len = len(fullpath)
segleft = fullpath_len
nodeconf = {}
if hasattr(root, "_cp_config"):
nodeconf.update(root._cp_config)
if "/" in app.config:
nodeconf.update(app.config["/"])
object_trail = [['root', root, nodeconf, segleft]]
node = root
iternames = fullpath[:]
while iternames:
name = iternames[0]
# map to legal Python identifiers (e.g. replace '.' with '_')
objname = name.translate(self.translate)
nodeconf = {}
subnode = getattr(node, objname, None)
pre_len = len(iternames)
if subnode is None:
dispatch = getattr(node, dispatch_name, None)
if dispatch and hasattr(dispatch, '__call__') and not \
getattr(dispatch, 'exposed', False) and \
pre_len > 1:
#Don't expose the hidden 'index' token to _cp_dispatch
#We skip this if pre_len == 1 since it makes no sense
#to call a dispatcher when we have no tokens left.
index_name = iternames.pop()
subnode = dispatch(vpath=iternames)
iternames.append(index_name)
else:
#We didn't find a path, but keep processing in case there
#is a default() handler.
iternames.pop(0)
else:
#We found the path, remove the vpath entry
iternames.pop(0)
segleft = len(iternames)
if segleft > pre_len:
#No path segment was removed. Raise an error.
raise cherrypy.CherryPyException(
"A vpath segment was added. Custom dispatchers may only "
+ "remove elements. While trying to process "
+ "{0} in {1}".format(name, fullpath)
)
elif segleft == pre_len:
#Assume that the handler used the current path segment, but
#did not pop it. This allows things like
#return getattr(self, vpath[0], None)
iternames.pop(0)
segleft -= 1
node = subnode
if node is not None:
# Get _cp_config attached to this node.
if hasattr(node, "_cp_config"):
nodeconf.update(node._cp_config)
# Mix in values from app.config for this path.
existing_len = fullpath_len - pre_len
if existing_len != 0:
curpath = '/' + '/'.join(fullpath[0:existing_len])
else:
curpath = ''
new_segs = fullpath[fullpath_len - pre_len:fullpath_len - segleft]
for seg in new_segs:
curpath += '/' + seg
if curpath in app.config:
nodeconf.update(app.config[curpath])
object_trail.append([name, node, nodeconf, segleft])
def set_conf():
"""Collapse all object_trail config into cherrypy.request.config."""
base = cherrypy.config.copy()
# Note that we merge the config from each node
# even if that node was None.
for name, obj, conf, segleft in object_trail:
base.update(conf)
if 'tools.staticdir.dir' in conf:
base['tools.staticdir.section'] = '/' + '/'.join(fullpath[0:fullpath_len - segleft])
return base
# Try successive objects (reverse order)
num_candidates = len(object_trail) - 1
for i in range(num_candidates, -1, -1):
name, candidate, nodeconf, segleft = object_trail[i]
if candidate is None:
continue
# Try a "default" method on the current leaf.
if hasattr(candidate, "default"):
defhandler = candidate.default
if getattr(defhandler, 'exposed', False):
# Insert any extra _cp_config from the default handler.
conf = getattr(defhandler, "_cp_config", {})
object_trail.insert(i+1, ["default", defhandler, conf, segleft])
request.config = set_conf()
# See http://www.cherrypy.org/ticket/613
request.is_index = path.endswith("/")
return defhandler, fullpath[fullpath_len - segleft:-1]
# Uncomment the next line to restrict positional params to "default".
# if i < num_candidates - 2: continue
# Try the current leaf.
if getattr(candidate, 'exposed', False):
request.config = set_conf()
if i == num_candidates:
# We found the extra ".index". Mark request so tools
# can redirect if path_info has no trailing slash.
request.is_index = True
else:
# We're not at an 'index' handler. Mark request so tools
# can redirect if path_info has NO trailing slash.
# Note that this also includes handlers which take
# positional parameters (virtual paths).
request.is_index = False
return candidate, fullpath[fullpath_len - segleft:-1]
# We didn't find anything
request.config = set_conf()
return None, []
class MethodDispatcher(Dispatcher):
"""Additional dispatch based on cherrypy.request.method.upper().
Methods named GET, POST, etc will be called on an exposed class.
The method names must be all caps; the appropriate Allow header
will be output showing all capitalized method names as allowable
HTTP verbs.
Note that the containing class must be exposed, not the methods.
"""
def __call__(self, path_info):
"""Set handler and config for the current request."""
request = cherrypy.serving.request
resource, vpath = self.find_handler(path_info)
if resource:
# Set Allow header
avail = [m for m in dir(resource) if m.isupper()]
if "GET" in avail and "HEAD" not in avail:
avail.append("HEAD")
avail.sort()
cherrypy.serving.response.headers['Allow'] = ", ".join(avail)
# Find the subhandler
meth = request.method.upper()
func = getattr(resource, meth, None)
if func is None and meth == "HEAD":
func = getattr(resource, "GET", None)
if func:
# Grab any _cp_config on the subhandler.
if hasattr(func, "_cp_config"):
request.config.update(func._cp_config)
# Decode any leftover %2F in the virtual_path atoms.
vpath = [x.replace("%2F", "/") for x in vpath]
request.handler = LateParamPageHandler(func, *vpath)
else:
request.handler = cherrypy.HTTPError(405)
else:
request.handler = cherrypy.NotFound()
class RoutesDispatcher(object):
"""A Routes based dispatcher for CherryPy."""
def __init__(self, full_result=False):
"""
Routes dispatcher
Set full_result to True if you wish the controller
and the action to be passed on to the page handler
parameters. By default they won't be.
"""
import routes
self.full_result = full_result
self.controllers = {}
self.mapper = routes.Mapper()
self.mapper.controller_scan = self.controllers.keys
def connect(self, name, route, controller, **kwargs):
self.controllers[name] = controller
self.mapper.connect(name, route, controller=name, **kwargs)
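    # Usage sketch (hedged: the route syntax depends on the installed Routes
    # version, and the names below are illustrative):
    #
    #     d = RoutesDispatcher()
    #     d.connect('blog', '/blog/:action/:id', controller=BlogController())
    #     conf = {'/': {'request.dispatch': d}}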
def redirect(self, url):
raise cherrypy.HTTPRedirect(url)
def __call__(self, path_info):
"""Set handler and config for the current request."""
func = self.find_handler(path_info)
if func:
cherrypy.serving.request.handler = LateParamPageHandler(func)
else:
cherrypy.serving.request.handler = cherrypy.NotFound()
def find_handler(self, path_info):
"""Find the right page handler, and set request.config."""
import routes
request = cherrypy.serving.request
config = routes.request_config()
config.mapper = self.mapper
if hasattr(request, 'wsgi_environ'):
config.environ = request.wsgi_environ
config.host = request.headers.get('Host', None)
config.protocol = request.scheme
config.redirect = self.redirect
result = self.mapper.match(path_info)
config.mapper_dict = result
params = {}
if result:
params = result.copy()
if not self.full_result:
params.pop('controller', None)
params.pop('action', None)
request.params.update(params)
# Get config for the root object/path.
request.config = base = cherrypy.config.copy()
curpath = ""
def merge(nodeconf):
if 'tools.staticdir.dir' in nodeconf:
nodeconf['tools.staticdir.section'] = curpath or "/"
base.update(nodeconf)
app = request.app
root = app.root
if hasattr(root, "_cp_config"):
merge(root._cp_config)
if "/" in app.config:
merge(app.config["/"])
# Mix in values from app.config.
atoms = [x for x in path_info.split("/") if x]
if atoms:
last = atoms.pop()
else:
last = None
for atom in atoms:
curpath = "/".join((curpath, atom))
if curpath in app.config:
merge(app.config[curpath])
handler = None
if result:
controller = result.get('controller')
controller = self.controllers.get(controller, controller)
if controller:
if isinstance(controller, classtype):
controller = controller()
# Get config from the controller.
if hasattr(controller, "_cp_config"):
merge(controller._cp_config)
action = result.get('action')
if action is not None:
handler = getattr(controller, action, None)
# Get config from the handler
if hasattr(handler, "_cp_config"):
merge(handler._cp_config)
else:
handler = controller
# Do the last path atom here so it can
# override the controller's _cp_config.
if last:
curpath = "/".join((curpath, last))
if curpath in app.config:
merge(app.config[curpath])
return handler
def XMLRPCDispatcher(next_dispatcher=Dispatcher()):
from cherrypy.lib import xmlrpcutil
def xmlrpc_dispatch(path_info):
path_info = xmlrpcutil.patched_path(path_info)
return next_dispatcher(path_info)
return xmlrpc_dispatch
def VirtualHost(next_dispatcher=Dispatcher(), use_x_forwarded_host=True, **domains):
"""
Select a different handler based on the Host header.
This can be useful when running multiple sites within one CP server.
It allows several domains to point to different parts of a single
website structure. For example::
http://www.domain.example -> root
http://www.domain2.example -> root/domain2/
http://www.domain2.example:443 -> root/secure
can be accomplished via the following config::
[/]
request.dispatch = cherrypy.dispatch.VirtualHost(
**{'www.domain2.example': '/domain2',
'www.domain2.example:443': '/secure',
})
next_dispatcher
The next dispatcher object in the dispatch chain.
The VirtualHost dispatcher adds a prefix to the URL and calls
another dispatcher. Defaults to cherrypy.dispatch.Dispatcher().
use_x_forwarded_host
If True (the default), any "X-Forwarded-Host"
request header will be used instead of the "Host" header. This
is commonly added by HTTP servers (such as Apache) when proxying.
``**domains``
A dict of {host header value: virtual prefix} pairs.
The incoming "Host" request header is looked up in this dict,
and, if a match is found, the corresponding "virtual prefix"
value will be prepended to the URL path before calling the
next dispatcher. Note that you often need separate entries
for "example.com" and "www.example.com". In addition, "Host"
headers may contain the port number.
"""
from cherrypy.lib import httputil
def vhost_dispatch(path_info):
request = cherrypy.serving.request
header = request.headers.get
domain = header('Host', '')
if use_x_forwarded_host:
domain = header("X-Forwarded-Host", domain)
prefix = domains.get(domain, "")
if prefix:
path_info = httputil.urljoin(prefix, path_info)
result = next_dispatcher(path_info)
# Touch up staticdir config. See http://www.cherrypy.org/ticket/614.
section = request.config.get('tools.staticdir.section')
if section:
section = section[len(prefix):]
request.config['tools.staticdir.section'] = section
return result
return vhost_dispatch
| gpl-2.0 | 5,562,146,750,668,525,000 | 36.970126 | 104 | 0.592405 | false |
criccomini/airflow | airflow/example_dags/example_skip_dag.py | 14 | 1865 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import airflow
from airflow.operators.dummy_operator import DummyOperator
from airflow.models import DAG
from airflow.exceptions import AirflowSkipException
args = {
'owner': 'airflow',
'start_date': airflow.utils.dates.days_ago(2)
}
# Create some placeholder operators
class DummySkipOperator(DummyOperator):
ui_color = '#e8b7e4'
def execute(self, context):
raise AirflowSkipException
dag = DAG(dag_id='example_skip_dag', default_args=args)
def create_test_pipeline(suffix, trigger_rule, dag):
skip_operator = DummySkipOperator(task_id='skip_operator_{}'.format(suffix), dag=dag)
always_true = DummyOperator(task_id='always_true_{}'.format(suffix), dag=dag)
join = DummyOperator(task_id=trigger_rule, dag=dag, trigger_rule=trigger_rule)
join.set_upstream(skip_operator)
join.set_upstream(always_true)
final = DummyOperator(task_id='final_{}'.format(suffix), dag=dag)
final.set_upstream(join)
create_test_pipeline('1', 'all_success', dag)
create_test_pipeline('2', 'one_success', dag)
| apache-2.0 | -5,657,619,431,137,434,000 | 30.610169 | 89 | 0.738338 | false |
poryfly/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
        Gamma parameter for the RBF, polynomial, exponential, chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
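# Usage sketch (illustrative; mirrors the example in the class docstring):
#
#     clf = KernelRidge(kernel='rbf', gamma=0.1).fit(X, y)
#     y_pred = clf.predict(X)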
| bsd-3-clause | 5,534,036,230,444,517,000 | 34.765027 | 79 | 0.616196 | false |
jeffmarcom/checkbox | checkbox_gtk/hyper_text_view.py | 2 | 4689 | #
# This file is part of Checkbox.
#
# Copyright 2008 Canonical Ltd.
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
#
from gi.repository import Gtk, GObject, Pango, Gdk
class HyperTextView(Gtk.TextView):
__gtype_name__ = "HyperTextView"
__gsignals__ = {"anchor-clicked": (GObject.SignalFlags.RUN_LAST, GObject.TYPE_NONE, (str, str, int))}
__gproperties__ = {
"link": (GObject.TYPE_PYOBJECT, "link color", "link color of TextView", GObject.PARAM_READWRITE),
"active":(GObject.TYPE_PYOBJECT, "active color", "active color of TextView", GObject.PARAM_READWRITE),
"hover": (GObject.TYPE_PYOBJECT, "link:hover color", "link:hover color of TextView", GObject.PARAM_READWRITE),
}
def do_get_property(self, prop):
try:
return getattr(self, prop.name)
except AttributeError:
raise AttributeError("unknown property %s" % prop.name)
def do_set_property(self, prop, val):
if prop.name in list(self.__gproperties__.keys()):
setattr(self, prop.name, val)
else:
raise AttributeError("unknown property %s" % prop.name)
def __init__(self, buffer=None):
super(HyperTextView, self).__init__(buffer=buffer)
self.link = {"foreground": "blue", "underline": Pango.Underline.SINGLE}
self.active = {"foreground": "red", "underline": Pango.Underline.SINGLE}
self.hover = {"foreground": "dark blue", "underline": Pango.Underline.SINGLE}
self.set_editable(False)
self.set_cursor_visible(False)
self.__tags = []
self.connect("motion-notify-event", self._motion)
self.connect("focus-out-event", lambda w, e: self.get_buffer().get_tag_table().foreach(self.__tag_reset, e.window))
def insert(self, text, _iter=None):
b = self.get_buffer()
if _iter is None:
_iter = b.get_end_iter()
b.insert(_iter, text)
def insert_with_anchor(self, text, anchor=None, _iter=None):
b = self.get_buffer()
if _iter is None:
_iter = b.get_end_iter()
if anchor is None:
anchor = text
tag = b.create_tag(None, **self.get_property("link"))
tag.set_data("is_anchor", True)
tag.connect("event", self._tag_event, text, anchor)
self.__tags.append(tag)
b.insert_with_tags(_iter, text, tag)
def _motion(self, view, ev):
window = ev.window
_, x, y, _ = window.get_pointer()
x, y = view.window_to_buffer_coords(Gtk.TextWindowType.TEXT, x, y)
tags = view.get_iter_at_location(x, y).get_tags()
for tag in tags:
if tag.get_data("is_anchor"):
for t in set(self.__tags) - set([tag]):
self.__tag_reset(t, window)
self.__set_anchor(window, tag, Gdk.Cursor.new(Gdk.CursorType.HAND2), self.get_property("hover"))
break
else:
tag_table = self.get_buffer().get_tag_table()
tag_table.foreach(self.__tag_reset, window)
def _tag_event(self, tag, view, ev, _iter, text, anchor):
_type = ev.type
if _type == Gdk.EventType.MOTION_NOTIFY:
return
elif _type in [Gdk.EventType.BUTTON_PRESS, Gdk.EventType.BUTTON_RELEASE]:
button = ev.button
cursor = Gdk.Cursor.new(Gdk.CursorType.HAND2)
if _type == Gdk.EventType.BUTTON_RELEASE:
self.emit("anchor-clicked", text, anchor, button.button)
self.__set_anchor(ev.window, tag, cursor, self.get_property("hover"))
elif button in [1, 2]:
self.__set_anchor(ev.window, tag, cursor, self.get_property("active"))
def __tag_reset(self, tag, window):
if tag.get_data("is_anchor"):
self.__set_anchor(window, tag, None, self.get_property("link"))
def __set_anchor(self, window, tag, cursor, prop):
window.set_cursor(cursor)
for key, val in prop.items():
if val is not None:
tag.set_property(key, val)
GObject.type_register(HyperTextView)
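# Hedged usage sketch (illustrative; needs a GTK 3 display and main loop):
if __name__ == "__main__":
    win = Gtk.Window(title="HyperTextView demo")
    view = HyperTextView()
    view.insert("Plain text followed by a ")
    view.insert_with_anchor("link", "http://example.com")
    def on_anchor_clicked(widget, text, anchor, button):
        print("clicked %s -> %s" % (text, anchor))
    view.connect("anchor-clicked", on_anchor_clicked)
    win.add(view)
    win.connect("destroy", Gtk.main_quit)
    win.show_all()
    Gtk.main()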
| gpl-3.0 | 1,305,901,743,856,613,400 | 40.131579 | 123 | 0.610578 | false |
reyha/zulip | zerver/views/pointer.py | 11 | 1988 | from __future__ import absolute_import
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from six import text_type
from zerver.decorator import to_non_negative_int
from zerver.lib.actions import do_update_pointer
from zerver.lib.request import has_request_variables, JsonableError, REQ
from zerver.lib.response import json_success
from zerver.lib.utils import statsd, generate_random_token
from zerver.models import UserProfile, Message, UserMessage
def get_pointer_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
return json_success({'pointer': user_profile.pointer})
@has_request_variables
def update_pointer_backend(request, user_profile,
pointer=REQ(converter=to_non_negative_int)):
# type: (HttpRequest, UserProfile, int) -> HttpResponse
if pointer <= user_profile.pointer:
return json_success()
try:
UserMessage.objects.get(
user_profile=user_profile,
message__id=pointer
)
except UserMessage.DoesNotExist:
raise JsonableError(_("Invalid message ID"))
request._log_data["extra"] = "[%s]" % (pointer,)
update_flags = (request.client.name.lower() in ['android', "zulipandroid"])
do_update_pointer(user_profile, pointer, update_flags=update_flags)
return json_success()
def generate_client_id():
# type: () -> text_type
return generate_random_token(32)
def get_profile_backend(request, user_profile):
# type: (HttpRequest, UserProfile) -> HttpResponse
result = dict(pointer = user_profile.pointer,
client_id = generate_client_id(),
max_message_id = -1)
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
result['max_message_id'] = messages[0].id
return json_success(result)
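# Illustrative payload shapes (assuming zulip's json_success wraps the
# dict as {"result": "success", "msg": "", ...}):
#   get_pointer_backend -> {"result": "success", "msg": "", "pointer": 42}
#   get_profile_backend -> adds client_id and max_message_id as well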
| apache-2.0 | -1,106,144,610,460,456,000 | 35.814815 | 97 | 0.692656 | false |
shellyw19-meet/meet2017y1final-proj | showcase.py | 1 | 13266 | ### Shelly's code
import turtle
import random
import time
SIZE_X=1300
SIZE_Y=750
turtle.setup(SIZE_X,SIZE_Y)
UP_EDGE = SIZE_Y/2
DOWN_EDGE = -SIZE_Y/2
RIGHT_EDGE = SIZE_X/2
LEFT_EDGE = -SIZE_X/2
#how far the plane moves each key press
SQUARE_SIZE=40
pos_list=[]
turtle.tracer(1,0)
#def first_screen():
w = turtle.clone()
turtle.bgcolor("dodgerblue")
turtle.pencolor("yellow")
turtle.pensize(4)
turtle.penup()
turtle.goto(350,350)
turtle.pendown()
turtle.goto(-350,350)
turtle.goto(-350,-350)
turtle.goto(350,-350)
turtle.goto(350,350)
turtle.penup()
turtle.goto(0,0)
turtle.hideturtle()
bb = turtle.clone()
bb.goto(100,100)
up_gif = turtle.clone()
up_gif.hideturtle()
turtle.register_shape("arrowup.gif")
up_gif.shape("arrowup.gif")
down_gif = turtle.clone()
down_gif.hideturtle()
turtle.register_shape("arrowdown.gif")
down_gif.shape("arrowdown.gif")
left_gif = turtle.clone()
left_gif.hideturtle()
turtle.register_shape("arrowleft.gif")
left_gif.shape("arrowleft.gif")
right_gif = turtle.clone()
right_gif.hideturtle()
turtle.register_shape("arrowright.gif")
right_gif.shape("arrowright.gif")
turtle.pencolor("black")
turtle.ht()
w.ht()
w.pu()
w.goto(-115, 300)
w.write("To go UP Press: ", font = ("Ariel",20,"normal"))
w.goto(-120,-210)
w.write("To go DOWN Press: ", font = ("Ariel",20,"normal"))
w.goto(-325,63)
w.write("To go LEFT Press: ", font = ("Ariel",20,"normal"))
w.goto(80,63)
w.write("To go RIGHT Press: ", font = ("Ariel",20,"normal"))
up_gif.hideturtle()
up_gif.goto(0,238)
up_gif.stamp()
down_gif.hideturtle()
down_gif.goto(0,-275)
down_gif.stamp()
left_gif.hideturtle()
left_gif.goto(-275,0)
left_gif.stamp()
right_gif.hideturtle()
right_gif.goto(275,0)
right_gif.stamp()
w.pencolor("aliceblue")
w.goto(-290,-130)
w.write("the game will start in 5 seconds", font = ("Ariel",25,"normal","bold"))
s_score = turtle.clone()
s_score.pencolor('yellow')
u_score = turtle.clone()
u_score.pencolor('yellow')
e_score = turtle.clone()
e_score.pencolor('yellow')
k_score = turtle.clone()
k_score.pencolor('yellow')
##################
start_time = 60  # choose how much time until the game ends
##################
kenya=turtle.clone()
egypt=turtle.clone()
uganda=turtle.clone()
syria=turtle.clone()
kenya.penup()
turtle.register_shape("kenya3.gif")
kenya.shape('kenya3.gif')
egypt.penup()
turtle.register_shape("EGYPT1.gif")
egypt.shape('EGYPT1.gif')
uganda.penup()
turtle.register_shape("uganda3.gif")
uganda.shape('uganda3.gif')
syria.penup()
turtle.register_shape("syria2.gif")
syria.shape('syria2.gif')
UP_ARROW='Up'
LEFT_ARROW='Left'
RIGHT_ARROW='Right'
DOWN_ARROW='Down'
TIME_STEP=100
SPACEBAR='spacebar'
UP=0
DOWN=1
LEFT=2
RIGHT=3
turn=4
direction=UP
end_time= time.time()+120
plane=turtle.clone()
#the shape of the plane
turtle.register_shape("photoplane1.gif")
plane.shape("photoplane1.gif")
turtle.hideturtle()
new_pos = plane.pos()
new_x_pos = new_pos[0]
new_y_pos = new_pos[1]
pizza = turtle.clone()
hamburger = turtle.clone()
water = turtle.clone()
cola = turtle.clone()
def game():
global new_x_pos, new_y_pos, UP_EDGE, DOWN_EDGE, LEFT_EDGE, RIGHT_EDGE, SIZE_X, SIZE_Y, start_time
##################################
#to Carmi
############
#first_screen()
turtle.bgcolor('dodgerblue')
time.sleep(5)
w.clear()
up_gif.clear()
down_gif.clear()
right_gif.clear()
left_gif.clear()
turtle.pencolor("yellow")
turtle.pensize(4)
turtle.penup()
turtle.goto(350,350)
turtle.pendown()
turtle.goto(-350,350)
turtle.goto(-350,-350)
turtle.goto(350,-350)
turtle.goto(350,350)
turtle.penup()
turtle.goto(0,0)
#####################################################
#maya's code
kenya.showturtle()
kenya.goto(-200,200)
egypt.showturtle()
egypt.goto(-200,00)
uganda.showturtle()
uganda.goto(-200,-160)
syria.showturtle()
syria.goto(100,-160)
###############################################
pizza.hideturtle()
turtle.register_shape("Pizza.gif")
pizza.shape("Pizza.gif")
hamburger.hideturtle()
turtle.register_shape("burger_sandwich2.gif")
hamburger.shape("burger_sandwich2.gif")
water.hideturtle()
turtle.register_shape("water4.gif")
water.shape("water4.gif")
cola.hideturtle()
turtle.register_shape("cocacola7.gif")
cola.shape("cocacola7.gif")
##
##hamburger = "burger_sandwich.gif"
##
##turtle.register_shape(hamburger)
##turtle.shape(hamburger)
##
##water = "water.gif"
##
##turtle.register_shape(water)
##turtle.shape(water)
##
##cola = "cocacola.gif"
##
##turtle.register_shape(cola)
##turtle.shape(cola)
pizza.hideturtle()
pizza.goto(280,280)
a = pizza.stamp()
pizza.showturtle()
cola.hideturtle()
cola.goto(200,280)
cola.stamp()
cola.showturtle()
hamburger.hideturtle()
hamburger.goto(120,280)
print(hamburger.pos())
hamburger.stamp()
hamburger.showturtle()
water.hideturtle()
water.goto(40,280)
print(water.pos())
water.stamp()
water.showturtle()
turtle.penup()
plane.showturtle()
    timer()  # this is basically activating the timer
c_food_s()
c_food_u()
c_food_e()
c_food_k()
############################################################################
turtle.onkeypress(game, "space")
turtle.listen()
############################################################################################
#####eliass code
#carmis code
food_list = ["hamburger", "pizza", "cola", "water"]
def r_food():
rand_index = random.randint(0, 3)
return food_list[rand_index]
def up():
global direction, new_x_pos, new_y_pos, UP_EDGE, DOWN_EDGE, RIGHT_EDGE, LEFT_EDGE
direction=UP
new_x_pos = plane.pos()[0]
new_y_pos = plane.pos()[1]
if new_y_pos < UP_EDGE: #and new_y_pos > DOWN_EDGE and new_x_pos < RIGHT_EDGE and new_x_pos > LEFT_EDGE:
move_plane()
print("you pressed up ")
def down():
global direction, new_x_pos, new_y_pos, UP_EDGE, DOWN_EDGE, RIGHT_EDGE, LEFT_EDGE
direction=DOWN
new_x_pos = plane.pos()[0]
new_y_pos = plane.pos()[1]
if new_y_pos > DOWN_EDGE: #and new_y_pos < UP_EDGEand new_x_pos < RIGHT_EDGE and new_x_pos > LEFT_EDGE:
move_plane()
print("you pressed DOWN ")
def right():
global direction, new_x_pos, new_y_pos, UP_EDGE, DOWN_EDGE, RIGHT_EDGE, LEFT_EDGE
direction=RIGHT
new_x_pos = plane.pos()[0]
new_y_pos = plane.pos()[1]
if new_x_pos < RIGHT_EDGE: # and new_y_pos < UP_EDGE and new_y_pos > DOWN_EDGE and new_x_pos > LEFT_EDGE:
move_plane()
print("you pressed RIGHT ")
def left():
global direction, new_x_pos, new_y_pos, UP_EDGE, DOWN_EDGE, RIGHT_EDGE, LEFT_EDGE
direction=LEFT
new_x_pos = plane.pos()[0]
new_y_pos = plane.pos()[1]
if new_x_pos > LEFT_EDGE: #and new_y_pos < UP_EDGE and new_y_pos > DOWN_EDGE and new_x_pos < RIGHT_EDGE:
move_plane()
print("you pressed LEFT ")
def turn():
global direction
direction=turn
turtle.right(90)
turtle.onkeypress(up,UP_ARROW)
turtle.onkeypress(right,RIGHT_ARROW)
turtle.onkeypress(left,LEFT_ARROW)
turtle.onkeypress(down,DOWN_ARROW)
turtle.listen()
turtle.goto(200,0)#this is moving the turtle to 200 to write the timer
def timer():#the game timer
global start_time
turtle.goto(-330,310)
turtle.pencolor("navy")
start_time = start_time-1
print(start_time)
turtle.clear()
turtle.write(start_time,font = ("Ariel",23,"normal","bold"))
if start_time==0:
plane.clear()
hamburger.clear()
pizza.clear()
water.clear()
cola.clear()
uganda.clear()
kenya.clear()
egypt.clear()
syria.clear()
score_1.clear()
bb.clear()
u_score.clear()
s_score.clear()
k_score.clear()
e_score.clear()
pizza.clearstamps()
pizza.hideturtle()
cola.clearstamps()
cola.hideturtle()
hamburger.clearstamps()
hamburger.hideturtle()
water.clearstamps()
water.hideturtle()
syria.clearstamps()
syria.hideturtle()
uganda.clearstamps()
uganda.hideturtle()
kenya.clearstamps()
kenya.hideturtle()
egypt.clearstamps()
egypt.hideturtle()
plane.clearstamps()
plane.hideturtle()
turtle.clear()
turtle.bgcolor("dodgerblue")
turtle.pencolor("yellow")
turtle.hideturtle()
turtle.pensize(4)
turtle.penup()
turtle.goto(350,350)
turtle.pendown()
turtle.goto(-350,350)
turtle.goto(-350,-350)
turtle.goto(350,-350)
turtle.goto(350,350)
turtle.penup()
turtle.goto(0,0)
turtle.hideturtle()
turtle.goto(-235,170)
turtle.pencolor("navy")
turtle.write("You ran out of time!", font = ("Ariel",35,"normal"))
turtle.goto(-150,50)
turtle.pencolor("floralwhite")
turtle.write("Your score was: " + str(score), font = ("Ariel",25,"normal"))
turtle.goto(-320,-162)
turtle.pencolor("darkslategray")
turtle.write("GAME OVER :(" , font = ("Ariel",62,"normal","bold"))
time.sleep(5)
quit()
print("you run out of time ")
turtle.ontimer(timer,1000)
def c_food_s():
global s_score, syria_food
syria_food = r_food()
s_score.goto(173,-145)
s_score.clear()
s_score.write('We want ' + syria_food, font = ("Ariel",11,"normal"))
#return syria_food
def c_food_u():
global u_score, uganda_food
uganda_food = r_food()
u_score.goto(-135,-145)
u_score.clear()
u_score.write('We want ' + uganda_food, font = ("Ariel",11,"normal"))
#return uganda_food
def c_food_e():
global e_score, egypt_food
egypt_food = r_food()
e_score.goto(-135,10)
e_score.clear()
e_score.write('We want ' + egypt_food, font = ("Ariel",11,"normal"))
#return egypt_food
def c_food_k():
global k_score, kenya_food
kenya_food = r_food()
k_score.goto(-135,230)
k_score.clear()
k_score.write('We want ' + kenya_food, font = ("Ariel",11,"normal"))
#return kenya_food
score = 0
plane_food = 'aa'
score_1 = turtle.clone()
score_1.color('white')
score_1.goto(300,0)
def move_plane():#how the plane moves
global plane_food, score
my_pos=plane.pos()
x_pos=my_pos[0]
y_pos=my_pos[1]
if direction==RIGHT:
plane.goto(x_pos+SQUARE_SIZE,y_pos)
if direction==DOWN:
plane.goto(x_pos,y_pos-SQUARE_SIZE)
if direction==LEFT:
plane.goto(x_pos-SQUARE_SIZE,y_pos)
if direction==UP:
plane.goto(x_pos,y_pos+SQUARE_SIZE)
if plane.pos() == hamburger.pos():
bb.clear()
plane_food = 'hamburger'
bb.write('you picked up hamburger',font = ("Ariel",20,"normal"))
if plane.pos()== cola.pos():
bb.clear()
plane_food = 'cola'
bb.write('you picked up cola',font = ("Ariel",20,"normal"))
if plane.pos() == pizza.pos():
bb.clear()
plane_food = 'pizza'
bb.write('you picked up pizza',font = ("Ariel",20,"normal"))
if plane.pos() == water.pos():
bb.clear()
plane_food = 'water'
bb.write('you picked up water',font = ("Ariel",20,"normal"))
print("syria food: " + syria_food)
print("plane_food: " + plane_food)
print("plane_pos: " + str(plane.pos()))
#if plane.pos() == syria.pos() and plane_food == syria_food:
if ((plane.pos()[0] - syria.pos()[0])**2 + (plane.pos()[1] - syria.pos()[1])**2)**0.5 < 50 and plane_food == syria_food:
score = score+1
score_1.clear()
score_1.write('score: ' + str(score),font = ("Ariel",20,"normal", 'bold'))
c_food_s()
#if plane.pos() == uganda.pos() and plane_food == uganda_food:
if ((plane.pos()[0] - uganda.pos()[0])**2 + (plane.pos()[1] - uganda.pos()[1])**2)**0.5 < 50 and plane_food == uganda_food:
score = score+1
score_1.clear()
score_1.write('score: ' + str(score),font = ("Ariel",20,"normal", 'bold'))
c_food_u()
#if plane.pos() == egypt.pos() and plane_food == egypt_food:
if ((plane.pos()[0] - egypt.pos()[0])**2 + (plane.pos()[1] - egypt.pos()[1])**2)**0.5 < 50 and plane_food == egypt_food:
score = score+1
score_1.clear()
score_1.write('score: ' + str(score),font = ("Ariel",20,"normal", 'bold'))
c_food_e()
#if plane.pos() == kenya.pos() and plane_food == kenya_food:
if ((plane.pos()[0] - kenya.pos()[0])**2 + (plane.pos()[1] - kenya.pos()[1])**2)**0.5 < 50 and plane_food == kenya_food:
score = score+1
score_1.clear()
score_1.write('score: ' + str(score),font = ("Ariel",20,"normal", 'bold'))
c_food_k()
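# The four delivery checks above repeat the same Euclidean distance
# test; a helper like this (illustrative refactor, not wired in) keeps
# that logic in one place:
def close_enough(a, b, radius=50):
    dx = a.pos()[0] - b.pos()[0]
    dy = a.pos()[1] - b.pos()[1]
    return (dx ** 2 + dy ** 2) ** 0.5 < radius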
game()
clear_list = []
turtle.goto(0, 0)
##
##turtle.ontimer(c_food_u ,1200)
##turtle.ontimer(c_food_s, 900)
##turtle.ontimer(c_food_e ,1500)
##turtle.ontimer(c_food_k ,1700)
| mit | 2,420,007,856,340,504,000 | 23.612245 | 127 | 0.586386 | false |
synconics/odoo | addons/account_payment/__init__.py | 436 | 1279 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#----------------------------------------------------------
# Init Sales
#----------------------------------------------------------
import account_payment
import wizard
import account_move_line
import account_invoice
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,362,979,987,094,659,000 | 38.96875 | 78 | 0.573104 | false |
ensemblr/llvm-project-boilerplate | include/llvm/utils/lit/lit/util.py | 3 | 9754 | import errno
import itertools
import math
import os
import platform
import signal
import subprocess
import sys
import threading
def to_bytes(str):
# Encode to UTF-8 to get binary data.
return str.encode('utf-8')
def to_string(bytes):
if isinstance(bytes, str):
return bytes
return to_bytes(bytes)
def convert_string(bytes):
try:
return to_string(bytes.decode('utf-8'))
except AttributeError: # 'str' object has no attribute 'decode'.
return str(bytes)
except UnicodeError:
return str(bytes)
def detectCPUs():
"""
Detects the number of CPUs on a system. Cribbed from pp.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(capture(['sysctl', '-n', 'hw.ncpu']))
# Windows:
if "NUMBER_OF_PROCESSORS" in os.environ:
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
# With more than 32 processes, process creation often fails with
# "Too many open files". FIXME: Check if there's a better fix.
return min(ncpus, 32)
return 1 # Default
def mkdir_p(path):
"""mkdir_p(path) - Make the "path" directory, if it does not exist; this
will also make directories for any missing parent directories."""
if not path or os.path.exists(path):
return
parent = os.path.dirname(path)
if parent != path:
mkdir_p(parent)
try:
os.mkdir(path)
except OSError:
e = sys.exc_info()[1]
# Ignore EEXIST, which may occur during a race condition.
if e.errno != errno.EEXIST:
raise
def capture(args, env=None):
"""capture(command) - Run the given command (or argv list) in a shell and
return the standard output. Raises a CalledProcessError if the command
exits with a non-zero status."""
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
out = convert_string(out)
err = convert_string(err)
if p.returncode != 0:
raise subprocess.CalledProcessError(cmd=args,
returncode=p.returncode,
output="{}\n{}".format(out, err))
return out
def which(command, paths = None):
"""which(command, [paths]) - Look up the given command in the paths string
(or the PATH environment variable, if unspecified)."""
if paths is None:
paths = os.environ.get('PATH','')
# Check for absolute match first.
if os.path.isfile(command):
return command
# Would be nice if Python had a lib function for this.
if not paths:
paths = os.defpath
# Get suffixes to search.
# On Cygwin, 'PATHEXT' may exist but it should not be used.
if os.pathsep == ';':
pathext = os.environ.get('PATHEXT', '').split(';')
else:
pathext = ['']
# Search the paths...
for path in paths.split(os.pathsep):
for ext in pathext:
p = os.path.join(path, command + ext)
if os.path.exists(p) and not os.path.isdir(p):
return p
return None
def checkToolsPath(dir, tools):
for tool in tools:
if not os.path.exists(os.path.join(dir, tool)):
return False
return True
def whichTools(tools, paths):
for path in paths.split(os.pathsep):
if checkToolsPath(path, tools):
return path
return None
def printHistogram(items, title = 'Items'):
items.sort(key = lambda item: item[1])
maxValue = max([v for _,v in items])
# Select first "nice" bar height that produces more than 10 bars.
power = int(math.ceil(math.log(maxValue, 10)))
for inc in itertools.cycle((5, 2, 2.5, 1)):
barH = inc * 10**power
N = int(math.ceil(maxValue / barH))
if N > 10:
break
elif inc == 1:
power -= 1
histo = [set() for i in range(N)]
for name,v in items:
bin = min(int(N * v/maxValue), N-1)
histo[bin].add(name)
barW = 40
hr = '-' * (barW + 34)
print('\nSlowest %s:' % title)
print(hr)
for name,value in items[-20:]:
print('%.2fs: %s' % (value, name))
print('\n%s Times:' % title)
print(hr)
pDigits = int(math.ceil(math.log(maxValue, 10)))
pfDigits = max(0, 3-pDigits)
if pfDigits:
pDigits += pfDigits + 1
cDigits = int(math.ceil(math.log(len(items), 10)))
print("[%s] :: [%s] :: [%s]" % ('Range'.center((pDigits+1)*2 + 3),
'Percentage'.center(barW),
'Count'.center(cDigits*2 + 1)))
print(hr)
for i,row in enumerate(histo):
pct = float(len(row)) / len(items)
w = int(barW * pct)
print("[%*.*fs,%*.*fs) :: [%s%s] :: [%*d/%*d]" % (
pDigits, pfDigits, i*barH, pDigits, pfDigits, (i+1)*barH,
'*'*w, ' '*(barW-w), cDigits, len(row), cDigits, len(items)))
class ExecuteCommandTimeoutException(Exception):
def __init__(self, msg, out, err, exitCode):
assert isinstance(msg, str)
assert isinstance(out, str)
assert isinstance(err, str)
assert isinstance(exitCode, int)
self.msg = msg
self.out = out
self.err = err
self.exitCode = exitCode
# Close extra file handles on UNIX (on Windows this cannot be done while
# also redirecting input).
kUseCloseFDs = not (platform.system() == 'Windows')
def executeCommand(command, cwd=None, env=None, input=None, timeout=0):
"""
Execute command ``command`` (list of arguments or string)
with
* working directory ``cwd`` (str), use None to use the current
working directory
* environment ``env`` (dict), use None for none
* Input to the command ``input`` (str), use string to pass
no input.
* Max execution time ``timeout`` (int) seconds. Use 0 for no timeout.
Returns a tuple (out, err, exitCode) where
* ``out`` (str) is the standard output of running the command
* ``err`` (str) is the standard error of running the command
* ``exitCode`` (int) is the exitCode of running the command
If the timeout is hit an ``ExecuteCommandTimeoutException``
is raised.
"""
p = subprocess.Popen(command, cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env, close_fds=kUseCloseFDs)
timerObject = None
# FIXME: Because of the way nested function scopes work in Python 2.x we
# need to use a reference to a mutable object rather than a plain
# bool. In Python 3 we could use the "nonlocal" keyword but we need
# to support Python 2 as well.
hitTimeOut = [False]
try:
if timeout > 0:
def killProcess():
# We may be invoking a shell so we need to kill the
# process and all its children.
hitTimeOut[0] = True
killProcessAndChildren(p.pid)
timerObject = threading.Timer(timeout, killProcess)
timerObject.start()
out,err = p.communicate(input=input)
exitCode = p.wait()
finally:
if timerObject != None:
timerObject.cancel()
# Ensure the resulting output is always of string type.
out = convert_string(out)
err = convert_string(err)
if hitTimeOut[0]:
raise ExecuteCommandTimeoutException(
msg='Reached timeout of {} seconds'.format(timeout),
out=out,
err=err,
exitCode=exitCode
)
# Detect Ctrl-C in subprocess.
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
return out, err, exitCode
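# Hedged usage sketch (illustrative; assumes a POSIX host with 'echo'
# on PATH):
if __name__ == '__main__':
    print(which('echo'))  # e.g. '/bin/echo', or None when not found
    out, err, rc = executeCommand(['echo', 'hello'], timeout=5)
    print('rc=%d out=%r' % (rc, out))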
def usePlatformSdkOnDarwin(config, lit_config):
# On Darwin, support relocatable SDKs by providing Clang with a
# default system root path.
if 'darwin' in config.target_triple:
try:
cmd = subprocess.Popen(['xcrun', '--show-sdk-path', '--sdk', 'macosx'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = cmd.communicate()
out = out.strip()
res = cmd.wait()
except OSError:
res = -1
if res == 0 and out:
sdk_path = out
lit_config.note('using SDKROOT: %r' % sdk_path)
config.environment['SDKROOT'] = sdk_path
def killProcessAndChildren(pid):
"""
This function kills a process with ``pid`` and all its
running children (recursively). It is currently implemented
using the psutil module which provides a simple platform
neutral implementation.
TODO: Reimplement this without using psutil so we can
remove our dependency on it.
"""
import psutil
try:
psutilProc = psutil.Process(pid)
# Handle the different psutil API versions
try:
# psutil >= 2.x
children_iterator = psutilProc.children(recursive=True)
except AttributeError:
# psutil 1.x
children_iterator = psutilProc.get_children(recursive=True)
for child in children_iterator:
try:
child.kill()
except psutil.NoSuchProcess:
pass
psutilProc.kill()
except psutil.NoSuchProcess:
pass
| mit | -5,832,508,073,166,578,000 | 32.290102 | 83 | 0.580275 | false |
snava10/sqlRunner | websqlrunner/websqlrunner/views.py | 1 | 3365 | import datetime
import re
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.shortcuts import redirect
from .core.sqlRunner import *
from .core.SqlRunnerThread import *
from .forms import SqlScriptForm
from .forms import RunForm
from .models import SqlScript
from .models import Run
def homepage(request):
if request.method == "POST":
print(request.FILES)
if request.FILES:
print("Files arrived to the server")
form = SqlScriptForm(request.POST, request.FILES)
if form.is_valid():
print("Valid")
sqlscript = form.save(commit=False)
sqlscript.createdby = request.user
sqlscript.save()
return redirect(scripts)
else:
form = SqlScriptForm()
return render(request, "homepage.html", { "form": form })
def scripts(request):
scripts = SqlScript.objects.all()
context = { "scripts" : scripts }
return render(request, "scripts.html", context)
def runs(request):
run_models = Run.objects.all()
context = { "run_models": run_models }
return render(request, "runs.html", context)
def create_run(request, script_id):
script = SqlScript.objects.get(pk=script_id)
    form = RunForm(initial={"script": script})
context = { "form" : form, "filename" : script.file.name.split('/')[-1] }
return render(request, "run.html", context)
def run(request, script_id):
script = SqlScript.objects.get(pk=script_id)
if request.method == "POST":
form = RunForm(request.POST)
if form.is_valid():
run_model = form.save(commit=False)
run_model.date = datetime.datetime.now()
run_model.user = request.user
run_model.status = "R"
run_model.script = script
run_model.save()
            #trigger the script execution
run_script(script, run_model)
#redirect to the list of runs
return redirect(runs)
else:
return render(request, "run.html", { "form": form, "filename": script.get_file_name() })
form = RunForm()
return render(request, "run.html", { "form": form, "filename": script.get_file_name() })
def run_script(script, run_model):
def success(context):
if context:
run_id = context["runid"]
rmodel = Run.objects.get(pk=run_id)
rmodel.status = "S"
rmodel.save()
def failed(context):
if context:
run_id = context["runid"]
rmodel = Run.objects.get(pk=run_id)
rmodel.status = "F"
rmodel.save()
sql = script.file.read()
conn_strings = list(map(str.strip, run_model.connstrings.split('\n')))
thread_count = 1
threads = []
for conn_string in conn_strings:
sql_runner = SqlRunner.from_sql_server_connection_string(conn_string)
runner_thread = SqlRunnerThread.from_sqlrunner(sql_runner, sql, "thread-%d" % thread_count,
"thread-%d" % thread_count,thread_count)
threads.append(runner_thread)
runner_thread.success_function = success
runner_thread.failed_function = failed
runner_thread.context = { "runid": run_model.id }
runner_thread.start()
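    # Note: the runner threads are fire-and-forget; a variant that blocks
    # until every script finishes would join them before returning, e.g.:
    # for t in threads:
    #     t.join()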
| apache-2.0 | 5,661,288,072,208,293,000 | 32.65 | 100 | 0.614264 | false |
liamf/suds | suds/bindings/binding.py | 191 | 19047 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides classes for (WS) SOAP bindings.
"""
from logging import getLogger
from suds import *
from suds.sax import Namespace
from suds.sax.parser import Parser
from suds.sax.document import Document
from suds.sax.element import Element
from suds.sudsobject import Factory, Object
from suds.mx import Content
from suds.mx.literal import Literal as MxLiteral
from suds.umx.basic import Basic as UmxBasic
from suds.umx.typed import Typed as UmxTyped
from suds.bindings.multiref import MultiRef
from suds.xsd.query import TypeQuery, ElementQuery
from suds.xsd.sxbasic import Element as SchemaElement
from suds.options import Options
from suds.plugin import PluginContainer
from copy import deepcopy
log = getLogger(__name__)
envns = ('SOAP-ENV', 'http://schemas.xmlsoap.org/soap/envelope/')
class Binding:
"""
The soap binding class used to process outgoing and imcoming
soap messages per the WSDL port binding.
@cvar replyfilter: The reply filter function.
@type replyfilter: (lambda s,r: r)
@ivar wsdl: The wsdl.
@type wsdl: L{suds.wsdl.Definitions}
@ivar schema: The collective schema contained within the wsdl.
@type schema: L{xsd.schema.Schema}
@ivar options: A dictionary options.
@type options: L{Options}
"""
replyfilter = (lambda s,r: r)
def __init__(self, wsdl):
"""
@param wsdl: A wsdl.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.multiref = MultiRef()
def schema(self):
return self.wsdl.schema
def options(self):
return self.wsdl.options
def unmarshaller(self, typed=True):
"""
Get the appropriate XML decoder.
@return: Either the (basic|typed) unmarshaller.
@rtype: L{UmxTyped}
"""
if typed:
return UmxTyped(self.schema())
else:
return UmxBasic()
def marshaller(self):
"""
Get the appropriate XML encoder.
@return: An L{MxLiteral} marshaller.
@rtype: L{MxLiteral}
"""
return MxLiteral(self.schema(), self.options().xstq)
def param_defs(self, method):
"""
Get parameter definitions.
Each I{pdef} is a tuple (I{name}, L{xsd.sxbase.SchemaObject})
        @param method: A service method.
@type method: I{service.Method}
@return: A collection of parameter definitions
@rtype: [I{pdef},..]
"""
raise Exception, 'not implemented'
def get_message(self, method, args, kwargs):
"""
Get the soap message for the specified method, args and soapheaders.
This is the entry point for creating the outbound soap message.
@param method: The method being invoked.
@type method: I{service.Method}
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The soap envelope.
@rtype: L{Document}
"""
content = self.headercontent(method)
header = self.header(content)
content = self.bodycontent(method, args, kwargs)
body = self.body(content)
env = self.envelope(header, body)
if self.options().prefixes:
body.normalizePrefixes()
env.promotePrefixes()
else:
env.refitPrefixes()
return Document(env)
def get_reply(self, method, reply):
"""
Process the I{reply} for the specified I{method} by sax parsing the I{reply}
and then unmarshalling into python object(s).
@param method: The name of the invoked method.
@type method: str
@param reply: The reply XML received after invoking the specified method.
@type reply: str
        @return: The unmarshalled reply. The returned value is an L{Object} or a
I{list} depending on whether the service returns a single object or a
collection.
@rtype: tuple ( L{Element}, L{Object} )
"""
reply = self.replyfilter(reply)
sax = Parser()
replyroot = sax.parse(string=reply)
plugins = PluginContainer(self.options().plugins)
plugins.message.parsed(reply=replyroot)
soapenv = replyroot.getChild('Envelope')
soapenv.promotePrefixes()
soapbody = soapenv.getChild('Body')
self.detect_fault(soapbody)
soapbody = self.multiref.process(soapbody)
nodes = self.replycontent(method, soapbody)
rtypes = self.returned_types(method)
if len(rtypes) > 1:
result = self.replycomposite(rtypes, nodes)
return (replyroot, result)
if len(rtypes) == 1:
if rtypes[0].unbounded():
result = self.replylist(rtypes[0], nodes)
return (replyroot, result)
if len(nodes):
unmarshaller = self.unmarshaller()
resolved = rtypes[0].resolve(nobuiltin=True)
result = unmarshaller.process(nodes[0], resolved)
return (replyroot, result)
return (replyroot, None)
def detect_fault(self, body):
"""
Detect I{hidden} soapenv:Fault element in the soap body.
@param body: The soap envelope body.
@type body: L{Element}
@raise WebFault: When found.
"""
fault = body.getChild('Fault', envns)
if fault is None:
return
unmarshaller = self.unmarshaller(False)
p = unmarshaller.process(fault)
if self.options().faults:
raise WebFault(p, fault)
return self
def replylist(self, rt, nodes):
"""
        Construct a I{list} reply. This method is called when it has been detected
that the reply is a list.
@param rt: The return I{type}.
@type rt: L{suds.xsd.sxbase.SchemaObject}
@param nodes: A collection of XML nodes.
@type nodes: [L{Element},...]
@return: A list of I{unmarshalled} objects.
@rtype: [L{Object},...]
"""
result = []
resolved = rt.resolve(nobuiltin=True)
unmarshaller = self.unmarshaller()
for node in nodes:
sobject = unmarshaller.process(node, resolved)
result.append(sobject)
return result
def replycomposite(self, rtypes, nodes):
"""
Construct a I{composite} reply. This method is called when it has been
detected that the reply has multiple root nodes.
@param rtypes: A list of known return I{types}.
@type rtypes: [L{suds.xsd.sxbase.SchemaObject},...]
@param nodes: A collection of XML nodes.
@type nodes: [L{Element},...]
@return: The I{unmarshalled} composite object.
@rtype: L{Object},...
"""
dictionary = {}
for rt in rtypes:
dictionary[rt.name] = rt
unmarshaller = self.unmarshaller()
composite = Factory.object('reply')
for node in nodes:
tag = node.name
rt = dictionary.get(tag, None)
if rt is None:
if node.get('id') is None:
raise Exception('<%s/> not mapped to message part' % tag)
else:
continue
resolved = rt.resolve(nobuiltin=True)
sobject = unmarshaller.process(node, resolved)
value = getattr(composite, tag, None)
if value is None:
if rt.unbounded():
value = []
setattr(composite, tag, value)
value.append(sobject)
else:
setattr(composite, tag, sobject)
else:
if not isinstance(value, list):
value = [value,]
setattr(composite, tag, value)
value.append(sobject)
return composite
def get_fault(self, reply):
"""
Extract the fault from the specified soap reply. If I{faults} is True, an
exception is raised. Otherwise, the I{unmarshalled} fault L{Object} is
returned. This method is called when the server raises a I{web fault}.
@param reply: A soap reply message.
@type reply: str
@return: A fault object.
@rtype: tuple ( L{Element}, L{Object} )
"""
reply = self.replyfilter(reply)
sax = Parser()
faultroot = sax.parse(string=reply)
soapenv = faultroot.getChild('Envelope')
soapbody = soapenv.getChild('Body')
fault = soapbody.getChild('Fault')
unmarshaller = self.unmarshaller(False)
p = unmarshaller.process(fault)
if self.options().faults:
raise WebFault(p, faultroot)
return (faultroot, p.detail)
def mkparam(self, method, pdef, object):
"""
Builds a parameter for the specified I{method} using the parameter
definition (pdef) and the specified value (object).
@param method: A method name.
@type method: str
@param pdef: A parameter definition.
@type pdef: tuple: (I{name}, L{xsd.sxbase.SchemaObject})
@param object: The parameter value.
@type object: any
@return: The parameter fragment.
@rtype: L{Element}
"""
marshaller = self.marshaller()
content = \
Content(tag=pdef[0],
value=object,
type=pdef[1],
real=pdef[1].resolve())
return marshaller.process(content)
def mkheader(self, method, hdef, object):
"""
Builds a soapheader for the specified I{method} using the header
definition (hdef) and the specified value (object).
@param method: A method name.
@type method: str
@param hdef: A header definition.
@type hdef: tuple: (I{name}, L{xsd.sxbase.SchemaObject})
@param object: The header value.
@type object: any
@return: The parameter fragment.
@rtype: L{Element}
"""
marshaller = self.marshaller()
if isinstance(object, (list, tuple)):
tags = []
for item in object:
tags.append(self.mkheader(method, hdef, item))
return tags
content = Content(tag=hdef[0], value=object, type=hdef[1])
return marshaller.process(content)
def envelope(self, header, body):
"""
        Build the B{<Envelope/>} for an outbound soap message.
@param header: The soap message B{header}.
@type header: L{Element}
@param body: The soap message B{body}.
@type body: L{Element}
@return: The soap envelope containing the body and header.
@rtype: L{Element}
"""
env = Element('Envelope', ns=envns)
env.addPrefix(Namespace.xsins[0], Namespace.xsins[1])
env.append(header)
env.append(body)
return env
def header(self, content):
"""
        Build the B{<Header/>} for an outbound soap message.
        @param content: The header content.
        @type content: L{Element}
        @return: the soap header fragment.
@rtype: L{Element}
"""
header = Element('Header', ns=envns)
header.append(content)
return header
def bodycontent(self, method, args, kwargs):
"""
Get the content for the soap I{body} node.
@param method: A service method.
@type method: I{service.Method}
@param args: method parameter values
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The xml content for the <body/>
@rtype: [L{Element},..]
"""
raise Exception, 'not implemented'
def headercontent(self, method):
"""
Get the content for the soap I{Header} node.
@param method: A service method.
@type method: I{service.Method}
@return: The xml content for the <body/>
@rtype: [L{Element},..]
"""
n = 0
content = []
wsse = self.options().wsse
if wsse is not None:
content.append(wsse.xml())
headers = self.options().soapheaders
if not isinstance(headers, (tuple,list,dict)):
headers = (headers,)
if len(headers) == 0:
return content
pts = self.headpart_types(method)
if isinstance(headers, (tuple,list)):
for header in headers:
if isinstance(header, Element):
content.append(deepcopy(header))
continue
if len(pts) == n: break
h = self.mkheader(method, pts[n], header)
ns = pts[n][1].namespace('ns0')
h.setPrefix(ns[0], ns[1])
content.append(h)
n += 1
else:
for pt in pts:
header = headers.get(pt[0])
if header is None:
continue
h = self.mkheader(method, pt, header)
ns = pt[1].namespace('ns0')
h.setPrefix(ns[0], ns[1])
content.append(h)
return content
def replycontent(self, method, body):
"""
Get the reply body content.
@param method: A service method.
@type method: I{service.Method}
@param body: The soap body
@type body: L{Element}
@return: the body content
@rtype: [L{Element},...]
"""
raise Exception, 'not implemented'
def body(self, content):
"""
        Build the B{<Body/>} for an outbound soap message.
@param content: The body content.
@type content: L{Element}
@return: the soap body fragment.
@rtype: L{Element}
"""
body = Element('Body', ns=envns)
body.append(content)
return body
def bodypart_types(self, method, input=True):
"""
Get a list of I{parameter definitions} (pdef) defined for the specified method.
Each I{pdef} is a tuple (I{name}, L{xsd.sxbase.SchemaObject})
@param method: A service method.
@type method: I{service.Method}
@param input: Defines input/output message.
@type input: boolean
@return: A list of parameter definitions
@rtype: [I{pdef},]
"""
result = []
if input:
parts = method.soap.input.body.parts
else:
parts = method.soap.output.body.parts
for p in parts:
if p.element is not None:
query = ElementQuery(p.element)
else:
query = TypeQuery(p.type)
pt = query.execute(self.schema())
if pt is None:
raise TypeNotFound(query.ref)
if p.type is not None:
pt = PartElement(p.name, pt)
if input:
if pt.name is None:
result.append((p.name, pt))
else:
result.append((pt.name, pt))
else:
result.append(pt)
return result
def headpart_types(self, method, input=True):
"""
Get a list of I{parameter definitions} (pdef) defined for the specified method.
Each I{pdef} is a tuple (I{name}, L{xsd.sxbase.SchemaObject})
@param method: A service method.
@type method: I{service.Method}
@param input: Defines input/output message.
@type input: boolean
@return: A list of parameter definitions
@rtype: [I{pdef},]
"""
result = []
if input:
headers = method.soap.input.headers
else:
headers = method.soap.output.headers
for header in headers:
part = header.part
if part.element is not None:
query = ElementQuery(part.element)
else:
query = TypeQuery(part.type)
pt = query.execute(self.schema())
if pt is None:
raise TypeNotFound(query.ref)
if part.type is not None:
pt = PartElement(part.name, pt)
if input:
if pt.name is None:
result.append((part.name, pt))
else:
result.append((pt.name, pt))
else:
result.append(pt)
return result
def returned_types(self, method):
"""
Get the L{xsd.sxbase.SchemaObject} returned by the I{method}.
@param method: A service method.
@type method: I{service.Method}
@return: The name of the type return by the method.
@rtype: [I{rtype},..]
"""
result = []
for rt in self.bodypart_types(method, input=False):
result.append(rt)
return result
class PartElement(SchemaElement):
"""
A part used to represent a message part when the part
    references a schema type and is thus treated as an element.
@ivar resolved: The part type.
@type resolved: L{suds.xsd.sxbase.SchemaObject}
"""
def __init__(self, name, resolved):
"""
@param name: The part name.
@type name: str
@param resolved: The part type.
@type resolved: L{suds.xsd.sxbase.SchemaObject}
"""
root = Element('element', ns=Namespace.xsdns)
SchemaElement.__init__(self, resolved.schema, root)
self.__resolved = resolved
self.name = name
self.form_qualified = False
def implany(self):
return self
def optional(self):
return True
def namespace(self, prefix=None):
return Namespace.default
def resolve(self, nobuiltin=False):
if nobuiltin and self.__resolved.builtin():
return self
else:
return self.__resolved
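# Hedged example (illustrative, not part of suds): Binding.replyfilter is
# looked up on the instance and invoked as self.replyfilter(reply), so a
# two-argument function can be installed to pre-process raw replies:
def _strip_bom(binding, reply):
    # Drop a UTF-8 BOM that some servers prepend (assumption); install
    # with: Binding.replyfilter = _strip_bom
    if reply.startswith('\xef\xbb\xbf'):
        reply = reply[3:]
    return reply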
| lgpl-3.0 | -8,751,945,274,539,019,000 | 34.405204 | 87 | 0.569434 | false |
wenjoy/homePage | node_modules/geetest/node_modules/request/node_modules/karma/node_modules/optimist/node_modules/tap/node_modules/yamlish/yamlish-py/test/__init__.py | 161 | 3430 | # -*- coding: utf-8 -*- IGNORE:C0111
from __future__ import absolute_import, print_function, unicode_literals
import logging
import yamlish
import yaml
import tempfile
import textwrap
INPUT = 1
OUTPUT = 2
if yamlish.py3k:
unicode = str
#logging.basicConfig(level=logging.DEBUG)
def _generate_test_name(source):
"""
Clean up human-friendly test name into a method name.
"""
out = source.replace(' ', '_').replace(':', '').replace(',', '').lower()
return "test_%s" % out
def _create_input_test(test_src, tested_function, options=None):
"""
Decorate tested function to be used as a method for TestCase.
"""
def do_test_expected(self):
"""
Execute a test by calling a tested_function on test_src data.
"""
self.maxDiff = None
got = ""
if 'error' in test_src:
self.assertRaises(test_src['error'], tested_function,
test_src['in'], options)
else:
want = test_src['out']
got = tested_function(test_src['in'], options)
logging.debug('got = type %s', type(got))
logging.debug("test_src['out'] = %s",
unicode(test_src['out']))
self.assertEqual(got, want, """Result matches
expected = %s
observed = %s
""" % (want, got))
return do_test_expected
def _create_output_test(test_src, tested_function, options=None):
"""
Decorate tested function to be used as a method for TestCase.
"""
def do_test_expected(self):
"""
Execute a test by calling a tested_function on test_src data.
"""
self.maxDiff = None
        # We currently don't throw any exceptions in Writer, so this
        # is always false
if 'error' in test_src:
self.assertRaises(test_src['error'], yamlish.dumps,
test_src['in'], options)
else:
logging.debug("out:\n%s", textwrap.dedent(test_src['out']))
want = yaml.load(textwrap.dedent(test_src['out']))
logging.debug("want:\n%s", want)
with tempfile.NamedTemporaryFile() as test_file:
tested_function(test_src['in'], test_file)
test_file.seek(0)
got_str = test_file.read()
logging.debug("got_str = %s", got_str)
got = yaml.load(got_str)
self.assertEqual(got, want, "Result matches")
return do_test_expected
def generate_testsuite(test_data, test_case_shell, test_fce, direction=INPUT,
options=None):
"""
Generate tests from the test data, class to build upon and function
to use for testing.
"""
for in_test in test_data:
if ('skip' in in_test) and in_test['skip']:
logging.debug("test %s skipped!", in_test['name'])
continue
name = _generate_test_name(in_test['name'])
if direction == INPUT:
test_method = _create_input_test(in_test, test_fce,
options=options)
elif direction == OUTPUT:
test_method = _create_output_test(in_test, test_fce,
options=options)
test_method.__name__ = str('test_%s' % name)
setattr(test_case_shell, test_method.__name__, test_method)
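# Hedged usage sketch (hypothetical inline cases; the real suites load
# fixtures from YAML files and pass yamlish's own reader as test_fce):
if __name__ == '__main__':
    import unittest
    class _Shell(unittest.TestCase):
        pass
    _cases = [{'name': 'simple map', 'in': 'a: 1', 'out': {'a': 1}}]
    generate_testsuite(_cases, _Shell,
                       lambda src, opts: yaml.load(src),
                       direction=INPUT)
    unittest.main()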
| mit | 7,560,231,898,189,568,000 | 32.300971 | 77 | 0.549854 | false |
pcarrier/linux | tools/perf/scripts/python/compaction-times.py | 958 | 7950 | # report time spent in compaction
# Licensed under the terms of the GNU GPL License version 2
# testing:
# 'echo 1 > /proc/sys/vm/compact_memory' to force compaction of all zones
import os
import sys
import re
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
usage = "usage: perf script report compaction-times.py -- [-h] [-u] [-p|-pv] [-t | [-m] [-fs] [-ms]] [pid|pid-range|comm-regex]\n"
class popt:
DISP_DFL = 0
DISP_PROC = 1
DISP_PROC_VERBOSE=2
class topt:
DISP_TIME = 0
DISP_MIG = 1
DISP_ISOLFREE = 2
DISP_ISOLMIG = 4
DISP_ALL = 7
class comm_filter:
def __init__(self, re):
self.re = re
def filter(self, pid, comm):
m = self.re.search(comm)
return m == None or m.group() == ""
class pid_filter:
def __init__(self, low, high):
self.low = (0 if low == "" else int(low))
self.high = (0 if high == "" else int(high))
def filter(self, pid, comm):
return not (pid >= self.low and (self.high == 0 or pid <= self.high))
def set_type(t):
global opt_disp
opt_disp = (t if opt_disp == topt.DISP_ALL else opt_disp|t)
def ns(sec, nsec):
return (sec * 1000000000) + nsec
def time(ns):
return "%dns" % ns if opt_ns else "%dus" % (round(ns, -3) / 1000)
class pair:
def __init__(self, aval, bval, alabel = None, blabel = None):
self.alabel = alabel
self.blabel = blabel
self.aval = aval
self.bval = bval
def __add__(self, rhs):
self.aval += rhs.aval
self.bval += rhs.bval
return self
def __str__(self):
return "%s=%d %s=%d" % (self.alabel, self.aval, self.blabel, self.bval)
class cnode:
def __init__(self, ns):
self.ns = ns
self.migrated = pair(0, 0, "moved", "failed")
self.fscan = pair(0,0, "scanned", "isolated")
self.mscan = pair(0,0, "scanned", "isolated")
def __add__(self, rhs):
self.ns += rhs.ns
self.migrated += rhs.migrated
self.fscan += rhs.fscan
self.mscan += rhs.mscan
return self
def __str__(self):
prev = 0
s = "%s " % time(self.ns)
if (opt_disp & topt.DISP_MIG):
s += "migration: %s" % self.migrated
prev = 1
if (opt_disp & topt.DISP_ISOLFREE):
s += "%sfree_scanner: %s" % (" " if prev else "", self.fscan)
prev = 1
if (opt_disp & topt.DISP_ISOLMIG):
s += "%smigration_scanner: %s" % (" " if prev else "", self.mscan)
return s
def complete(self, secs, nsecs):
self.ns = ns(secs, nsecs) - self.ns
def increment(self, migrated, fscan, mscan):
if (migrated != None):
self.migrated += migrated
if (fscan != None):
self.fscan += fscan
if (mscan != None):
self.mscan += mscan
class chead:
heads = {}
val = cnode(0);
fobj = None
@classmethod
def add_filter(cls, filter):
cls.fobj = filter
@classmethod
def create_pending(cls, pid, comm, start_secs, start_nsecs):
filtered = 0
try:
head = cls.heads[pid]
filtered = head.is_filtered()
except KeyError:
if cls.fobj != None:
filtered = cls.fobj.filter(pid, comm)
head = cls.heads[pid] = chead(comm, pid, filtered)
if not filtered:
head.mark_pending(start_secs, start_nsecs)
@classmethod
def increment_pending(cls, pid, migrated, fscan, mscan):
head = cls.heads[pid]
if not head.is_filtered():
if head.is_pending():
head.do_increment(migrated, fscan, mscan)
else:
sys.stderr.write("missing start compaction event for pid %d\n" % pid)
@classmethod
def complete_pending(cls, pid, secs, nsecs):
head = cls.heads[pid]
if not head.is_filtered():
if head.is_pending():
head.make_complete(secs, nsecs)
else:
sys.stderr.write("missing start compaction event for pid %d\n" % pid)
@classmethod
def gen(cls):
if opt_proc != popt.DISP_DFL:
for i in cls.heads:
yield cls.heads[i]
@classmethod
def str(cls):
return cls.val
def __init__(self, comm, pid, filtered):
self.comm = comm
self.pid = pid
self.val = cnode(0)
self.pending = None
self.filtered = filtered
self.list = []
def __add__(self, rhs):
self.ns += rhs.ns
self.val += rhs.val
return self
def mark_pending(self, secs, nsecs):
self.pending = cnode(ns(secs, nsecs))
def do_increment(self, migrated, fscan, mscan):
self.pending.increment(migrated, fscan, mscan)
def make_complete(self, secs, nsecs):
self.pending.complete(secs, nsecs)
chead.val += self.pending
if opt_proc != popt.DISP_DFL:
self.val += self.pending
if opt_proc == popt.DISP_PROC_VERBOSE:
self.list.append(self.pending)
self.pending = None
def enumerate(self):
if opt_proc == popt.DISP_PROC_VERBOSE and not self.is_filtered():
for i, pelem in enumerate(self.list):
sys.stdout.write("%d[%s].%d: %s\n" % (self.pid, self.comm, i+1, pelem))
def is_pending(self):
return self.pending != None
def is_filtered(self):
return self.filtered
def display(self):
if not self.is_filtered():
sys.stdout.write("%d[%s]: %s\n" % (self.pid, self.comm, self.val))
def trace_end():
sys.stdout.write("total: %s\n" % chead.str())
for i in chead.gen():
i.display(),
i.enumerate()
def compaction__mm_compaction_migratepages(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, nr_migrated, nr_failed):
chead.increment_pending(common_pid,
pair(nr_migrated, nr_failed), None, None)
def compaction__mm_compaction_isolate_freepages(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, pair(nr_scanned, nr_taken), None)
def compaction__mm_compaction_isolate_migratepages(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, start_pfn, end_pfn, nr_scanned, nr_taken):
chead.increment_pending(common_pid,
None, None, pair(nr_scanned, nr_taken))
def compaction__mm_compaction_end(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, zone_start, migrate_start, free_start, zone_end,
sync, status):
chead.complete_pending(common_pid, common_secs, common_nsecs)
def compaction__mm_compaction_begin(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, zone_start, migrate_start, free_start, zone_end,
sync):
chead.create_pending(common_pid, common_comm, common_secs, common_nsecs)
def pr_help():
global usage
sys.stdout.write(usage)
sys.stdout.write("\n")
sys.stdout.write("-h display this help\n")
sys.stdout.write("-p display by process\n")
sys.stdout.write("-pv display by process (verbose)\n")
sys.stdout.write("-t display stall times only\n")
sys.stdout.write("-m display stats for migration\n")
sys.stdout.write("-fs display stats for free scanner\n")
sys.stdout.write("-ms display stats for migration scanner\n")
sys.stdout.write("-u display results in microseconds (default nanoseconds)\n")
comm_re = None
pid_re = None
pid_regex = "^(\d*)-(\d*)$|^(\d*)$"
opt_proc = popt.DISP_DFL
opt_disp = topt.DISP_ALL
opt_ns = True
argc = len(sys.argv) - 1
if argc >= 1:
pid_re = re.compile(pid_regex)
for i, opt in enumerate(sys.argv[1:]):
if opt[0] == "-":
if opt == "-h":
pr_help()
exit(0);
elif opt == "-p":
opt_proc = popt.DISP_PROC
elif opt == "-pv":
opt_proc = popt.DISP_PROC_VERBOSE
elif opt == '-u':
opt_ns = False
elif opt == "-t":
set_type(topt.DISP_TIME)
elif opt == "-m":
set_type(topt.DISP_MIG)
elif opt == "-fs":
set_type(topt.DISP_ISOLFREE)
elif opt == "-ms":
set_type(topt.DISP_ISOLMIG)
else:
sys.exit(usage)
elif i == argc - 1:
m = pid_re.search(opt)
if m != None and m.group() != "":
if m.group(3) != None:
f = pid_filter(m.group(3), m.group(3))
else:
f = pid_filter(m.group(1), m.group(2))
else:
try:
comm_re=re.compile(opt)
except:
sys.stderr.write("invalid regex '%s'" % opt)
sys.exit(usage)
f = comm_filter(comm_re)
chead.add_filter(f)
| gpl-2.0 | 5,668,654,525,239,555,000 | 24.562701 | 130 | 0.649811 | false |
evax/ansible-modules-core | cloud/amazon/ec2_vol.py | 50 | 15330 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vol
short_description: create and attach a volume, return volume id and device map
description:
    - creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name are given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto.
version_added: "1.1"
options:
instance:
description:
      - instance ID if you wish to attach the volume. Since 1.9 you can set it to None to detach.
required: false
default: null
aliases: []
name:
description:
- volume Name tag if you wish to attach an existing volume (requires instance)
required: false
default: null
aliases: []
version_added: "1.6"
id:
description:
- volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
required: false
default: null
aliases: []
version_added: "1.6"
volume_size:
description:
- size of volume (in GB) to create.
required: false
default: null
aliases: []
volume_type:
description:
      - Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default
        and remains the Ansible default for backwards compatibility.
required: false
default: standard
aliases: []
version_added: "1.9"
iops:
description:
- the provisioned IOPs you want to associate with this volume (integer).
required: false
default: 100
aliases: []
version_added: "1.3"
encrypted:
description:
- Enable encryption at rest for this volume.
default: false
version_added: "1.8"
device_name:
description:
- device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
required: false
default: null
aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: ['aws_region', 'ec2_region']
zone:
description:
- zone in which to create the volume, if unset uses the zone the instance is in (if set)
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
snapshot:
description:
- snapshot ID on which to base the volume
required: false
default: null
version_added: "1.5"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
state:
description:
- whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
required: false
default: present
choices: ['absent', 'present', 'list']
version_added: "1.6"
author: "Lester Wade (@lwade)"
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Simple attachment action
- ec2_vol:
instance: XXXXXX
volume_size: 5
device_name: sdd
# Example using custom iops params
- ec2_vol:
instance: XXXXXX
volume_size: 5
iops: 200
device_name: sdd
# Example using snapshot id
- ec2_vol:
instance: XXXXXX
snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- ec2_vol:
instance: "{{ item.id }} "
volume_size: 5
with_items: ec2.instances
register: ec2_vol
# Example: Launch an instance and then add a volume if not already attached
# * Volume will be created with the given name if not already created.
# * Nothing will happen if the volume is already attached.
# * Requires Ansible 2.0
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
zone: YYYYYY
id: my_instance
wait: yes
count: 1
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
with_items: ec2.instances
register: ec2_vol
# Remove a volume
- ec2_vol:
id: vol-XXXXXXXX
state: absent
# Detach a volume (since 1.9)
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance
- ec2_vol:
instance: i-XXXXXX
state: list
# Create new volume using SSD storage
- ec2_vol:
instance: XXXXXX
volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
'''
import time
from distutils.version import LooseVersion
try:
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_volume(module, ec2):
name = module.params.get('name')
id = module.params.get('id')
zone = module.params.get('zone')
filters = {}
volume_ids = None
if zone:
filters['availability_zone'] = zone
if name:
filters = {'tag:Name': name}
if id:
volume_ids = [id]
try:
vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if not vols:
if id:
msg = "Could not find the volume with id: %s" % id
if name:
msg += (" and name: %s" % name)
module.fail_json(msg=msg)
else:
return None
if len(vols) > 1:
module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
return vols[0]
def get_volumes(module, ec2):
instance = module.params.get('instance')
if not instance:
module.fail_json(msg = "Instance must be specified to get volumes")
try:
vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return vols
def delete_volume(module, ec2):
volume_id = module.params['id']
try:
ec2.delete_volume(volume_id)
module.exit_json(changed=True)
except boto.exception.EC2ResponseError as ec2_error:
if ec2_error.code == 'InvalidVolume.NotFound':
module.exit_json(changed=False)
module.fail_json(msg=ec2_error.message)
def boto_supports_volume_encryption():
"""
Check if Boto library supports encryption of EBS volumes (added in 2.29.0)
Returns:
True if boto library has the named param as an argument on the request_spot_instances method, else False
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def create_volume(module, ec2, zone):
name = module.params.get('name')
id = module.params.get('id')
instance = module.params.get('instance')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
snapshot = module.params.get('snapshot')
# If custom iops is defined we use volume_type "io1" rather than the default of "standard"
if iops:
volume_type = 'io1'
if instance == 'None' or instance == '':
instance = None
volume = get_volume(module, ec2)
if volume:
if volume.attachment_state() is not None:
if instance is None:
return volume
adata = volume.attach_data
if adata.instance_id != instance:
module.fail_json(msg = "Volume %s is already attached to another instance: %s"
% (name or id, adata.instance_id))
else:
module.exit_json(msg="Volume %s is already mapped on instance %s: %s" %
(name or id, adata.instance_id, adata.device),
volume_id=id,
device=adata.device,
changed=False)
else:
try:
if boto_supports_volume_encryption():
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
else:
volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
while volume.status != 'available':
time.sleep(3)
volume.update()
if name:
ec2.create_tags([volume.id], {"Name": name})
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
return volume
def attach_volume(module, ec2, volume, instance):
device_name = module.params.get('device_name')
if device_name and instance:
try:
attach = volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
# If device_name isn't set, make a choice based on best practices here:
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
# In future this needs to be more dynamic but combining block device mapping best practices
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
# Use password data attribute to tell whether the instance is Windows or Linux
    if device_name is None and instance:
        try:
            # No password data means Linux/UNIX; password data means Windows.
            if not ec2.get_password_data(instance.id):
                device_name = '/dev/sdf'
            else:
                device_name = '/dev/xvdf'
            attach = volume.attach(instance.id, device_name)
            while volume.attachment_state() != 'attached':
                time.sleep(3)
                volume.update()
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
def detach_volume(module, ec2):
vol = get_volume(module, ec2)
if not vol or vol.attachment_state() is None:
module.exit_json(changed=False)
else:
vol.detach()
module.exit_json(changed=True)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance = dict(),
id = dict(),
name = dict(),
volume_size = dict(),
volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'),
iops = dict(),
encrypted = dict(),
device_name = dict(),
zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
snapshot = dict(),
state = dict(choices=['absent', 'present', 'list'], default='present')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
id = module.params.get('id')
name = module.params.get('name')
instance = module.params.get('instance')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
device_name = module.params.get('device_name')
zone = module.params.get('zone')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
if instance == 'None' or instance == '':
instance = None
ec2 = ec2_connect(module)
if state == 'list':
returned_volumes = []
vols = get_volumes(module, ec2)
for v in vols:
attachment = v.attach_data
returned_volumes.append({
'create_time': v.create_time,
'id': v.id,
'iops': v.iops,
'size': v.size,
'snapshot_id': v.snapshot_id,
'status': v.status,
'type': v.type,
'zone': v.zone,
'attachment_set': {
'attach_time': attachment.attach_time,
'device': attachment.device,
'status': attachment.status
}
})
module.exit_json(changed=False, volumes=returned_volumes)
if encrypted and not boto_supports_volume_encryption():
module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")
    # Here we need to get the zone info for the instance. This covers the
    # situation where an instance is specified but a zone isn't.
    # Useful for playbooks chaining instance launch with volume create +
    # attach, where the zone doesn't matter to the user.
if instance:
reservation = ec2.get_all_instances(instance_ids=instance)
inst = reservation[0].instances[0]
zone = inst.placement
# Check if there is a volume already mounted there.
if device_name:
if device_name in inst.block_device_mapping:
module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
volume_id=inst.block_device_mapping[device_name].volume_id,
device=device_name,
changed=False)
# Delaying the checks until after the instance check allows us to get volume ids for existing volumes
# without needing to pass an unused volume_size
if not volume_size and not (id or name or snapshot):
module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")
if volume_size and (id or snapshot):
module.fail_json(msg="Cannot specify volume_size together with id or snapshot")
if state == 'absent':
delete_volume(module, ec2)
if state == 'present':
volume = create_volume(module, ec2, zone)
if instance:
attach_volume(module, ec2, volume, inst)
else:
detach_volume(module, ec2)
module.exit_json(volume_id=volume.id, device=device_name, volume_type=volume.type)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
| gpl-3.0 | -1,062,248,617,243,396,200 | 32.253796 | 270 | 0.614155 | false |
tarc/gyp | test/mac/gyptest-lto.py | 69 | 2050 | #!/usr/bin/env python
# Copyright (c) 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that LTO flags work.
"""
import TestGyp
import os
import re
import subprocess
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'lto'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
def ObjPath(srcpath, target):
# TODO: Move this into TestGyp if it's needed elsewhere.
if test.format == 'xcode':
return os.path.join(CHDIR, 'build', 'test.build', 'Default',
target + '.build', 'Objects-normal', 'x86_64',
srcpath + '.o')
elif 'ninja' in test.format: # ninja, xcode-ninja
return os.path.join(CHDIR, 'out', 'Default', 'obj',
target + '.' + srcpath + '.o')
elif test.format == 'make':
return os.path.join(CHDIR, 'out', 'Default', 'obj.target',
target, srcpath + '.o')
def ObjType(p, t_expected):
r = re.compile(r'nsyms\s+(\d+)')
o = subprocess.check_output(['file', p])
objtype = 'unknown'
if ': Mach-O ' in o:
objtype = 'mach-o'
elif ': LLVM bit-code ' in o:
objtype = 'llvm'
if objtype != t_expected:
print 'Expected %s, got %s' % (t_expected, objtype)
test.fail_test()
ObjType(ObjPath('cfile', 'lto'), 'llvm')
ObjType(ObjPath('ccfile', 'lto'), 'llvm')
ObjType(ObjPath('mfile', 'lto'), 'llvm')
ObjType(ObjPath('mmfile', 'lto'), 'llvm')
ObjType(ObjPath('asmfile', 'lto'), 'mach-o')
ObjType(ObjPath('cfile', 'lto_static'), 'llvm')
ObjType(ObjPath('ccfile', 'lto_static'), 'llvm')
ObjType(ObjPath('mfile', 'lto_static'), 'llvm')
ObjType(ObjPath('mmfile', 'lto_static'), 'llvm')
ObjType(ObjPath('asmfile', 'lto_static'), 'mach-o')
test.pass_test()
# TODO: Probably test for -object_path_lto too, else dsymutil won't be
# useful maybe?
| bsd-3-clause | 7,805,014,245,021,200,000 | 30.538462 | 72 | 0.597073 | false |
jjdicharry/godot | tools/scripts/makeargs.py | 50 | 1945 |
text="""
#define FUNC$numR(m_r,m_func,$argt)\\
virtual m_r m_func($argtp) { \\
if (Thread::get_caller_ID()!=server_thread) {\\
m_r ret;\\
command_queue.push_and_ret( visual_server, &VisualServer::m_func,$argp,&ret);\\
return ret;\\
} else {\\
return visual_server->m_func($argp);\\
}\\
}
#define FUNC$numRC(m_r,m_func,$argt)\\
virtual m_r m_func($argtp) const { \\
if (Thread::get_caller_ID()!=server_thread) {\\
m_r ret;\\
command_queue.push_and_ret( visual_server, &VisualServer::m_func,$argp,&ret);\\
return ret;\\
} else {\\
return visual_server->m_func($argp);\\
}\\
}
#define FUNC$numS(m_func,$argt)\\
virtual void m_func($argtp) { \\
if (Thread::get_caller_ID()!=server_thread) {\\
command_queue.push_and_sync( visual_server, &VisualServer::m_func,$argp);\\
} else {\\
visual_server->m_func($argp);\\
}\\
}
#define FUNC$numSC(m_func,$argt)\\
virtual void m_func($argtp) const { \\
if (Thread::get_caller_ID()!=server_thread) {\\
command_queue.push_and_sync( visual_server, &VisualServer::m_func,$argp);\\
} else {\\
visual_server->m_func($argp);\\
}\\
}
#define FUNC$num(m_func,$argt)\\
virtual void m_func($argtp) { \\
if (Thread::get_caller_ID()!=server_thread) {\\
command_queue.push( visual_server, &VisualServer::m_func,$argp);\\
} else {\\
visual_server->m_func($argp);\\
}\\
}
#define FUNC$numC(m_func,$argt)\\
virtual void m_func($argtp) const { \\
if (Thread::get_caller_ID()!=server_thread) {\\
command_queue.push( visual_server, &VisualServer::m_func,$argp);\\
} else {\\
visual_server->m_func($argp);\\
}\\
}
"""
for i in range(1,8):
tp=""
p=""
t=""
for j in range(i):
if (j>0):
tp+=", "
p+=", "
t+=", "
tp +=("m_arg"+str(j+1)+" p"+str(j+1))
p+=("p"+str(j+1))
t+=("m_arg"+str(j+1))
t = text.replace("$argtp",tp).replace("$argp",p).replace("$argt",t).replace("$num",str(i))
print(t)
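
# For example (illustrative), with i == 2 the placeholders expand to:
#   $argtp -> "m_arg1 p1, m_arg2 p2"
#   $argp  -> "p1, p2"
#   $argt  -> "m_arg1, m_arg2"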
| mit | -7,330,262,224,962,121,000 | 21.102273 | 91 | 0.575835 | false |
angelmtenor/IDSFC | L1_intro/H_olympics_medal_points.py | 1 | 1606 | import numpy as np
from pandas import DataFrame
def numpy_dot():
"""
Imagine a point system in which each country is awarded 4 points for each
gold medal, 2 points for each silver medal, and one point for each
bronze medal.
Using the numpy.dot function, create a new dataframe called
'olympic_points_df' that includes:
a) a column called 'country_name' with the country name
b) a column called 'points' with the total number of points the country
earned at the Sochi olympics.
You do not need to call the function in your code when running it in the
browser - the grader will do that automatically when you submit or test it.
"""
countries = ['Russian Fed.', 'Norway', 'Canada', 'United States',
'Netherlands', 'Germany', 'Switzerland', 'Belarus',
'Austria', 'France', 'Poland', 'China', 'Korea',
'Sweden', 'Czech Republic', 'Slovenia', 'Japan',
'Finland', 'Great Britain', 'Ukraine', 'Slovakia',
'Italy', 'Latvia', 'Australia', 'Croatia', 'Kazakhstan']
gold = [13, 11, 10, 9, 8, 8, 6, 5, 4, 4, 4, 3, 3, 2, 2, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
silver = [11, 5, 10, 7, 7, 6, 3, 0, 8, 4, 1, 4, 3, 7, 4, 2, 4, 3, 1, 0, 0, 2, 2, 2, 1, 0]
bronze = [9, 10, 5, 12, 9, 5, 2, 1, 5, 7, 1, 2, 2, 6, 2, 4, 3, 1, 2, 1, 0, 6, 2, 1, 0, 1]
# YOUR CODE HERE
points = np.dot([4, 2, 1], [gold, silver, bronze])
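    # The dot product is the weighted sum 4*gold + 2*silver + 1*bronze per
    # country; e.g. Russian Fed. earns 4*13 + 2*11 + 1*9 = 83 points.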
olympic_points_df = DataFrame({'country_name': countries, 'points': points})
return olympic_points_df
print(numpy_dot())
| mit | 6,231,642,565,624,961,000 | 41.263158 | 93 | 0.581569 | false |
karban/field | resources/python/rope/base/oi/__init__.py | 112 | 1684 | """Rope object analysis and inference package
Rope makes some simplifying assumptions about a python program. It
assumes that a program only performs assignments and function calls.
Tracking assignments is simple and `PyName` objects handle that. The
main problem is function calls. Rope uses these two approaches for
obtaining call information:
* Static object analysis: `rope.base.pycore.PyCore.analyze_module()`
It can analyze modules to obtain information about functions. This
is done by analyzing function calls in a module or scope. Currently
SOA analyzes the scopes that are changed while saving or when the
user asks to analyze a module. That is mainly because static
analysis is time-consuming.
* Dynamic object analysis: `rope.base.pycore.PyCore.run_module()`
When you run a module or your testsuite, when DOA is enabled, it
collects information about parameters passed to and objects returned
from functions. The main problem with this approach is that it is
quite slow; Not when looking up the information but when collecting
them.
An instance of `rope.base.oi.objectinfo.ObjectInfoManager` can be used
for accessing these information. It saves the data in a
`rope.base.oi.objectdb.ObjectDB` internally.
Now if our objectdb does not know anything about a function and we
need the value returned by it, static object inference, SOI, comes
into play. It analyzes function body and tries to infer the object
that is returned from it (we usually need the returned value for the
given parameter objects).
Rope might collect and store information for other `PyName`\s, too.
For instance rope stores the object builtin containers hold.
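
A minimal sketch of driving SOA by hand (illustrative only; it assumes an
existing rope project on disk and uses just the entry points named above)::

    from rope.base.project import Project

    project = Project('/path/to/project')     # hypothetical path
    module = project.get_resource('mod.py')   # hypothetical module
    project.pycore.analyze_module(module)     # static object analysis (SOA)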
"""
| gpl-2.0 | -5,128,744,767,208,530,000 | 43.315789 | 70 | 0.789192 | false |
rgerkin/python-neo | neo/test/coretest/test_block.py | 1 | 32775 | # -*- coding: utf-8 -*-
"""
Tests of the neo.core.block.Block class
"""
# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function
from datetime import datetime
from copy import deepcopy
import unittest
import numpy as np
try:
from IPython.lib.pretty import pretty
except ImportError as err:
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
from neo.core.block import Block
from neo.core.container import filterdata
from neo.core import SpikeTrain, Unit, AnalogSignal
from neo.test.tools import (assert_neo_object_is_compliant,
assert_same_sub_schema)
from neo.test.generate_datasets import (get_fake_value, get_fake_values,
fake_neo, clone_object,
get_annotations, TEST_ANNOTATIONS)
class Test__generate_datasets(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.annotations = {str(x): TEST_ANNOTATIONS[x] for x in
range(len(TEST_ANNOTATIONS))}
def test__get_fake_values(self):
self.annotations['seed'] = 0
file_datetime = get_fake_value('file_datetime', datetime, seed=0)
rec_datetime = get_fake_value('rec_datetime', datetime, seed=1)
index = get_fake_value('index', int, seed=2)
name = get_fake_value('name', str, seed=3, obj=Block)
description = get_fake_value('description', str, seed=4, obj='Block')
file_origin = get_fake_value('file_origin', str)
attrs1 = {'file_datetime': file_datetime,
'rec_datetime': rec_datetime,
'index': index,
'name': name,
'description': description,
'file_origin': file_origin}
attrs2 = attrs1.copy()
attrs2.update(self.annotations)
res11 = get_fake_values(Block, annotate=False, seed=0)
res12 = get_fake_values('Block', annotate=False, seed=0)
res21 = get_fake_values(Block, annotate=True, seed=0)
res22 = get_fake_values('Block', annotate=True, seed=0)
self.assertEqual(res11, attrs1)
self.assertEqual(res12, attrs1)
self.assertEqual(res21, attrs2)
self.assertEqual(res22, attrs2)
def test__fake_neo__cascade(self):
self.annotations['seed'] = None
obj_type = 'Block'
cascade = True
res = fake_neo(obj_type=obj_type, cascade=cascade)
for child in res.children_recur:
del child.annotations['i']
del child.annotations['j']
self.assertTrue(isinstance(res, Block))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
self.assertEqual(len(res.segments), 1)
seg = res.segments[0]
self.assertEqual(seg.annotations, self.annotations)
self.assertEqual(len(res.channel_indexes), 1)
chx = res.channel_indexes[0]
self.assertEqual(chx.annotations, self.annotations)
        self.assertEqual(len(seg.analogsignals), 1)
self.assertEqual(len(seg.irregularlysampledsignals), 1)
self.assertEqual(len(seg.spiketrains), 1)
self.assertEqual(len(seg.events), 1)
self.assertEqual(len(seg.epochs), 1)
        self.assertEqual(seg.analogsignals[0].annotations,
                         self.annotations)
self.assertEqual(seg.irregularlysampledsignals[0].annotations,
self.annotations)
self.assertEqual(seg.spiketrains[0].annotations,
self.annotations)
self.assertEqual(seg.events[0].annotations,
self.annotations)
self.assertEqual(seg.epochs[0].annotations,
self.annotations)
self.assertEqual(len(chx.units), 1)
unit = chx.units[0]
self.assertEqual(unit.annotations, self.annotations)
self.assertEqual(len(chx.analogsignals), 1)
self.assertEqual(chx.analogsignals[0].annotations,
self.annotations)
self.assertEqual(len(unit.spiketrains), 1)
self.assertEqual(unit.spiketrains[0].annotations,
self.annotations)
def test__fake_neo__nocascade(self):
self.annotations['seed'] = None
obj_type = Block
cascade = False
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, Block))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
self.assertEqual(len(res.segments), 0)
self.assertEqual(len(res.channel_indexes), 0)
class TestBlock(unittest.TestCase):
def setUp(self):
self.nchildren = 2
self.seed1 = 0
self.seed2 = 10000
self.blk1 = fake_neo(Block, seed=self.seed1, n=self.nchildren)
self.blk2 = fake_neo(Block, seed=self.seed2, n=self.nchildren)
self.targobj = self.blk1
self.segs1 = self.blk1.segments
self.segs2 = self.blk2.segments
self.chxs1 = self.blk1.channel_indexes
self.chxs2 = self.blk2.channel_indexes
self.units1 = [[unit for unit in chx.units] for chx in self.chxs1]
self.units2 = [[unit for unit in chx.units] for chx in self.chxs2]
self.units1 = sum(self.units1, [])
self.units2 = sum(self.units2, [])
self.sigarrs1 = [[sigarr for sigarr in chx.analogsignals]
for chx in self.chxs1]
self.sigarrs2 = [[sigarr for sigarr in chx.analogsignals]
for chx in self.chxs2]
self.trains1 = [[train for train in unit.spiketrains]
for unit in self.units1]
self.trains2 = [[train for train in unit.spiketrains]
for unit in self.units2]
self.irsigs1 = [[irsig for irsig in chx.irregularlysampledsignals]
for chx in self.chxs1]
self.irsigs2 = [[irsig for irsig in chx.irregularlysampledsignals]
for chx in self.chxs2]
self.epcs1 = [[epc for epc in seg.epochs]
for seg in self.segs1]
self.epcs2 = [[epc for epc in seg.epochs]
for seg in self.segs2]
self.evts1 = [[evt for evt in seg.events]
for seg in self.segs1]
self.evts2 = [[evt for evt in seg.events]
for seg in self.segs2]
self.img_seqs1 = [[imgseq for imgseq in seg.imagesequences]
for seg in self.segs1]
self.img_seqs2 = [[imgseq for imgseq in seg.imagesequences]
for seg in self.segs2]
self.sigarrs1 = sum(self.sigarrs1, [])
self.sigarrs2 = sum(self.sigarrs2, [])
self.trains1 = sum(self.trains1, [])
self.trains2 = sum(self.trains2, [])
self.irsigs1 = sum(self.irsigs1, [])
self.irsigs2 = sum(self.irsigs2, [])
self.epcs1 = sum(self.epcs1, [])
self.epcs2 = sum(self.epcs2, [])
self.evts1 = sum(self.evts1, [])
self.evts2 = sum(self.evts2, [])
self.img_seqs1 = sum(self.img_seqs1, [])
self.img_seqs2 = sum(self.img_seqs2, [])
def test_block_init(self):
blk = Block(name='a block')
assert_neo_object_is_compliant(blk)
self.assertEqual(blk.name, 'a block')
self.assertEqual(blk.file_origin, None)
def check_creation(self, blk):
assert_neo_object_is_compliant(blk)
seed = blk.annotations['seed']
targ0 = get_fake_value('file_datetime', datetime, seed=seed + 0)
self.assertEqual(blk.file_datetime, targ0)
targ1 = get_fake_value('rec_datetime', datetime, seed=seed + 1)
self.assertEqual(blk.rec_datetime, targ1)
targ2 = get_fake_value('index', int, seed=seed + 2, obj=Block)
self.assertEqual(blk.index, targ2)
targ3 = get_fake_value('name', str, seed=seed + 3, obj=Block)
self.assertEqual(blk.name, targ3)
targ4 = get_fake_value('description', str, seed=seed + 4, obj=Block)
self.assertEqual(blk.description, targ4)
targ5 = get_fake_value('file_origin', str)
self.assertEqual(blk.file_origin, targ5)
targ6 = get_annotations()
targ6['seed'] = seed
self.assertEqual(blk.annotations, targ6)
self.assertTrue(hasattr(blk, 'channel_indexes'))
self.assertTrue(hasattr(blk, 'segments'))
self.assertEqual(len(blk.channel_indexes), self.nchildren)
self.assertEqual(len(blk.segments), self.nchildren)
def test__creation(self):
self.check_creation(self.blk1)
self.check_creation(self.blk2)
def test__merge(self):
blk1a = fake_neo(Block,
seed=self.seed1, n=self.nchildren)
assert_same_sub_schema(self.blk1, blk1a)
blk1a.annotate(seed=self.seed2)
blk1a.segments.append(self.segs2[0])
blk1a.merge(self.blk2)
segs1a = clone_object(self.blk1).segments
chxs1a = clone_object(self.chxs1)
assert_same_sub_schema(chxs1a + self.chxs2,
blk1a.channel_indexes)
assert_same_sub_schema(segs1a + self.segs2,
blk1a.segments)
def test__children(self):
segs1a = clone_object(self.blk1).segments
chxs1a = clone_object(self.chxs1)
self.assertEqual(self.blk1._container_child_objects,
('Segment', 'ChannelIndex'))
self.assertEqual(self.blk1._data_child_objects, ())
self.assertEqual(self.blk1._single_parent_objects, ())
self.assertEqual(self.blk1._multi_child_objects, ())
self.assertEqual(self.blk1._multi_parent_objects, ())
self.assertEqual(self.blk1._child_properties,
('Unit',))
self.assertEqual(self.blk1._single_child_objects,
('Segment', 'ChannelIndex'))
self.assertEqual(self.blk1._container_child_containers,
('segments', 'channel_indexes'))
self.assertEqual(self.blk1._data_child_containers, ())
self.assertEqual(self.blk1._single_child_containers,
('segments', 'channel_indexes'))
self.assertEqual(self.blk1._single_parent_containers, ())
self.assertEqual(self.blk1._multi_child_containers, ())
self.assertEqual(self.blk1._multi_parent_containers, ())
self.assertEqual(self.blk1._child_objects,
('Segment', 'ChannelIndex'))
self.assertEqual(self.blk1._child_containers,
('segments', 'channel_indexes'))
self.assertEqual(self.blk1._parent_objects, ())
self.assertEqual(self.blk1._parent_containers, ())
self.assertEqual(len(self.blk1._single_children), 2 * self.nchildren)
self.assertEqual(len(self.blk1._multi_children), 0)
self.assertEqual(len(self.blk1.data_children), 0)
self.assertEqual(len(self.blk1.data_children_recur),
1 * self.nchildren ** 3 + 5 * self.nchildren ** 2)
self.assertEqual(len(self.blk1.container_children), 2 * self.nchildren)
self.assertEqual(len(self.blk1.container_children_recur),
2 * self.nchildren + 1 * self.nchildren ** 2)
self.assertEqual(len(self.blk1.children), 2 * self.nchildren)
self.assertEqual(len(self.blk1.children_recur),
2 * self.nchildren +
6 * self.nchildren ** 2 +
1 * self.nchildren ** 3)
self.assertEqual(self.blk1._multi_children, ())
assert_same_sub_schema(list(self.blk1._single_children),
self.segs1 + self.chxs1)
assert_same_sub_schema(list(self.blk1.container_children),
self.segs1 + self.chxs1)
assert_same_sub_schema(list(self.blk1.container_children_recur),
self.segs1 + self.chxs1 +
self.units1[:2] +
self.units1[2:])
assert_same_sub_schema(list(self.blk1.data_children_recur),
self.sigarrs1[::2] +
self.epcs1[:2] + self.evts1[:2] +
self.irsigs1[::2] +
self.trains1[::2] +
self.img_seqs1[:2] +
self.sigarrs1[1::2] +
self.epcs1[2:] + self.evts1[2:] +
self.irsigs1[1::2] +
self.trains1[1::2] +
self.img_seqs1[2:],
exclude=['channel_index'])
assert_same_sub_schema(list(self.blk1.children),
segs1a + chxs1a)
assert_same_sub_schema(list(self.blk1.children_recur),
self.sigarrs1[::2] +
self.epcs1[:2] + self.evts1[:2] +
self.irsigs1[::2] +
self.trains1[::2] +
self.img_seqs1[:2] +
self.sigarrs1[1::2] +
self.epcs1[2:] + self.evts1[2:] +
self.irsigs1[1::2] +
self.trains1[1::2] +
self.img_seqs1[2:] +
self.segs1 + self.chxs1 +
self.units1[:2] +
self.units1[2:],
exclude=['channel_index'])
def test__size(self):
targ = {'segments': self.nchildren,
'channel_indexes': self.nchildren}
self.assertEqual(self.targobj.size, targ)
def test__filter_none(self):
targ = []
# collecting all data objects in target block
for seg in self.targobj.segments:
targ.extend(seg.analogsignals)
targ.extend(seg.epochs)
targ.extend(seg.events)
targ.extend(seg.irregularlysampledsignals)
targ.extend(seg.spiketrains)
targ.extend(seg.imagesequences)
res1 = self.targobj.filter()
res2 = self.targobj.filter({})
res3 = self.targobj.filter([])
res4 = self.targobj.filter([{}])
res5 = self.targobj.filter([{}, {}])
res6 = self.targobj.filter([{}, {}])
res7 = self.targobj.filter(targdict={})
res8 = self.targobj.filter(targdict=[])
res9 = self.targobj.filter(targdict=[{}])
res10 = self.targobj.filter(targdict=[{}, {}])
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
def test__filter_annotation_single(self):
targ = ([self.epcs1[1], self.evts1[1]] +
[self.img_seqs1[1]] +
self.sigarrs1[1::2] +
[self.epcs1[3], self.evts1[3]] +
self.irsigs1[1::2] +
self.trains1[1::2] +
[self.img_seqs1[3]])
res0 = self.targobj.filter(j=1)
res1 = self.targobj.filter({'j': 1})
res2 = self.targobj.filter(targdict={'j': 1})
res3 = self.targobj.filter([{'j': 1}])
res4 = self.targobj.filter(targdict=[{'j': 1}])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
def test__filter_single_annotation_nores(self):
targ = []
res0 = self.targobj.filter(j=5)
res1 = self.targobj.filter({'j': 5})
res2 = self.targobj.filter(targdict={'j': 5})
res3 = self.targobj.filter([{'j': 5}])
res4 = self.targobj.filter(targdict=[{'j': 5}])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
def test__filter_attribute_single(self):
targ = [self.trains1[0]]
name = self.trains1[0].name
res0 = self.targobj.filter(name=name)
res1 = self.targobj.filter({'name': name})
res2 = self.targobj.filter(targdict={'name': name})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_attribute_single_nores(self):
targ = []
name = self.trains2[0].name
res0 = self.targobj.filter(name=name)
res1 = self.targobj.filter({'name': name})
res2 = self.targobj.filter(targdict={'name': name})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi(self):
targ = ([self.epcs1[1], self.evts1[1]] +
[self.img_seqs1[1]] +
self.sigarrs1[1::2] +
[self.epcs1[3], self.evts1[3]] +
self.irsigs1[1::2] +
self.trains1[1::2] +
[self.img_seqs1[3]] +
[self.trains1[0]])
name = self.trains1[0].name
res0 = self.targobj.filter(name=name, j=1)
res1 = self.targobj.filter({'name': name, 'j': 1})
res2 = self.targobj.filter(targdict={'name': name, 'j': 1})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi_nores(self):
targ = []
name0 = self.sigarrs2[0].name
res0 = self.targobj.filter([{'j': 5}, {}])
res1 = self.targobj.filter({}, j=5)
res2 = self.targobj.filter([{}], i=6)
res3 = self.targobj.filter({'name': name0}, j=1)
res4 = self.targobj.filter(targdict={'name': name0}, j=1)
res5 = self.targobj.filter(name=name0, targdict={'j': 1})
res6 = self.targobj.filter(name=name0, j=5)
res7 = self.targobj.filter({'name': name0, 'j': 5})
res8 = self.targobj.filter(targdict={'name': name0, 'j': 5})
res9 = self.targobj.filter({'name': name0}, j=5)
res10 = self.targobj.filter(targdict={'name': name0}, j=5)
res11 = self.targobj.filter(name=name0, targdict={'j': 5})
res12 = self.targobj.filter({'name': name0}, j=5)
res13 = self.targobj.filter(targdict={'name': name0}, j=5)
res14 = self.targobj.filter(name=name0, targdict={'j': 5})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
assert_same_sub_schema(res11, targ)
assert_same_sub_schema(res12, targ)
assert_same_sub_schema(res13, targ)
assert_same_sub_schema(res14, targ)
def test__filter_multi_partres_annotation_attribute(self):
targ = [self.trains1[0]]
name = self.trains1[0].name
res0 = self.targobj.filter(name=name, j=90)
res1 = self.targobj.filter({'name': name, 'j': 90})
res2 = self.targobj.filter(targdict={'name': name, 'j': 90})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi_partres_annotation_annotation(self):
targ = self.trains1[::2]
res0 = self.targobj.filter([{'j': 0}, {'i': 0}])
res1 = self.targobj.filter({'j': 0}, i=0)
res2 = self.targobj.filter([{'j': 0}], i=0)
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_no_annotation_but_object(self):
targ = []
for seg in self.targobj.segments:
targ.extend(seg.spiketrains)
res = self.targobj.filter(objects=SpikeTrain)
assert_same_sub_schema(res, targ)
targ = []
for seg in self.targobj.segments:
targ.extend(seg.analogsignals)
res = self.targobj.filter(objects=AnalogSignal)
assert_same_sub_schema(res, targ)
targ = []
for seg in self.targobj.segments:
targ.extend(seg.analogsignals)
targ.extend(seg.spiketrains)
res = self.targobj.filter(objects=[AnalogSignal, SpikeTrain])
assert_same_sub_schema(res, targ)
def test__filter_single_annotation_obj_single(self):
targ = self.trains1[1::2]
res0 = self.targobj.filter(j=1, objects='SpikeTrain')
res1 = self.targobj.filter(j=1, objects=SpikeTrain)
res2 = self.targobj.filter(j=1, objects=['SpikeTrain'])
res3 = self.targobj.filter(j=1, objects=[SpikeTrain])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
def test__filter_single_annotation_norecur(self):
targ = []
res0 = self.targobj.filter(j=1, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_norecur(self):
targ = []
res0 = self.targobj.filter(name=self.sigarrs1[0].name,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata(self):
targ = []
res0 = self.targobj.filter(j=1, data=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata(self):
targ = []
res0 = self.targobj.filter(name=self.sigarrs1[0].name, data=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_norecur(self):
targ = []
res0 = self.targobj.filter(j=1,
data=False, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_norecur(self):
targ = []
res0 = self.targobj.filter(name=self.sigarrs1[0].name,
data=False, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_container(self):
targ = ([self.epcs1[1], self.evts1[1]] +
[self.img_seqs1[1]] +
self.sigarrs1[1::2] +
[self.epcs1[3], self.evts1[3]] +
self.irsigs1[1::2] +
self.trains1[1::2] +
[self.img_seqs1[3]] +
[self.segs1[1], self.chxs1[1],
self.units1[1],
self.units1[3]])
res0 = self.targobj.filter(j=1, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_data(self):
targ = [self.trains1[0]]
res0 = self.targobj.filter(name=self.trains1[0].name, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_container_norecur(self):
targ = [self.segs1[1], self.chxs1[1]]
res0 = self.targobj.filter(j=1, container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_norecur(self):
targ = [self.segs1[0]]
res0 = self.targobj.filter(name=self.segs1[0].name,
container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_norecur_nores(self):
targ = []
res0 = self.targobj.filter(name=self.trains1[0].name,
container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_container(self):
targ = [self.segs1[1], self.chxs1[1],
self.units1[1],
self.units1[3]]
res0 = self.targobj.filter(j=1,
data=False, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container_nores(self):
targ = []
res0 = self.targobj.filter(name=self.trains1[0].name,
data=False, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_container_norecur(self):
targ = [self.segs1[1], self.chxs1[1]]
res0 = self.targobj.filter(j=1,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container_norecur(self):
targ = [self.segs1[0]]
res0 = self.targobj.filter(name=self.segs1[0].name,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container_norecur_nores(self):
targ = []
res0 = self.targobj.filter(name=self.trains1[0].name,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filterdata_multi(self):
data = self.targobj.children_recur
targ = ([self.epcs1[1], self.evts1[1]] +
[self.img_seqs1[1]] +
self.sigarrs1[1::2] +
[self.epcs1[3], self.evts1[3]] +
self.irsigs1[1::2] +
self.trains1[1::2] +
[self.img_seqs1[3]] +
[self.segs1[1], self.chxs1[1],
self.units1[1],
self.units1[3],
self.trains1[0]])
name = self.trains1[0].name
res0 = filterdata(data, name=name, j=1)
res1 = filterdata(data, {'name': name, 'j': 1})
res2 = filterdata(data, targdict={'name': name, 'j': 1})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filterdata_multi_nores(self):
data = self.targobj.children_recur
targ = []
name1 = self.sigarrs1[0].name
name2 = self.sigarrs2[0].name
res0 = filterdata(data, [{'j': 6}, {}])
res1 = filterdata(data, {}, i=6)
res2 = filterdata(data, [{}], i=6)
res3 = filterdata(data, name=name1, targdict={'j': 1})
res4 = filterdata(data, {'name': name1}, j=1)
res5 = filterdata(data, targdict={'name': name1}, j=1)
res6 = filterdata(data, name=name2, j=6)
res7 = filterdata(data, {'name': name2, 'j': 6})
res8 = filterdata(data, targdict={'name': name2, 'j': 6})
res9 = filterdata(data, {'name': name2}, j=6)
res10 = filterdata(data, targdict={'name': name2}, j=6)
res11 = filterdata(data, name=name2, targdict={'j': 6})
res12 = filterdata(data, {'name': name1}, j=6)
res13 = filterdata(data, targdict={'name': name1}, j=6)
res14 = filterdata(data, name=name1, targdict={'j': 6})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
assert_same_sub_schema(res11, targ)
assert_same_sub_schema(res12, targ)
assert_same_sub_schema(res13, targ)
assert_same_sub_schema(res14, targ)
def test__filterdata_multi_partres_annotation_attribute(self):
data = self.targobj.children_recur
targ = [self.trains1[0]]
name = self.trains1[0].name
res0 = filterdata(data, name=name, j=90)
res1 = filterdata(data, {'name': name, 'j': 90})
res2 = filterdata(data, targdict={'name': name, 'j': 90})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filterdata_multi_partres_annotation_annotation(self):
data = self.targobj.children_recur
targ = (self.trains1[::2] +
self.segs1[:1] + self.units1[::2])
res0 = filterdata(data, [{'j': 0}, {'i': 0}])
res1 = filterdata(data, {'j': 0}, i=0)
res2 = filterdata(data, [{'j': 0}], i=0)
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
# @unittest.skipUnless(HAVE_IPYTHON, "requires IPython")
# def test__pretty(self):
# res = pretty(self.blk1)
# ann = get_annotations()
# ann['seed'] = self.seed1
# ann = pretty(ann).replace('\n ', '\n ')
#
# seg0 = pretty(self.segs1[0])
# seg1 = pretty(self.segs1[1])
# seg0 = seg0.replace('\n', '\n ')
# seg1 = seg1.replace('\n', '\n ')
#
# targ = ("Block with " +
# ("%s segments, %s channel_indexes\n" %
# (len(self.segs1), len(self.chxs1))) +
# ("name: '%s'\ndescription: '%s'\n" % (self.blk1.name,
# self.blk1.description)) +
# ("annotations: %s\n" % ann) +
# ("file_origin: '%s'\n" % self.blk1.file_origin) +
# ("file_datetime: %s\n" % repr(self.blk1.file_datetime)) +
# ("rec_datetime: %s\n" % repr(self.blk1.rec_datetime)) +
# ("index: %s\n" % self.blk1.index) +
#
#
# ("# segments (N=%s)\n" % len(self.segs1)) +
# ('%s: %s\n' % (0, seg0)) +
# ('%s: %s' % (1, seg1)))
#
# self.assertEqual(res, targ)
def test_block_list_units(self):
assert_same_sub_schema(self.units1, self.blk1.list_units)
assert_same_sub_schema(self.units2, self.blk2.list_units)
assert_same_sub_schema(self.units1,
self.blk1.list_children_by_class(Unit))
assert_same_sub_schema(self.units2,
self.blk2.list_children_by_class(Unit))
assert_same_sub_schema(self.units1,
self.blk1.list_children_by_class('Unit'))
assert_same_sub_schema(self.units2,
self.blk2.list_children_by_class('Unit'))
def test__deepcopy(self):
blk1_copy = deepcopy(self.blk1)
# Check links from parents to children
assert_same_sub_schema(blk1_copy, self.blk1)
# Check links from children to parents
for segment in blk1_copy.segments:
self.assertEqual(id(segment.block), id(blk1_copy))
for sig in segment.analogsignals:
self.assertEqual(id(sig.segment), id(segment))
for sptr in segment.spiketrains:
self.assertEqual(id(sptr.segment), id(segment))
for chidx in blk1_copy.channel_indexes:
self.assertEqual(id(chidx.block), id(blk1_copy))
for sig in chidx.analogsignals:
self.assertEqual(id(sig.channel_index), id(chidx))
for sig in chidx.irregularlysampledsignals:
self.assertEqual(id(sig.channel_index), id(chidx))
for unit in chidx.units:
self.assertEqual(id(unit.channel_index), id(chidx))
for sptr in unit.spiketrains:
self.assertEqual(id(sptr.unit), id(unit))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 1,990,102,301,567,499,000 | 38.631197 | 81 | 0.559878 | false |
yoyo2k/l10n-romania | account_compensation_vat_on_payment/account_compensation.py | 2 | 10256 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Domsense s.r.l. (<http://www.domsense.com>).
# Copyright (C) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
# from openerp.tools import float_compare
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_compensation(osv.Model):
_inherit = "account.compensation"
def is_vat_on_payment(self, compensation):
vat_on_p = 0
for line in compensation.line_ids:
if line.amount:
if line.move_line_id and line.move_line_id.invoice and line.move_line_id.invoice.vat_on_payment:
vat_on_p += 1
return vat_on_p
def action_move_line_create(self, cr, uid, ids, context=None):
if context is None:
context = {}
inv_pool = self.pool.get('account.invoice')
journal_pool = self.pool.get('account.journal')
move_line_pool = self.pool.get('account.move.line')
move_pool = self.pool.get('account.move')
account_obj = self.pool.get('account.account')
tax_obj = self.pool.get('account.tax')
currency_obj = self.pool.get('res.currency')
res = False
for compensation in self.browse(cr, uid, ids, context):
entry_posted = compensation.journal_id.entry_posted
# disable the 'skip draft state' option because "mixed" entry
# (shadow + real) won't pass validation. Anyway every entry will be
# posted later (if 'entry_posted' is enabled)
if entry_posted:
journal_pool.write(
cr, uid, compensation.journal_id.id, {'entry_posted': False})
res = super(account_compensation, self).action_move_line_create(
cr, uid, [compensation.id], context)
# because 'move_id' has been updated by 'action_move_line_create'
compensation.refresh()
if entry_posted:
journal_pool.write(
cr, uid, compensation.journal_id.id, {'entry_posted': True})
if self.is_vat_on_payment(compensation):
lines_to_create = []
amounts_by_invoice = self.allocated_amounts_grouped_by_invoice(
cr, uid, compensation, context)
for inv_id in amounts_by_invoice:
invoice = inv_pool.browse(cr, uid, inv_id, context)
for acc_move_line in invoice.move_id.line_id:
if acc_move_line.real_tax_code_id:
# compute the VAT or base line proportionally to
# the paid amount
new_line_amount = currency_obj.round(cr, uid, compensation.company_id.currency_id, ((amounts_by_invoice[invoice.id][
'allocated'] + amounts_by_invoice[invoice.id]['write-off']) / amounts_by_invoice[invoice.id]['total']) * acc_move_line.tax_amount)
acc = acc_move_line.real_account_id and acc_move_line.real_account_id.id or acc_move_line.account_id.id
# prepare the real move line
vals = {
'name': invoice.number + ' - ' + acc_move_line.name,
'account_id': acc,
'credit': acc_move_line.credit and new_line_amount or 0.0,
'debit': acc_move_line.debit and new_line_amount or 0.0,
'date': compensation.date,
'partner_id': acc_move_line.partner_id and acc_move_line.partner_id.id or False,
'tax_code_id': acc_move_line.real_tax_code_id.id,
'tax_amount': new_line_amount
}
if acc_move_line.product_id:
vals['debit'] = vals['credit'] = 0.00
lines_to_create.append(vals)
# prepare the shadow move line
vals = {
'name': invoice.number + ' - ' + acc_move_line.name,
'account_id': acc_move_line.account_id.id,
'credit': acc_move_line.debit and new_line_amount or 0.0,
'debit': acc_move_line.credit and new_line_amount or 0.0,
'date': compensation.date,
'partner_id': acc_move_line.partner_id and acc_move_line.partner_id.id or False,
'tax_code_id': acc_move_line.tax_code_id.id,
'tax_amount': -new_line_amount
}
if acc_move_line.product_id:
vals['debit'] = vals['credit'] = 0.00
lines_to_create.append(vals)
for line_to_create in lines_to_create:
line_to_create['move_id'] = compensation.move_id.id
move_line_pool.create(cr, uid, line_to_create, context)
self.balance_move(cr, uid, compensation.move_id.id, context)
move_pool.post(cr, uid, [compensation.move_id.id], context=context)
return res
def balance_move(self, cr, uid, move_id, context=None):
currency_obj = self.pool.get('res.currency')
move = self.pool.get('account.move').browse(cr, uid, move_id, context)
amount = 0.0
for line in move.line_id:
amount += line.debit - line.credit
amount = currency_obj.round(
cr, uid, move.company_id.currency_id, amount)
        # Check whether the balance differs by more than 1 decimal according
        # to the account decimal precision.
if abs(amount * 10 ** dp.get_precision('Account')(cr)[1]) > 1:
raise osv.except_osv(_('Error'), _(
                'The generated payment entry is unbalanced by more than 1 decimal'))
if not currency_obj.is_zero(cr, uid, move.company_id.currency_id, amount):
for line in move.line_id:
# adjust the first move line that's not receivable, payable or
# liquidity
if line.account_id.type != 'receivable' and line.account_id.type != 'payable' and line.account_id.type != 'liquidity':
if line.credit:
line.write({
'credit': line.credit + amount,
}, update_check=False)
elif line.debit:
line.write({
'debit': line.debit - amount,
}, update_check=False)
if line.tax_amount:
line.write({
'tax_amount': line.tax_amount + amount,
}, update_check=False)
break
return amount
def get_invoice_total(self, invoice):
res = 0.0
for inv_move_line in invoice.move_id.line_id:
if inv_move_line.account_id.id == invoice.account_id.id:
                # can both be present?
res += inv_move_line.debit or inv_move_line.credit
return res
def allocated_amounts_grouped_by_invoice(self, cr, uid, compensation, context=None):
'''
this method builds a dictionary in the following form
{
first_invoice_id: {
'allocated': 120.0,
'total': 120.0,
'write-off': 20.0,
}
second_invoice_id: {
'allocated': 50.0,
'total': 100.0,
'write-off': 0.0,
}
}
        Every amount is expressed in company currency.
        In order to compute the cashed amount correctly, the write-off is
        subtracted from the reconciled amount.
        If more than one invoice is paid with this compensation, the
        write-off is distributed equally (if allowed).
'''
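        # Illustrative reading of the dictionary above (hypothetical numbers):
        # for the second invoice, 50.0 of a 100.0 invoice was allocated with
        # no write-off, so (allocated + write-off) / total = 0.5, i.e. half
        # of each VAT line is shifted by the payment entry.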
res = {}
company_currency = super(account_compensation, self)._get_company_currency(
cr, uid, compensation.id, context)
current_currency = super(account_compensation, self)._get_current_currency(
cr, uid, compensation.id, context)
for line in compensation.line_ids:
if line.amount and line.move_line_id and line.move_line_id.invoice:
if line.move_line_id.invoice.id not in res:
res[line.move_line_id.invoice.id] = {
'allocated': 0.0,
'total': 0.0,
'write-off': 0.0, }
current_amount = line.amount
if company_currency != current_currency:
current_amount = super(account_compensation, self)._convert_amount(
cr, uid, line.amount, compensation.id, context)
res[line.move_line_id.invoice.id][
'allocated'] += current_amount
res[line.move_line_id.invoice.id][
'total'] = self.get_invoice_total(line.move_line_id.invoice)
return res
| agpl-3.0 | -6,054,684,893,767,761,000 | 50.537688 | 195 | 0.523498 | false |
mganeva/mantid | scripts/Interface/reduction_gui/widgets/inelastic/dgs_pd_sc_conversion.py | 1 | 5097 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
#pylint: disable=invalid-name
from __future__ import (absolute_import, division, print_function)
from PyQt4 import QtGui, QtCore
from functools import partial
from reduction_gui.widgets.base_widget import BaseWidget
from reduction_gui.reduction.inelastic.dgs_pd_sc_conversion_script import PdAndScConversionScript
import ui.inelastic.ui_dgs_pd_sc_conversion
import reduction_gui.widgets.util as util
class PdAndScConversionWidget(BaseWidget):
"""
Widget that presents powder and single crystal data conversion options
to the user.
"""
## Widget name
name = "Powder and SC"
def __init__(self, parent=None, state=None, settings=None, data_type=None):
super(PdAndScConversionWidget, self).__init__(parent, state, settings,
data_type=data_type)
class PdAndScConversionFrame(QtGui.QFrame, ui.inelastic.ui_dgs_pd_sc_conversion.Ui_PdScConversionFrame):
def __init__(self, parent=None):
QtGui.QFrame.__init__(self, parent)
self.setupUi(self)
self._content = PdAndScConversionFrame(self)
self._layout.addWidget(self._content)
self._instrument_name = settings.instrument_name
self.initialize_content()
if state is not None:
self.set_state(state)
else:
self.set_state(PdAndScConversionScript(self._instrument_name))
def initialize_content(self):
# Constraints
self._content.q_low_edit.setValidator(QtGui.QDoubleValidator(self._content.q_low_edit))
self._content.q_width_edit.setValidator(QtGui.QDoubleValidator(self._content.q_width_edit))
self._content.q_high_edit.setValidator(QtGui.QDoubleValidator(self._content.q_high_edit))
# Default states
self._save_powder_nxs_state(self._content.save_procnexus_cb.isChecked())
# Connections
self.connect(self._content.save_procnexus_save, QtCore.SIGNAL("clicked()"),
self._save_powder_nxs_save)
self.connect(self._content.save_procnexus_cb, QtCore.SIGNAL("toggled(bool)"),
self._save_powder_nxs_state)
# Validate widgets
self._connect_validated_lineedit(self._content.q_low_edit)
self._connect_validated_lineedit(self._content.q_width_edit)
self._connect_validated_lineedit(self._content.q_high_edit)
def _check_and_set_lineedit_content(self, lineedit, content):
lineedit.setText(content)
util.set_valid(lineedit, not lineedit.text() == '')
def _connect_validated_lineedit(self, ui_ctrl):
call_back = partial(self._validate_edit, ctrl=ui_ctrl)
self.connect(ui_ctrl, QtCore.SIGNAL("editingFinished()"), call_back)
self.connect(ui_ctrl, QtCore.SIGNAL("textEdited(QString)"), call_back)
self.connect(ui_ctrl, QtCore.SIGNAL("textChanged(QString)"), call_back)
def _validate_edit(self, ctrl=None):
is_valid = True
if not ctrl.text():
is_valid = False
util.set_valid(ctrl, is_valid)
def _save_powder_nxs_state(self, state):
self._content.save_procnexus_edit.setEnabled(state)
self._content.save_procnexus_save.setEnabled(state)
def _save_powder_nxs_save(self):
fname = self.data_save_dialog("*.nxs")
if fname:
self._content.save_procnexus_edit.setText(fname)
def set_state(self, state):
"""
Populate the UI elements with the data from the given state.
@param state: PdAndScConversionScript object
"""
self._content.powder_gb.setChecked(state.do_pd_convert)
self._check_and_set_lineedit_content(self._content.q_low_edit,
state.pd_q_range_low)
self._check_and_set_lineedit_content(self._content.q_width_edit,
state.pd_q_range_width)
self._check_and_set_lineedit_content(self._content.q_high_edit,
state.pd_q_range_high)
self._content.save_procnexus_cb.setChecked(state.save_powder_nxs)
self._content.save_procnexus_edit.setText(state.save_powder_nxs_file)
def get_state(self):
"""
Returns an object with the state of the interface
"""
p = PdAndScConversionScript(self._instrument_name)
p.do_pd_convert = self._content.powder_gb.isChecked()
p.pd_q_range_low = self._content.q_low_edit.text()
p.pd_q_range_width = self._content.q_width_edit.text()
p.pd_q_range_high = self._content.q_high_edit.text()
p.save_powder_nxs = self._content.save_procnexus_cb.isChecked()
p.save_powder_nxs_file = self._content.save_procnexus_edit.text()
return p
| gpl-3.0 | -2,858,507,349,164,029,400 | 43.321739 | 112 | 0.64332 | false |
jualvarez/charlex | 03-Bonus/charlexapi/charlas/models.py | 2 | 2443 | from django.db import models
from django.conf import settings
class Orador(models.Model):
class Meta:
verbose_name = "orador"
verbose_name_plural = "oradores"
nombre = models.CharField(verbose_name='nombre', max_length=100)
bio = models.TextField(verbose_name='curriculum vitae')
foto = models.ImageField(verbose_name='foto', upload_to='fotosorador')
def __str__(self):
return self.nombre
class Lugar(models.Model):
class Meta:
verbose_name = "lugar"
verbose_name_plural = "lugares"
nombre = models.CharField(verbose_name='nombre del lugar', max_length=100)
def __str__(self):
return self.nombre
class Charla(models.Model):
class Meta:
verbose_name = "charla"
verbose_name_plural = "charlas"
titulo = models.CharField(verbose_name='título', max_length=100)
orador = models.ForeignKey(Orador, verbose_name='orador')
lugar = models.ForeignKey(Lugar, verbose_name='lugar')
hora = models.DateTimeField(verbose_name='hora')
duracion = models.DurationField(verbose_name='duración')
descripcion = models.TextField(verbose_name='descripción de la charla', null=True)
asistentes = models.ManyToManyField(
settings.AUTH_USER_MODEL,
through='UsuarioCharla',
through_fields=('charla', 'usuario'),
related_name='charlas'
)
fotos = models.ManyToManyField(
settings.AUTH_USER_MODEL,
through='FotoCharla',
through_fields=('charla', 'usuario'),
related_name='fotos_charlas'
)
def __str__(self):
return "%s (%s)" % (self.titulo, self.orador.nombre)
class UsuarioCharla(models.Model):
usuario = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
charla = models.ForeignKey(Charla, on_delete=models.CASCADE)
rating = models.IntegerField(verbose_name='rating', null=True)
class Meta:
unique_together = ('usuario', 'charla')
def __str__(self):
return "%s va a '%s'" % (self.usuario.username, self.charla.titulo)
class FotoCharla(models.Model):
foto = models.ImageField(upload_to='fotoscharla')
usuario = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)
charla = models.ForeignKey(Charla, on_delete=models.CASCADE)
def __str__(self):
return "Sacada por %s en '%s'" % (self.usuario.username, self.charla.titulo)
| gpl-3.0 | -708,000,062,447,423,600 | 29.5 | 95 | 0.663934 | false |
vicente-gonzalez-ruiz/QSVC | trunk/src/old_py/motion_expand_COPIA_SIN_LIST.py | 1 | 4326 | #!/usr/bin/python
# -*- coding: iso-8859-15 -*-
# motion_expand.py
# Decompresses the motion data.
import os
import sys
from GOP import GOP
from subprocess import check_call
from subprocess import CalledProcessError
from MCTF_parser import MCTF_parser
#MOTION_DECODER_NAME = "gzip"
#MOTION_DECODER_NAME = "kdu_v_expand"
MCTF_MOTION_CODEC = os.environ["MCTF_MOTION_CODEC"]
block_size = 16
block_size_min = 16
GOPs = 1
pixels_in_x = 352
pixels_in_y = 288
TRLs = 5
parser = MCTF_parser(description="Expands the motion data.")
parser.block_size(block_size)
parser.block_size_min(block_size_min)
parser.GOPs(GOPs)
parser.pixels_in_x(pixels_in_x)
parser.pixels_in_y(pixels_in_y)
parser.TRLs(TRLs)
args = parser.parse_known_args()[0]
if args.block_size:
block_size = int(args.block_size)
if args.block_size_min:
block_size_min = int(args.block_size_min)
if args.GOPs:
GOPs = int(args.GOPs)
if args.pixels_in_x:
pixels_in_x = int(args.pixels_in_x)
if args.pixels_in_y:
pixels_in_y = int(args.pixels_in_y)
if args.TRLs:
TRLs = int(args.TRLs)
gop=GOP()
GOP_size = gop.get_size(TRLs)
pictures = GOPs * GOP_size + 1
if block_size < block_size_min:
block_size_min = block_size
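# For example (assumption: dyadic GOPs, i.e. gop.get_size(TRLs) returning
# 2**(TRLs-1)): with the defaults GOPs=1 and TRLs=5, GOP_size would be 16
# and pictures = 1 * 16 + 1 = 17.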
# Compute the block size used at the lowest temporal resolution
# level.
iterations = TRLs - 1
max_block_size = block_size
iters = TRLs - 1
fields = pictures / 2
iteration = 0
while iteration < iterations:
block_size = block_size / 2
if (block_size < block_size_min):
block_size = block_size_min
fields /= 2
iteration += 1
blocks_in_y = pixels_in_y / block_size
blocks_in_x = pixels_in_x / block_size
# Decompress the motion fields.
iteration = 1
fields = pictures / 2
while iteration <= iterations:
try:
check_call("mctf motion_expand_" + MCTF_MOTION_CODEC
+ " --file=" + "\"" + "motion_residue_" + str(iteration) + "\""
+ " --blocks_in_y=" + str(blocks_in_y)
+ " --blocks_in_x=" + str(blocks_in_x)
+ " --fields=" + str(fields)
+ " --pictures=" + str(pictures),
shell=True)
except CalledProcessError:
sys.exit(-1)
fields /= 2
# os.system("svc motion_expand_" + "gzip"
# + " --blocks_in_x=" + str(blocks_in_x)
# + " --blocks_in_y=" + str(blocks_in_y)
# + " --iteration=" + str(iteration)
# + " --file=" + "\"" + prefix + "_motion_residue_" + str(iteration) + "\""
# + " --pictures=" + str(pictures)
# + " --temporal_levels=" + str(temporal_levels)
# )
iteration += 1
fields = GOPs
try:
    # Undo the bidirectional decorrelation at the lowest temporal
    # resolution level.
check_call("mctf bidirectional_motion_correlate"
+ " --blocks_in_y=" + str(blocks_in_y)
+ " --blocks_in_x=" + str(blocks_in_x)
+ " --fields=" + str(fields)
+ " --input=" + "\"" + "motion_residue_" + str(TRLs - 1) + "\""
+ " --output=" + "\"" + "motion_" + str(TRLs - 1) + "\"",
shell=True)
except CalledProcessError:
sys.exit(-1)
# Undo the interlevel decorrelation.
iterations = TRLs - 1
iteration = iterations
while iteration > 1:
iteration -= 1
fields = pictures / (2**iteration)
blocks_in_y = pixels_in_y / block_size
blocks_in_x = pixels_in_x / block_size
try:
        # Undo the decorrelation of the motion fields across
        # resolution levels.
check_call("mctf interlevel_motion_correlate"
+ " --blocks_in_x=" + str(blocks_in_x)
+ " --blocks_in_y=" + str(blocks_in_y)
+ " --fields_in_predicted=" + str(fields)
+ " --predicted=" + "\"" + "motion_" + str(iteration) + "\""
+ " --reference=" + "\"" + "motion_" + str(iteration+1) + "\""
+ " --residue=" + "\"" + "motion_residue_" + str(iteration) + "\"",
shell=True)
except CalledProcessError:
sys.exit(-1)
    # Compute the block size used in this temporal iteration.
block_size = block_size/2
if (block_size<block_size_min):
block_size = block_size_min
| gpl-2.0 | 6,844,768,240,237,397,000 | 28.630137 | 88 | 0.574434 | false |
anhaidgroup/py_entitymatching | benchmarks/benchmark_sn_blocker.py | 1 | 11105 | # Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.
import os
import sys
import py_entitymatching as mg
p = mg.get_install_path()
datasets_path = os.sep.join([p, 'datasets', 'example_datasets'])
snb = mg.SortedNeighborhoodBlocker()
class TimeBlockTablesAnime:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'anime', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'anime', 'B.csv'])
self.l_block_attr = 'Year'
self.r_block_attr = 'Year'
self.l_output_attrs = ['Title', 'Year', 'Episodes']
self.r_output_attrs = ['Title', 'Year', 'Episodes']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
except AssertionError:
print("Dataset \'anime\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
def time_block_tables(self):
snb.block_tables(self.A, self.B, self.l_block_attr,
self.r_block_attr, self.l_output_attrs,
self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_block_attr
del self.r_block_attr
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesBikes:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'bikes', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'bikes', 'B.csv'])
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'id')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'id')
except AssertionError:
print("Dataset \'bikes\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
self.l_block_attr = 'city_posted'
self.r_block_attr = 'city_posted'
self.l_output_attrs = ['bike_name', 'city_posted', 'km_driven', 'price',
'color', 'model_year']
self.r_output_attrs = ['bike_name', 'city_posted', 'km_driven', 'price',
'color', 'model_year']
def time_block_tables(self):
snb.block_tables(self.A, self.B, self.l_block_attr,
self.r_block_attr, self.l_output_attrs,
self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_block_attr
del self.r_block_attr
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesBooks:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'books', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'books', 'B.csv'])
self.l_block_attr = 'Author'
self.r_block_attr = 'Author'
self.l_output_attrs = ['Title', 'Author', 'ISBN13', 'Publisher',
'Publication_Date']
self.r_output_attrs = ['Title', 'Author', 'ISBN13', 'Publisher',
'Publication_Date']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
except AssertionError:
print("Dataset \'books\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
def time_block_tables(self):
snb.block_tables(self.A, self.B, self.l_block_attr,
self.r_block_attr, self.l_output_attrs,
self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_block_attr
del self.r_block_attr
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesCitations:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'citations', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'citations', 'B.csv'])
self.l_block_attr = 'year'
self.r_block_attr = 'year'
self.l_output_attrs = ['title', 'author', 'year', 'ENTRYTYPE']
self.r_output_attrs = ['title', 'author', 'year', 'ENTRYTYPE']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
except AssertionError:
print("Dataset \'citations\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
def time_block_tables(self):
snb.block_tables(self.A, self.B, self.l_block_attr,
self.r_block_attr, self.l_output_attrs,
self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_block_attr
del self.r_block_attr
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesElectronics:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'electronics', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'electronics', 'B.csv'])
self.l_block_attr = 'Brand'
self.r_block_attr = 'Brand'
self.l_output_attrs = ['Brand', 'Amazon_Price']
self.r_output_attrs = ['Brand', 'Price']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
except AssertionError:
print("Dataset \'electronics\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
def time_block_tables(self):
snb.block_tables(self.A, self.B, self.l_block_attr,
self.r_block_attr, self.l_output_attrs,
self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_block_attr
del self.r_block_attr
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockTablesRestaurants:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'restaurants', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'restaurants', 'B.csv'])
self.l_block_attr = 'PHONENUMBER'
self.r_block_attr = 'PHONENUMBER'
self.l_output_attrs = ['NAME', 'PHONENUMBER', 'ADDRESS']
self.r_output_attrs = ['NAME', 'PHONENUMBER', 'ADDRESS']
try:
self.A = mg.read_csv_metadata(path_for_A)
mg.set_key(self.A, 'ID')
self.B = mg.read_csv_metadata(path_for_B)
mg.set_key(self.B, 'ID')
except AssertionError:
print("Dataset \'restaurants\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
def time_block_tables(self):
snb.block_tables(self.A, self.B, self.l_block_attr,
self.r_block_attr, self.l_output_attrs,
self.r_output_attrs)
def teardown(self):
del self.A
del self.B
del self.l_block_attr
del self.r_block_attr
del self.l_output_attrs
del self.r_output_attrs
class TimeBlockCandsetAnime:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'anime', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'anime', 'B.csv'])
try:
A = mg.read_csv_metadata(path_for_A)
mg.set_key(A, 'ID')
B = mg.read_csv_metadata(path_for_B)
mg.set_key(B, 'ID')
self.C = snb.block_tables(A, B, 'Year', 'Year',
['Title', 'Year', 'Episodes'],
['Title', 'Year', 'Episodes'])
except AssertionError:
print("Dataset \'anime\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
self.l_block_attr = 'Episodes'
self.r_block_attr = 'Episodes'
def time_block_candset(self):
snb.block_candset(self.C, self.l_block_attr, self.r_block_attr)
def teardown(self):
del self.C
del self.l_block_attr
del self.r_block_attr
class TimeBlockCandsetBikes:
timeout = 300.0
def setup(self):
path_for_A = os.sep.join([datasets_path, 'bikes', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'bikes', 'B.csv'])
try:
A = mg.read_csv_metadata(path_for_A)
mg.set_key(A, 'id')
B = mg.read_csv_metadata(path_for_B)
mg.set_key(B, 'id')
l_output_attrs = ['bike_name', 'city_posted', 'km_driven', 'price',
'color', 'model_year']
r_output_attrs = ['bike_name', 'city_posted', 'km_driven', 'price',
'color', 'model_year']
self.C = snb.block_tables(A, B, 'city_posted', 'city_posted',
l_output_attrs, r_output_attrs)
except AssertionError:
print("Dataset \'bikes\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
self.l_block_attr = 'model_year'
self.r_block_attr = 'model_year'
def time_block_candset(self):
snb.block_candset(self.C, self.l_block_attr, self.r_block_attr)
def teardown(self):
del self.C
del self.l_block_attr
del self.r_block_attr
class TimeBlockCandsetBooks:
def setup(self):
path_for_A = os.sep.join([datasets_path, 'books', 'A.csv'])
path_for_B = os.sep.join([datasets_path, 'books', 'B.csv'])
try:
A = mg.read_csv_metadata(path_for_A)
mg.set_key(A, 'ID')
B = mg.read_csv_metadata(path_for_B)
mg.set_key(B, 'ID')
self.C = snb.block_tables(A, B, 'Author', 'Author',
['Title', 'Author', 'ISBN13', 'Publisher'],
['Title', 'Author', 'ISBN13', 'Publisher'])
except AssertionError:
print("Dataset \'books\' not found. Please visit the project"
" website to download the dataset.")
raise SystemExit
self.l_block_attr = 'ISBN13'
self.r_block_attr = 'ISBN13'
def time_block_candset(self):
snb.block_candset(self.C, self.l_block_attr, self.r_block_attr)
def teardown(self):
del self.C
del self.l_block_attr
del self.r_block_attr
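# Illustrative only: asv discovers these classes by naming convention and
# times every method whose name starts with "time_", calling setup() before
# and teardown() after. A rough standalone equivalent (hypothetical,
# bypassing asv) would be:
def _time_once(benchmark_cls):
    import timeit
    bench = benchmark_cls()
    bench.setup()
    try:
        return timeit.timeit(bench.time_block_tables, number=1)
    finally:
        bench.teardown()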
| bsd-3-clause | 2,245,673,775,480,971,000 | 35.771523 | 80 | 0.535795 | false |
sencha/chromium-spacewalk | third_party/closure_compiler/processor_test.py | 33 | 3425 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test resources processing, i.e. <if> and <include> tag handling."""
import unittest
from processor import FileCache, Processor, LineNumber
class ProcessorTest(unittest.TestCase):
"""Test <include> tag processing logic."""
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.maxDiff = None
def setUp(self):
FileCache._cache["/debug.js"] = """
// Copyright 2002 Older Chromium Author dudes.
function debug(msg) { if (window.DEBUG) alert(msg); }
""".strip()
FileCache._cache["/global.js"] = """
// Copyright 2014 Old Chromium Author dudes.
<include src="/debug.js">
var global = 'type checking!';
""".strip()
FileCache._cache["/checked.js"] = """
// Copyright 2028 Future Chromium Author dudes.
/**
* @fileoverview Coolest app ever.
* @author Douglas Crockford ([email protected])
*/
<include src="/global.js">
debug(global);
// Here continues checked.js, a swell file.
""".strip()
self._processor = Processor("/checked.js")
def testInline(self):
self.assertMultiLineEqual("""
// Copyright 2028 Future Chromium Author dudes.
/**
* @fileoverview Coolest app ever.
* @author Douglas Crockford ([email protected])
*/
// Copyright 2014 Old Chromium Author dudes.
// Copyright 2002 Older Chromium Author dudes.
function debug(msg) { if (window.DEBUG) alert(msg); }
var global = 'type checking!';
debug(global);
// Here continues checked.js, a swell file.
""".strip(), self._processor.contents)
def assertLineNumber(self, abs_line, expected_line):
actual_line = self._processor.get_file_from_line(abs_line)
self.assertEqual(expected_line.file, actual_line.file)
self.assertEqual(expected_line.line_number, actual_line.line_number)
def testGetFileFromLine(self):
"""Verify that inlined files retain their original line info."""
self.assertLineNumber(1, LineNumber("/checked.js", 1))
self.assertLineNumber(5, LineNumber("/checked.js", 5))
self.assertLineNumber(6, LineNumber("/global.js", 1))
self.assertLineNumber(7, LineNumber("/debug.js", 1))
self.assertLineNumber(8, LineNumber("/debug.js", 2))
self.assertLineNumber(9, LineNumber("/global.js", 3))
self.assertLineNumber(10, LineNumber("/checked.js", 7))
self.assertLineNumber(11, LineNumber("/checked.js", 8))
def testIncludedFiles(self):
"""Verify that files are tracked correctly as they're inlined."""
self.assertEquals(set(["/global.js", "/debug.js"]),
self._processor.included_files)
class IfStrippingTest(unittest.TestCase):
"""Test that the contents of XML <if> blocks are stripped."""
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.maxDiff = None
def setUp(self):
FileCache._cache["/century.js"] = """
function getCurrentCentury() {
<if expr="netscape_os">
alert("Oh wow!");
return "XX";
</if>
return "XXI";
}
""".strip()
self.processor_ = Processor("/century.js")
def testIfStripping(self):
self.assertMultiLineEqual("""
function getCurrentCentury() {
alert("Oh wow!");
return "XX";
return "XXI";
}
""".strip(), self.processor_.contents)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -3,318,801,330,395,282,400 | 29.04386 | 72 | 0.677664 | false |
openstack-dev/devstack | roles/write-devstack-local-conf/library/devstack_local_conf.py | 3 | 12720 | # Copyright (C) 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
class DependencyGraph(object):
# This is based on the JobGraph from Zuul.
def __init__(self):
self._names = set()
self._dependencies = {} # dependent_name -> set(parent_names)
def add(self, name, dependencies):
# Append the dependency information
self._dependencies.setdefault(name, set())
try:
for dependency in dependencies:
# Make sure a circular dependency is never created
ancestors = self._getParentNamesRecursively(
dependency, soft=True)
ancestors.add(dependency)
if name in ancestors:
raise Exception("Dependency cycle detected in {}".
format(name))
self._dependencies[name].add(dependency)
except Exception:
del self._dependencies[name]
raise
def getDependenciesRecursively(self, parent):
dependencies = []
current_dependencies = self._dependencies[parent]
for current in current_dependencies:
if current not in dependencies:
dependencies.append(current)
for dep in self.getDependenciesRecursively(current):
if dep not in dependencies:
dependencies.append(dep)
return dependencies
def _getParentNamesRecursively(self, dependent, soft=False):
all_parent_items = set()
items_to_iterate = set([dependent])
while len(items_to_iterate) > 0:
current_item = items_to_iterate.pop()
current_parent_items = self._dependencies.get(current_item)
if current_parent_items is None:
if soft:
current_parent_items = set()
else:
raise Exception("Dependent item {} not found: ".format(
dependent))
new_parent_items = current_parent_items - all_parent_items
items_to_iterate |= new_parent_items
all_parent_items |= new_parent_items
return all_parent_items
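# Illustrative only (not used by the module itself; names are hypothetical):
# getDependenciesRecursively() returns each direct dependency followed by
# that dependency's own dependencies, and add() rejects any edge that would
# close a cycle.
def _dependency_graph_demo():
    g = DependencyGraph()
    g.add('a', [])
    g.add('b', ['a'])
    g.add('c', ['b'])
    order = g.getDependenciesRecursively('c')  # ['b', 'a']
    try:
        g.add('a', ['c'])  # would close a cycle -> raises Exception
    except Exception:
        pass
    return order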
class VarGraph(DependencyGraph):
def __init__(self, vars):
super(VarGraph, self).__init__()
self.vars = {}
self._varnames = set()
for k, v in vars.items():
self._varnames.add(k)
for k, v in vars.items():
self._addVar(k, str(v))
bash_var_re = re.compile(r'\$\{?(\w+)')
def getDependencies(self, value):
return self.bash_var_re.findall(value)
def _addVar(self, key, value):
if key in self.vars:
raise Exception("Variable {} already added".format(key))
self.vars[key] = value
# Append the dependency information
dependencies = set()
for dependency in self.getDependencies(value):
if dependency == key:
# A variable is allowed to reference itself; no
# dependency link needed in that case.
continue
if dependency not in self._varnames:
# It's not necessary to create a link for an
# external variable.
continue
dependencies.add(dependency)
try:
self.add(key, dependencies)
except Exception:
del self.vars[key]
raise
def getVars(self):
ret = []
keys = sorted(self.vars.keys())
seen = set()
for key in keys:
dependencies = self.getDependenciesRecursively(key)
for var in dependencies + [key]:
if var not in seen:
ret.append((var, self.vars[var]))
seen.add(var)
return ret
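# Illustrative only: getVars() emits every variable after anything it
# references via $VAR or ${VAR}, which keeps the generated localrc valid.
# With hypothetical values:
def _var_graph_demo():
    vg = VarGraph({'B': '$A/sub', 'A': '/opt/stack'})
    return vg.getVars()  # [('A', '/opt/stack'), ('B', '$A/sub')]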
class PluginGraph(DependencyGraph):
def __init__(self, base_dir, plugins):
super(PluginGraph, self).__init__()
# The dependency trees expressed by all the plugins we found
# (which may be more than those the job is using).
self._plugin_dependencies = {}
self.loadPluginNames(base_dir)
self.plugins = {}
self._pluginnames = set()
for k, v in plugins.items():
self._pluginnames.add(k)
for k, v in plugins.items():
self._addPlugin(k, str(v))
def loadPluginNames(self, base_dir):
if base_dir is None:
return
git_roots = []
for root, dirs, files in os.walk(base_dir):
if '.git' not in dirs:
continue
# Don't go deeper than git roots
dirs[:] = []
git_roots.append(root)
for root in git_roots:
devstack = os.path.join(root, 'devstack')
if not (os.path.exists(devstack) and os.path.isdir(devstack)):
continue
settings = os.path.join(devstack, 'settings')
if not (os.path.exists(settings) and os.path.isfile(settings)):
continue
self.loadDevstackPluginInfo(settings)
define_re = re.compile(r'^define_plugin\s+(\S+).*')
require_re = re.compile(r'^plugin_requires\s+(\S+)\s+(\S+).*')
def loadDevstackPluginInfo(self, fn):
name = None
reqs = set()
with open(fn) as f:
for line in f:
m = self.define_re.match(line)
if m:
name = m.group(1)
m = self.require_re.match(line)
if m:
if name == m.group(1):
reqs.add(m.group(2))
if name and reqs:
self._plugin_dependencies[name] = reqs
def getDependencies(self, value):
return self._plugin_dependencies.get(value, [])
def _addPlugin(self, key, value):
if key in self.plugins:
raise Exception("Plugin {} already added".format(key))
self.plugins[key] = value
# Append the dependency information
dependencies = set()
for dependency in self.getDependencies(key):
if dependency == key:
continue
dependencies.add(dependency)
try:
self.add(key, dependencies)
except Exception:
del self.plugins[key]
raise
def getPlugins(self):
ret = []
keys = sorted(self.plugins.keys())
seen = set()
for key in keys:
dependencies = self.getDependenciesRecursively(key)
for plugin in dependencies + [key]:
if plugin not in seen:
ret.append((plugin, self.plugins[plugin]))
seen.add(plugin)
return ret
class LocalConf(object):
def __init__(self, localrc, localconf, base_services, services, plugins,
base_dir, projects, project, tempest_plugins):
self.localrc = []
self.warnings = []
self.meta_sections = {}
self.plugin_deps = {}
self.base_dir = base_dir
self.projects = projects
self.project = project
self.tempest_plugins = tempest_plugins
if services or base_services:
self.handle_services(base_services, services or {})
self.handle_localrc(localrc)
# Plugins must be the last items in localrc, otherwise
        # the configuration lines which follow them in the file are
# not applied to the plugins (for example, the value of DEST.)
if plugins:
self.handle_plugins(plugins)
if localconf:
self.handle_localconf(localconf)
def handle_plugins(self, plugins):
pg = PluginGraph(self.base_dir, plugins)
for k, v in pg.getPlugins():
if v:
self.localrc.append('enable_plugin {} {}'.format(k, v))
def handle_services(self, base_services, services):
enable_base_services = services.pop('base', True)
if enable_base_services and base_services:
self.localrc.append('ENABLED_SERVICES={}'.format(
",".join(base_services)))
else:
self.localrc.append('disable_all_services')
for k, v in services.items():
if v is False:
self.localrc.append('disable_service {}'.format(k))
elif v is True:
self.localrc.append('enable_service {}'.format(k))
def handle_localrc(self, localrc):
lfg = False
tp = False
if localrc:
vg = VarGraph(localrc)
for k, v in vg.getVars():
# Avoid double quoting
if len(v) and v[0]=='"':
self.localrc.append('{}={}'.format(k, v))
else:
self.localrc.append('{}="{}"'.format(k, v))
if k == 'LIBS_FROM_GIT':
lfg = True
elif k == 'TEMPEST_PLUGINS':
tp = True
if not lfg and (self.projects or self.project):
required_projects = []
if self.projects:
for project_name, project_info in self.projects.items():
if project_info.get('required'):
required_projects.append(project_info['short_name'])
if self.project:
if self.project['short_name'] not in required_projects:
required_projects.append(self.project['short_name'])
if required_projects:
self.localrc.append('LIBS_FROM_GIT={}'.format(
','.join(required_projects)))
if self.tempest_plugins:
if not tp:
tp_dirs = []
for tempest_plugin in self.tempest_plugins:
tp_dirs.append(os.path.join(self.base_dir, tempest_plugin))
self.localrc.append('TEMPEST_PLUGINS="{}"'.format(
' '.join(tp_dirs)))
else:
self.warnings.append('TEMPEST_PLUGINS already defined ({}),'
'requested value {} ignored'.format(
tp, self.tempest_plugins))
def handle_localconf(self, localconf):
for phase, phase_data in localconf.items():
for fn, fn_data in phase_data.items():
ms_name = '[[{}|{}]]'.format(phase, fn)
ms_data = []
for section, section_data in fn_data.items():
ms_data.append('[{}]'.format(section))
for k, v in section_data.items():
ms_data.append('{} = {}'.format(k, v))
ms_data.append('')
self.meta_sections[ms_name] = ms_data
def write(self, path):
with open(path, 'w') as f:
f.write('[[local|localrc]]\n')
f.write('\n'.join(self.localrc))
f.write('\n\n')
for section, lines in self.meta_sections.items():
f.write('{}\n'.format(section))
f.write('\n'.join(lines))
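# For reference (abridged, with hypothetical values), write() produces a
# devstack-style local.conf along these lines:
#
#   [[local|localrc]]
#   ENABLED_SERVICES=key,mysql
#   enable_plugin neutron https://opendev.org/openstack/neutron
#
#   [[post-config|$NEUTRON_CONF]]
#   [DEFAULT]
#   debug = True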
def main():
module = AnsibleModule(
argument_spec=dict(
plugins=dict(type='dict'),
base_services=dict(type='list'),
services=dict(type='dict'),
localrc=dict(type='dict'),
local_conf=dict(type='dict'),
base_dir=dict(type='path'),
path=dict(type='str'),
projects=dict(type='dict'),
project=dict(type='dict'),
tempest_plugins=dict(type='list'),
)
)
p = module.params
lc = LocalConf(p.get('localrc'),
p.get('local_conf'),
p.get('base_services'),
p.get('services'),
p.get('plugins'),
p.get('base_dir'),
p.get('projects'),
p.get('project'),
p.get('tempest_plugins'))
lc.write(p['path'])
module.exit_json(warnings=lc.warnings)
try:
from ansible.module_utils.basic import * # noqa
from ansible.module_utils.basic import AnsibleModule
except ImportError:
pass
if __name__ == '__main__':
main()
| apache-2.0 | -1,782,150,664,127,316,500 | 35.239316 | 79 | 0.535299 | false |
AndroidOpenDevelopment/android_external_chromium_org | mojo/public/tools/bindings/generators/mojom_cpp_generator.py | 8 | 9880 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates C++ source files from a mojom.Module."""
import mojom.generate.generator as generator
import mojom.generate.module as mojom
import mojom.generate.pack as pack
from mojom.generate.template_expander import UseJinja
_kind_to_cpp_type = {
mojom.BOOL: "bool",
mojom.INT8: "int8_t",
mojom.UINT8: "uint8_t",
mojom.INT16: "int16_t",
mojom.UINT16: "uint16_t",
mojom.INT32: "int32_t",
mojom.UINT32: "uint32_t",
mojom.FLOAT: "float",
mojom.HANDLE: "mojo::Handle",
mojom.DCPIPE: "mojo::DataPipeConsumerHandle",
mojom.DPPIPE: "mojo::DataPipeProducerHandle",
mojom.MSGPIPE: "mojo::MessagePipeHandle",
mojom.SHAREDBUFFER: "mojo::SharedBufferHandle",
mojom.INT64: "int64_t",
mojom.UINT64: "uint64_t",
mojom.DOUBLE: "double",
}
def DefaultValue(field):
if field.default:
if isinstance(field.kind, mojom.Struct):
assert field.default == "default"
return "%s::New()" % GetNameForKind(field.kind)
return ExpressionToText(field.default)
return ""
def NamespaceToArray(namespace):
return namespace.split('.') if namespace else []
def GetNameForKind(kind, internal = False):
parts = []
if kind.imported_from:
parts.extend(NamespaceToArray(kind.imported_from["namespace"]))
if internal:
parts.append("internal")
if kind.parent_kind:
parts.append(kind.parent_kind.name)
parts.append(kind.name)
return "::".join(parts)
def GetCppType(kind):
if isinstance(kind, mojom.Struct):
return "%s_Data*" % GetNameForKind(kind, internal=True)
if isinstance(kind, mojom.Array):
return "mojo::internal::Array_Data<%s>*" % GetCppType(kind.kind)
if isinstance(kind, mojom.Interface) or \
isinstance(kind, mojom.InterfaceRequest):
return "mojo::MessagePipeHandle"
if isinstance(kind, mojom.Enum):
return "int32_t"
if kind.spec == 's':
return "mojo::internal::String_Data*"
return _kind_to_cpp_type[kind]
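# For illustration (hypothetical kinds): GetCppType(mojom.INT64) yields
# "int64_t", while an array-of-strings kind expands recursively to
# "mojo::internal::Array_Data<mojo::internal::String_Data*>*".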
def GetCppPodType(kind):
if kind.spec == 's':
return "char*"
return _kind_to_cpp_type[kind]
def GetCppArrayArgWrapperType(kind):
if isinstance(kind, mojom.Enum):
return GetNameForKind(kind)
if isinstance(kind, mojom.Struct):
return "%sPtr" % GetNameForKind(kind)
if isinstance(kind, mojom.Array):
return "mojo::Array<%s> " % GetCppArrayArgWrapperType(kind.kind)
if isinstance(kind, mojom.Interface):
raise Exception("Arrays of interfaces not yet supported!")
if isinstance(kind, mojom.InterfaceRequest):
raise Exception("Arrays of interface requests not yet supported!")
if kind.spec == 's':
return "mojo::String"
if kind.spec == 'h':
return "mojo::ScopedHandle"
if kind.spec == 'h:d:c':
return "mojo::ScopedDataPipeConsumerHandle"
if kind.spec == 'h:d:p':
return "mojo::ScopedDataPipeProducerHandle"
if kind.spec == 'h:m':
return "mojo::ScopedMessagePipeHandle"
if kind.spec == 'h:s':
return "mojo::ScopedSharedBufferHandle"
return _kind_to_cpp_type[kind]
def GetCppResultWrapperType(kind):
if isinstance(kind, mojom.Enum):
return GetNameForKind(kind)
if isinstance(kind, mojom.Struct):
return "%sPtr" % GetNameForKind(kind)
if isinstance(kind, mojom.Array):
return "mojo::Array<%s>" % GetCppArrayArgWrapperType(kind.kind)
if isinstance(kind, mojom.Interface):
return "%sPtr" % GetNameForKind(kind)
if isinstance(kind, mojom.InterfaceRequest):
return "mojo::InterfaceRequest<%s>" % GetNameForKind(kind.kind)
if kind.spec == 's':
return "mojo::String"
if kind.spec == 'h':
return "mojo::ScopedHandle"
if kind.spec == 'h:d:c':
return "mojo::ScopedDataPipeConsumerHandle"
if kind.spec == 'h:d:p':
return "mojo::ScopedDataPipeProducerHandle"
if kind.spec == 'h:m':
return "mojo::ScopedMessagePipeHandle"
if kind.spec == 'h:s':
return "mojo::ScopedSharedBufferHandle"
return _kind_to_cpp_type[kind]
def GetCppWrapperType(kind):
if isinstance(kind, mojom.Enum):
return GetNameForKind(kind)
if isinstance(kind, mojom.Struct):
return "%sPtr" % GetNameForKind(kind)
if isinstance(kind, mojom.Array):
return "mojo::Array<%s>" % GetCppArrayArgWrapperType(kind.kind)
if isinstance(kind, mojom.Interface):
return "mojo::ScopedMessagePipeHandle"
if isinstance(kind, mojom.InterfaceRequest):
raise Exception("InterfaceRequest fields not supported!")
if kind.spec == 's':
return "mojo::String"
if kind.spec == 'h':
return "mojo::ScopedHandle"
if kind.spec == 'h:d:c':
return "mojo::ScopedDataPipeConsumerHandle"
if kind.spec == 'h:d:p':
return "mojo::ScopedDataPipeProducerHandle"
if kind.spec == 'h:m':
return "mojo::ScopedMessagePipeHandle"
if kind.spec == 'h:s':
return "mojo::ScopedSharedBufferHandle"
return _kind_to_cpp_type[kind]
def GetCppConstWrapperType(kind):
if isinstance(kind, mojom.Struct):
return "%sPtr" % GetNameForKind(kind)
if isinstance(kind, mojom.Array):
return "mojo::Array<%s>" % GetCppArrayArgWrapperType(kind.kind)
if isinstance(kind, mojom.Interface):
return "%sPtr" % GetNameForKind(kind)
if isinstance(kind, mojom.InterfaceRequest):
return "mojo::InterfaceRequest<%s>" % GetNameForKind(kind.kind)
if isinstance(kind, mojom.Enum):
return GetNameForKind(kind)
if kind.spec == 's':
return "const mojo::String&"
if kind.spec == 'h':
return "mojo::ScopedHandle"
if kind.spec == 'h:d:c':
return "mojo::ScopedDataPipeConsumerHandle"
if kind.spec == 'h:d:p':
return "mojo::ScopedDataPipeProducerHandle"
if kind.spec == 'h:m':
return "mojo::ScopedMessagePipeHandle"
if kind.spec == 'h:s':
return "mojo::ScopedSharedBufferHandle"
if not kind in _kind_to_cpp_type:
print "missing:", kind.spec
return _kind_to_cpp_type[kind]
def GetCppFieldType(kind):
if isinstance(kind, mojom.Struct):
return ("mojo::internal::StructPointer<%s_Data>" %
GetNameForKind(kind, internal=True))
if isinstance(kind, mojom.Array):
return "mojo::internal::ArrayPointer<%s>" % GetCppType(kind.kind)
if isinstance(kind, mojom.Interface) or \
isinstance(kind, mojom.InterfaceRequest):
return "mojo::MessagePipeHandle"
if isinstance(kind, mojom.Enum):
return GetNameForKind(kind)
if kind.spec == 's':
return "mojo::internal::StringPointer"
return _kind_to_cpp_type[kind]
def IsStructWithHandles(struct):
for pf in struct.packed.packed_fields:
if generator.IsHandleKind(pf.field.kind):
return True
return False
def TranslateConstants(token):
if isinstance(token, (mojom.NamedValue, mojom.EnumValue)):
# Both variable and enum constants are constructed like:
# Namespace::Struct::CONSTANT_NAME
name = []
if token.imported_from:
name.extend(NamespaceToArray(token.namespace))
if token.parent_kind:
name.append(token.parent_kind.name)
name.append(token.name)
return "::".join(name)
return token
def ExpressionToText(value):
return TranslateConstants(value)
def HasCallbacks(interface):
for method in interface.methods:
if method.response_parameters != None:
return True
return False
def ShouldInlineStruct(struct):
# TODO(darin): Base this on the size of the wrapper class.
if len(struct.fields) > 4:
return False
for field in struct.fields:
if generator.IsHandleKind(field.kind) or generator.IsObjectKind(field.kind):
return False
return True
_HEADER_SIZE = 8
class Generator(generator.Generator):
cpp_filters = {
"cpp_const_wrapper_type": GetCppConstWrapperType,
"cpp_field_type": GetCppFieldType,
"cpp_pod_type": GetCppPodType,
"cpp_result_type": GetCppResultWrapperType,
"cpp_type": GetCppType,
"cpp_wrapper_type": GetCppWrapperType,
"default_value": DefaultValue,
"expression_to_text": ExpressionToText,
"get_name_for_kind": GetNameForKind,
"get_pad": pack.GetPad,
"has_callbacks": HasCallbacks,
"should_inline": ShouldInlineStruct,
"is_enum_kind": generator.IsEnumKind,
"is_move_only_kind": generator.IsMoveOnlyKind,
"is_handle_kind": generator.IsHandleKind,
"is_interface_kind": generator.IsInterfaceKind,
"is_interface_request_kind": generator.IsInterfaceRequestKind,
"is_object_kind": generator.IsObjectKind,
"is_string_kind": generator.IsStringKind,
"is_struct_with_handles": IsStructWithHandles,
"struct_size": lambda ps: ps.GetTotalSize() + _HEADER_SIZE,
"struct_from_method": generator.GetStructFromMethod,
"response_struct_from_method": generator.GetResponseStructFromMethod,
"stylize_method": generator.StudlyCapsToCamel,
}
def GetJinjaExports(self):
return {
"module": self.module,
"namespace": self.module.namespace,
"namespaces_as_array": NamespaceToArray(self.module.namespace),
"imports": self.module.imports,
"kinds": self.module.kinds,
"enums": self.module.enums,
"structs": self.GetStructs(),
"interfaces": self.module.interfaces,
}
@UseJinja("cpp_templates/module.h.tmpl", filters=cpp_filters)
def GenerateModuleHeader(self):
return self.GetJinjaExports()
@UseJinja("cpp_templates/module-internal.h.tmpl", filters=cpp_filters)
def GenerateModuleInternalHeader(self):
return self.GetJinjaExports()
@UseJinja("cpp_templates/module.cc.tmpl", filters=cpp_filters)
def GenerateModuleSource(self):
return self.GetJinjaExports()
def GenerateFiles(self, args):
self.Write(self.GenerateModuleHeader(), "%s.h" % self.module.name)
self.Write(self.GenerateModuleInternalHeader(),
"%s-internal.h" % self.module.name)
self.Write(self.GenerateModuleSource(), "%s.cc" % self.module.name)
| bsd-3-clause | 7,320,768,438,576,670,000 | 33.545455 | 80 | 0.697267 | false |
poojavade/Genomics_Docker | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Sphinx-1.2.3-py2.7.egg/sphinx/config.py | 11 | 10843 | # -*- coding: utf-8 -*-
"""
sphinx.config
~~~~~~~~~~~~~
Build configuration file handling.
:copyright: Copyright 2007-2014 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
from os import path
from sphinx.errors import ConfigError
from sphinx.locale import l_
from sphinx.util.osutil import make_filename
from sphinx.util.pycompat import bytes, b, execfile_
nonascii_re = re.compile(b(r'[\x80-\xff]'))
CONFIG_SYNTAX_ERROR = "There is a syntax error in your configuration file: %s"
if sys.version_info >= (3, 0):
CONFIG_SYNTAX_ERROR += "\nDid you change the syntax from 2.x to 3.x?"
class Config(object):
"""
Configuration file abstraction.
"""
# the values are: (default, what needs to be rebuilt if changed)
# If you add a value here, don't forget to include it in the
# quickstart.py file template as well as in the docs!
config_values = dict(
# general options
project = ('Python', 'env'),
copyright = ('', 'html'),
version = ('', 'env'),
release = ('', 'env'),
today = ('', 'env'),
today_fmt = (None, 'env'), # the real default is locale-dependent
language = (None, 'env'),
locale_dirs = ([], 'env'),
master_doc = ('contents', 'env'),
source_suffix = ('.rst', 'env'),
source_encoding = ('utf-8-sig', 'env'),
exclude_patterns = ([], 'env'),
# the next three are all deprecated now
unused_docs = ([], 'env'),
exclude_trees = ([], 'env'),
exclude_dirnames = ([], 'env'),
default_role = (None, 'env'),
add_function_parentheses = (True, 'env'),
add_module_names = (True, 'env'),
trim_footnote_reference_space = (False, 'env'),
show_authors = (False, 'env'),
pygments_style = (None, 'html'),
highlight_language = ('python', 'env'),
templates_path = ([], 'html'),
template_bridge = (None, 'html'),
keep_warnings = (False, 'env'),
modindex_common_prefix = ([], 'html'),
rst_epilog = (None, 'env'),
rst_prolog = (None, 'env'),
trim_doctest_flags = (True, 'env'),
primary_domain = ('py', 'env'),
needs_sphinx = (None, None),
nitpicky = (False, 'env'),
nitpick_ignore = ([], 'html'),
# HTML options
html_theme = ('default', 'html'),
html_theme_path = ([], 'html'),
html_theme_options = ({}, 'html'),
html_title = (lambda self: l_('%s %s documentation') %
(self.project, self.release),
'html'),
html_short_title = (lambda self: self.html_title, 'html'),
html_style = (None, 'html'),
html_logo = (None, 'html'),
html_favicon = (None, 'html'),
html_static_path = ([], 'html'),
html_extra_path = ([], 'html'),
# the real default is locale-dependent
html_last_updated_fmt = (None, 'html'),
html_use_smartypants = (True, 'html'),
html_translator_class = (None, 'html'),
html_sidebars = ({}, 'html'),
html_additional_pages = ({}, 'html'),
html_use_modindex = (True, 'html'), # deprecated
html_domain_indices = (True, 'html'),
html_add_permalinks = (u'\u00B6', 'html'),
html_use_index = (True, 'html'),
html_split_index = (False, 'html'),
html_copy_source = (True, 'html'),
html_show_sourcelink = (True, 'html'),
html_use_opensearch = ('', 'html'),
html_file_suffix = (None, 'html'),
html_link_suffix = (None, 'html'),
html_show_copyright = (True, 'html'),
html_show_sphinx = (True, 'html'),
html_context = ({}, 'html'),
html_output_encoding = ('utf-8', 'html'),
html_compact_lists = (True, 'html'),
html_secnumber_suffix = ('. ', 'html'),
html_search_language = (None, 'html'),
html_search_options = ({}, 'html'),
html_search_scorer = ('', None),
# HTML help only options
htmlhelp_basename = (lambda self: make_filename(self.project), None),
# Qt help only options
qthelp_basename = (lambda self: make_filename(self.project), None),
# Devhelp only options
devhelp_basename = (lambda self: make_filename(self.project), None),
# Epub options
epub_basename = (lambda self: make_filename(self.project), None),
epub_theme = ('epub', 'html'),
epub_theme_options = ({}, 'html'),
epub_title = (lambda self: self.html_title, 'html'),
epub_author = ('unknown', 'html'),
epub_language = (lambda self: self.language or 'en', 'html'),
epub_publisher = ('unknown', 'html'),
epub_copyright = (lambda self: self.copyright, 'html'),
epub_identifier = ('unknown', 'html'),
epub_scheme = ('unknown', 'html'),
epub_uid = ('unknown', 'env'),
epub_cover = ((), 'env'),
epub_guide = ((), 'env'),
epub_pre_files = ([], 'env'),
epub_post_files = ([], 'env'),
epub_exclude_files = ([], 'env'),
epub_tocdepth = (3, 'env'),
epub_tocdup = (True, 'env'),
epub_tocscope = ('default', 'env'),
epub_fix_images = (False, 'env'),
epub_max_image_width = (0, 'env'),
epub_show_urls = ('inline', 'html'),
epub_use_index = (lambda self: self.html_use_index, 'html'),
# LaTeX options
latex_documents = (lambda self: [(self.master_doc,
make_filename(self.project) + '.tex',
self.project,
'', 'manual')],
None),
latex_logo = (None, None),
latex_appendices = ([], None),
latex_use_parts = (False, None),
latex_use_modindex = (True, None), # deprecated
latex_domain_indices = (True, None),
latex_show_urls = ('no', None),
latex_show_pagerefs = (False, None),
# paper_size and font_size are still separate values
# so that you can give them easily on the command line
latex_paper_size = ('letter', None),
latex_font_size = ('10pt', None),
latex_elements = ({}, None),
latex_additional_files = ([], None),
latex_docclass = ({}, None),
# now deprecated - use latex_elements
latex_preamble = ('', None),
# text options
text_sectionchars = ('*=-~"+`', 'env'),
text_newlines = ('unix', 'env'),
# manpage options
man_pages = (lambda self: [(self.master_doc,
make_filename(self.project).lower(),
'%s %s' % (self.project, self.release),
[], 1)],
None),
man_show_urls = (False, None),
# Texinfo options
texinfo_documents = (lambda self: [(self.master_doc,
make_filename(self.project).lower(),
self.project, '',
make_filename(self.project),
'The %s reference manual.' %
make_filename(self.project),
'Python')],
None),
texinfo_appendices = ([], None),
texinfo_elements = ({}, None),
texinfo_domain_indices = (True, None),
texinfo_show_urls = ('footnote', None),
texinfo_no_detailmenu = (False, None),
# linkcheck options
linkcheck_ignore = ([], None),
linkcheck_timeout = (None, None),
linkcheck_workers = (5, None),
linkcheck_anchors = (True, None),
# gettext options
gettext_compact = (True, 'gettext'),
# XML options
xml_pretty = (True, 'env'),
)
def __init__(self, dirname, filename, overrides, tags):
self.overrides = overrides
self.values = Config.config_values.copy()
config = {}
if "extensions" in overrides:
config["extensions"] = overrides["extensions"]
if dirname is not None:
config_file = path.join(dirname, filename)
config['__file__'] = config_file
config['tags'] = tags
olddir = os.getcwd()
try:
# we promise to have the config dir as current dir while the
# config file is executed
os.chdir(dirname)
try:
execfile_(filename, config)
except SyntaxError, err:
raise ConfigError(CONFIG_SYNTAX_ERROR % err)
finally:
os.chdir(olddir)
self._raw_config = config
# these two must be preinitialized because extensions can add their
# own config values
self.setup = config.get('setup', None)
self.extensions = config.get('extensions', [])
def check_unicode(self, warn):
# check all string values for non-ASCII characters in bytestrings,
# since that can result in UnicodeErrors all over the place
for name, value in self._raw_config.iteritems():
if isinstance(value, bytes) and nonascii_re.search(value):
warn('the config value %r is set to a string with non-ASCII '
'characters; this can lead to Unicode errors occurring. '
'Please use Unicode strings, e.g. %r.' % (name, u'Content')
)
def init_values(self):
config = self._raw_config
for valname, value in self.overrides.iteritems():
if '.' in valname:
realvalname, key = valname.split('.', 1)
config.setdefault(realvalname, {})[key] = value
else:
config[valname] = value
for name in config:
if name in self.values:
self.__dict__[name] = config[name]
del self._raw_config
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError(name)
if name not in self.values:
raise AttributeError('No such config value: %s' % name)
default = self.values[name][0]
if hasattr(default, '__call__'):
return default(self)
return default
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __delitem__(self, name):
delattr(self, name)
def __contains__(self, name):
return name in self.values
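# Illustrative only (not part of Sphinx): attribute access falls back to
# the defaults declared in ``config_values``, and callable defaults receive
# the config object itself.
def _config_demo():
    cfg = Config(None, 'conf.py', {'project': 'Demo'}, tags=None)
    cfg.init_values()
    return cfg.project, cfg.master_doc  # ('Demo', 'contents')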
| apache-2.0 | -2,239,708,325,841,953,500 | 37.179577 | 80 | 0.515448 | false |
daviddao/luminosity | sklearn-server/flask/lib/python2.7/site-packages/werkzeug/serving.py | 116 | 25317 | # -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
    However there are some caveats. Source code won't reload itself when
    changed and each time you kill the server using ``^C`` you get a
    `KeyboardInterrupt` error. While the latter is easy to solve, the first
one can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug.serving import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
You can also pass it a `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `werkzeug.script`
instead of a simple start file.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import socket
import sys
import ssl
import signal
def _get_openssl_crypto_module():
try:
from OpenSSL import crypto
except ImportError:
raise TypeError('Using ad-hoc certificates requires the pyOpenSSL '
'library.')
else:
return crypto
try:
from SocketServer import ThreadingMixIn, ForkingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from socketserver import ThreadingMixIn, ForkingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
import werkzeug
from werkzeug._internal import _log
from werkzeug._compat import reraise, wsgi_encoding_dance
from werkzeug.urls import url_parse, url_unquote
from werkzeug.exceptions import InternalServerError
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
"""A request handler that implements WSGI dispatching."""
@property
def server_version(self):
return 'Werkzeug/' + werkzeug.__version__
def make_environ(self):
request_url = url_parse(self.path)
def shutdown_server():
self.server.shutdown_signal = True
url_scheme = self.server.ssl_context is None and 'http' or 'https'
path_info = url_unquote(request_url.path)
environ = {
'wsgi.version': (1, 0),
'wsgi.url_scheme': url_scheme,
'wsgi.input': self.rfile,
'wsgi.errors': sys.stderr,
'wsgi.multithread': self.server.multithread,
'wsgi.multiprocess': self.server.multiprocess,
'wsgi.run_once': False,
'werkzeug.server.shutdown': shutdown_server,
'SERVER_SOFTWARE': self.server_version,
'REQUEST_METHOD': self.command,
'SCRIPT_NAME': '',
'PATH_INFO': wsgi_encoding_dance(path_info),
'QUERY_STRING': wsgi_encoding_dance(request_url.query),
'CONTENT_TYPE': self.headers.get('Content-Type', ''),
'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
'REMOTE_ADDR': self.client_address[0],
'REMOTE_PORT': self.client_address[1],
'SERVER_NAME': self.server.server_address[0],
'SERVER_PORT': str(self.server.server_address[1]),
'SERVER_PROTOCOL': self.request_version
}
for key, value in self.headers.items():
key = 'HTTP_' + key.upper().replace('-', '_')
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
environ[key] = value
if request_url.netloc:
environ['HTTP_HOST'] = request_url.netloc
return environ
def run_wsgi(self):
if self.headers.get('Expect', '').lower().strip() == '100-continue':
self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n')
environ = self.make_environ()
headers_set = []
headers_sent = []
def write(data):
assert headers_set, 'write() before start_response'
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
try:
code, msg = status.split(None, 1)
except ValueError:
code, msg = status, ""
self.send_response(int(code), msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if 'content-length' not in header_keys:
self.close_connection = True
self.send_header('Connection', 'close')
if 'server' not in header_keys:
self.send_header('Server', self.version_string())
if 'date' not in header_keys:
self.send_header('Date', self.date_time_string())
self.end_headers()
assert type(data) is bytes, 'applications must write bytes'
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
reraise(*exc_info)
finally:
exc_info = None
elif headers_set:
raise AssertionError('Headers already set')
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
if not headers_sent:
write(b'')
finally:
if hasattr(application_iter, 'close'):
application_iter.close()
application_iter = None
try:
execute(self.server.app)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e, environ)
except Exception:
if self.server.passthrough_errors:
raise
from werkzeug.debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except Exception:
pass
self.server.log('error', 'Error on request:\n%s',
traceback.plaintext)
def handle(self):
"""Handles a request ignoring dropped connections."""
rv = None
try:
rv = BaseHTTPRequestHandler.handle(self)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e)
except Exception:
if self.server.ssl_context is None or not is_ssl_error():
raise
if self.server.shutdown_signal:
self.initiate_shutdown()
return rv
def initiate_shutdown(self):
"""A horrible, horrible way to kill the server for Python 2.6 and
later. It's the best we can do.
"""
# Windows does not provide SIGKILL, go with SIGTERM then.
sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
# reloader active
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
os.kill(os.getpid(), sig)
# python 2.7
self.server._BaseServer__shutdown_request = True
# python 2.6
self.server._BaseServer__serving = False
def connection_dropped(self, error, environ=None):
"""Called if the connection was closed by the client. By default
nothing happens.
"""
def handle_one_request(self):
"""Handle a single HTTP request."""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
elif self.parse_request():
return self.run_wsgi()
def send_response(self, code, message=None):
"""Send the response header and log the response code."""
self.log_request(code)
if message is None:
message = code in self.responses and self.responses[code][0] or ''
if self.request_version != 'HTTP/0.9':
hdr = "%s %d %s\r\n" % (self.protocol_version, code, message)
self.wfile.write(hdr.encode('ascii'))
def version_string(self):
return BaseHTTPRequestHandler.version_string(self).strip()
def address_string(self):
return self.client_address[0]
def log_request(self, code='-', size='-'):
self.log('info', '"%s" %s %s', self.requestline, code, size)
def log_error(self, *args):
self.log('error', *args)
def log_message(self, format, *args):
self.log('info', format, *args)
def log(self, type, message, *args):
_log(type, '%s - - [%s] %s\n' % (self.address_string(),
self.log_date_time_string(),
message % args))
#: backwards compatible name if someone is subclassing it
BaseRequestHandler = WSGIRequestHandler
def generate_adhoc_ssl_pair(cn=None):
from random import random
crypto = _get_openssl_crypto_module()
# pretty damn sure that this is not actually accepted by anyone
if cn is None:
cn = '*'
cert = crypto.X509()
cert.set_serial_number(int(random() * sys.maxsize))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
subject = cert.get_subject()
subject.CN = cn
subject.O = 'Dummy Certificate'
issuer = cert.get_issuer()
issuer.CN = 'Untrusted Authority'
issuer.O = 'Self-Signed'
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 1024)
cert.set_pubkey(pkey)
cert.sign(pkey, 'md5')
return cert, pkey
def make_ssl_devcert(base_path, host=None, cn=None):
"""Creates an SSL key for development. This should be used instead of
the ``'adhoc'`` key which generates a new cert on each server start.
It accepts a path for where it should store the key and cert and
either a host or CN. If a host is given it will use the CN
``*.host/CN=host``.
For more information see :func:`run_simple`.
.. versionadded:: 0.9
:param base_path: the path to the certificate and key. The extension
``.crt`` is added for the certificate, ``.key`` is
added for the key.
:param host: the name of the host. This can be used as an alternative
for the `cn`.
:param cn: the `CN` to use.
"""
from OpenSSL import crypto
if host is not None:
cn = '*.%s/CN=%s' % (host, host)
cert, pkey = generate_adhoc_ssl_pair(cn=cn)
cert_file = base_path + '.crt'
pkey_file = base_path + '.key'
with open(cert_file, 'wb') as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(pkey_file, 'wb') as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
return cert_file, pkey_file
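# e.g. (illustrative; the path is hypothetical):
#
#   cert, pkey = make_ssl_devcert('/tmp/devcert', host='localhost')
#   # -> ('/tmp/devcert.crt', '/tmp/devcert.key'), which can then be passed
#   # along as run_simple(..., ssl_context=(cert, pkey))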
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
crypto = _get_openssl_crypto_module()
import tempfile
import atexit
cert, pkey = generate_adhoc_ssl_pair()
cert_handle, cert_file = tempfile.mkstemp()
pkey_handle, pkey_file = tempfile.mkstemp()
atexit.register(os.remove, pkey_file)
atexit.register(os.remove, cert_file)
os.write(cert_handle, crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
os.write(pkey_handle, crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
os.close(cert_handle)
os.close(pkey_handle)
ctx = load_ssl_context(cert_file, pkey_file)
return ctx
def load_ssl_context(cert_file, pkey_file=None, protocol=None):
"""Loads SSL context from cert/private key files and optional protocol.
Many parameters are directly taken from the API of
:py:class:`ssl.SSLContext`.
:param cert_file: Path of the certificate to use.
:param pkey_file: Path of the private key to use. If not given, the key
will be obtained from the certificate file.
:param protocol: One of the ``PROTOCOL_*`` constants in the stdlib ``ssl``
module. Defaults to ``PROTOCOL_SSLv23``.
"""
if protocol is None:
protocol = ssl.PROTOCOL_SSLv23
ctx = _SSLContext(protocol)
ctx.load_cert_chain(cert_file, pkey_file)
return ctx
class _SSLContext(object):
'''A dummy class with a small subset of Python3's ``ssl.SSLContext``, only
intended to be used with and by Werkzeug.'''
def __init__(self, protocol):
self._protocol = protocol
self._certfile = None
self._keyfile = None
self._password = None
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._certfile = certfile
self._keyfile = keyfile or certfile
self._password = password
def wrap_socket(self, sock, **kwargs):
return ssl.wrap_socket(sock, keyfile=self._keyfile,
certfile=self._certfile,
ssl_version=self._protocol, **kwargs)
def is_ssl_error(error=None):
"""Checks if the given error (or the current one) is an SSL error."""
exc_types = (ssl.SSLError,)
try:
from OpenSSL.SSL import Error
exc_types += (Error,)
except ImportError:
pass
if error is None:
error = sys.exc_info()[1]
return isinstance(error, exc_types)
def select_ip_version(host, port):
"""Returns AF_INET4 or AF_INET6 depending on where to connect to."""
# disabled due to problems with current ipv6 implementations
# and various operating systems. Probably this code also is
# not supposed to work, but I can't come up with any other
# ways to implement this.
# try:
# info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
# socket.SOCK_STREAM, 0,
# socket.AI_PASSIVE)
# if info:
# return info[0][0]
# except socket.gaierror:
# pass
if ':' in host and hasattr(socket, 'AF_INET6'):
return socket.AF_INET6
return socket.AF_INET
class BaseWSGIServer(HTTPServer, object):
"""Simple single-threaded, single-process WSGI server."""
multithread = False
multiprocess = False
request_queue_size = 128
def __init__(self, host, port, app, handler=None,
passthrough_errors=False, ssl_context=None):
if handler is None:
handler = WSGIRequestHandler
self.address_family = select_ip_version(host, port)
HTTPServer.__init__(self, (host, int(port)), handler)
self.app = app
self.passthrough_errors = passthrough_errors
self.shutdown_signal = False
if ssl_context is not None:
if isinstance(ssl_context, tuple):
ssl_context = load_ssl_context(*ssl_context)
if ssl_context == 'adhoc':
ssl_context = generate_adhoc_ssl_context()
self.socket = ssl_context.wrap_socket(self.socket,
server_side=True)
self.ssl_context = ssl_context
else:
self.ssl_context = None
def log(self, type, message, *args):
_log(type, message, *args)
def serve_forever(self):
self.shutdown_signal = False
try:
HTTPServer.serve_forever(self)
except KeyboardInterrupt:
pass
finally:
self.server_close()
def handle_error(self, request, client_address):
if self.passthrough_errors:
raise
else:
return HTTPServer.handle_error(self, request, client_address)
def get_request(self):
con, info = self.socket.accept()
return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
"""A WSGI server that does threading."""
multithread = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
"""A WSGI server that does forking."""
multiprocess = True
def __init__(self, host, port, app, processes=40, handler=None,
passthrough_errors=False, ssl_context=None):
BaseWSGIServer.__init__(self, host, port, app, handler,
passthrough_errors, ssl_context)
self.max_children = processes
def make_server(host, port, app=None, threaded=False, processes=1,
request_handler=None, passthrough_errors=False,
ssl_context=None):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and "
"multi process server.")
elif threaded:
return ThreadedWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
elif processes > 1:
return ForkingWSGIServer(host, port, app, processes, request_handler,
passthrough_errors, ssl_context)
else:
return BaseWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
def is_running_from_reloader():
"""Checks if the application is running from within the Werkzeug
reloader subprocess.
.. versionadded:: 0.10
"""
return os.environ.get('WERKZEUG_RUN_MAIN') == 'true'
def run_simple(hostname, port, application, use_reloader=False,
use_debugger=False, use_evalex=True,
extra_files=None, reloader_interval=1,
reloader_type='auto', threaded=False, processes=1,
request_handler=None, static_files=None,
passthrough_errors=False, ssl_context=None):
"""Start a WSGI application. Optional features include a reloader,
multithreading and fork support.
This function has a command-line interface too::
python -m werkzeug.serving --help
.. versionadded:: 0.5
`static_files` was added to simplify serving of static files as well
as `passthrough_errors`.
.. versionadded:: 0.6
support for SSL was added.
.. versionadded:: 0.8
Added support for automatically loading a SSL context from certificate
file and private key.
.. versionadded:: 0.9
Added command-line interface.
.. versionadded:: 0.10
Improved the reloader and added support for changing the backend
through the `reloader_type` parameter. See :ref:`reloader`
for more information.
:param hostname: The host for the application. eg: ``'localhost'``
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param reloader_type: the type of reloader to use. The default is
auto detection. Valid values are ``'stat'`` and
``'watchdog'``. See :ref:`reloader` for more
information.
:param threaded: should the process handle each request in a separate
thread?
:param processes: if greater than 1 then handle each request in a new process
up to this maximum number of concurrent processes.
:param request_handler: optional parameter that can be used to replace
the default one. You can use this to replace it
with a different
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
subclass.
:param static_files: a dict of paths for static files. This works exactly
like :class:`SharedDataMiddleware`, it's actually
just wrapping the application in that middleware before
serving.
:param passthrough_errors: set this to `True` to disable the error catching.
This means that the server will die on errors but
it can be useful to hook debuggers in (pdb etc.)
:param ssl_context: an SSL context for the connection. Either an
:class:`ssl.SSLContext`, a tuple in the form
``(cert_file, pkey_file)``, the string ``'adhoc'`` if
the server should automatically create one, or ``None``
to disable SSL (which is the default).
"""
if use_debugger:
from werkzeug.debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
if static_files:
from werkzeug.wsgi import SharedDataMiddleware
application = SharedDataMiddleware(application, static_files)
def inner():
make_server(hostname, port, application, threaded,
processes, request_handler,
passthrough_errors, ssl_context).serve_forever()
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
display_hostname = hostname != '*' and hostname or 'localhost'
if ':' in display_hostname:
display_hostname = '[%s]' % display_hostname
quit_msg = '(Press CTRL+C to quit)'
_log('info', ' * Running on %s://%s:%d/ %s', ssl_context is None
and 'http' or 'https', display_hostname, port, quit_msg)
if use_reloader:
# Create and destroy a socket so that any exceptions are raised before
# we spawn a separate Python interpreter and lose this ability.
address_family = select_ip_version(hostname, port)
test_socket = socket.socket(address_family, socket.SOCK_STREAM)
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
test_socket.bind((hostname, port))
test_socket.close()
from ._reloader import run_with_reloader
run_with_reloader(inner, extra_files, reloader_interval,
reloader_type)
else:
inner()
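# Usage sketch (illustrative, not part of the original module): a trivial
# WSGI app for run_simple. The name _demo_app and port 5000 are assumptions
# made for this example only.
def _demo_app(environ, start_response):
    # Answer every request with a short plain-text body.
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['Hello from run_simple\n']
# Kept commented out so importing this module stays side-effect free:
#   run_simple('localhost', 5000, _demo_app, use_reloader=True)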
def run_with_reloader(*args, **kwargs):
    # People keep using undocumented APIs. Please do not use this function;
    # we do not guarantee that it will continue working.
from ._reloader import run_with_reloader
return run_with_reloader(*args, **kwargs)
def main():
'''A simple command-line interface for :py:func:`run_simple`.'''
# in contrast to argparse, this works at least under Python < 2.7
import optparse
from werkzeug.utils import import_string
parser = optparse.OptionParser(
usage='Usage: %prog [options] app_module:app_object')
parser.add_option('-b', '--bind', dest='address',
help='The hostname:port the app should listen on.')
parser.add_option('-d', '--debug', dest='use_debugger',
action='store_true', default=False,
help='Use Werkzeug\'s debugger.')
parser.add_option('-r', '--reload', dest='use_reloader',
action='store_true', default=False,
help='Reload Python process if modules change.')
options, args = parser.parse_args()
hostname, port = None, None
if options.address:
address = options.address.split(':')
hostname = address[0]
if len(address) > 1:
port = address[1]
if len(args) != 1:
        sys.stdout.write('No application supplied, or too many. See --help\n')
sys.exit(1)
app = import_string(args[0])
run_simple(
hostname=(hostname or '127.0.0.1'), port=int(port or 5000),
application=app, use_reloader=options.use_reloader,
use_debugger=options.use_debugger
)
if __name__ == '__main__':
main()
| bsd-3-clause | 4,549,302,009,272,451,000 | 36.618128 | 81 | 0.596121 | false |
lambdamusic/testproject | konproj/libs/django_extensions/management/commands/runscript.py | 5 | 5776 | from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from optparse import make_option
import sys
import os
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
def vararg_callback(option, opt_str, opt_value, parser):
parser.rargs.insert(0, opt_value)
value = []
for arg in parser.rargs:
# stop on --foo like options
if arg[:2] == "--" and len(arg) > 2:
break
# stop on -a like options
if arg[:1] == "-":
break
value.append(arg)
del parser.rargs[:len(value)]
setattr(parser.values, option.dest, value)
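# Illustrative sketch (not from the original): with the callback above,
#   ./manage.py runscript myscript --script-args foo bar -v2
# collects ['foo', 'bar'] into options.script_args and stops at '-v2'.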
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--fixtures', action='store_true', dest='infixtures', default=False,
help='Only look in app.fixtures subdir'),
make_option('--noscripts', action='store_true', dest='noscripts', default=False,
help='Look in app.scripts subdir'),
make_option('-s', '--silent', action='store_true', dest='silent', default=False,
help='Run silently, do not show errors and tracebacks'),
make_option('--no-traceback', action='store_true', dest='no_traceback', default=False,
help='Do not show tracebacks'),
make_option('--script-args', action='callback', callback=vararg_callback, type='string',
help='Space-separated argument list to be passed to the scripts. Note that the '
'same arguments will be passed to all named scripts.'),
)
help = 'Runs a script in django context.'
args = "script [script ...]"
def handle(self, *scripts, **options):
from django.db.models import get_apps
NOTICE = self.style.SQL_TABLE
NOTICE2 = self.style.SQL_FIELD
ERROR = self.style.ERROR
ERROR2 = self.style.NOTICE
subdirs = []
if not options.get('noscripts'):
subdirs.append('scripts')
if options.get('infixtures'):
subdirs.append('fixtures')
verbosity = int(options.get('verbosity', 1))
show_traceback = options.get('traceback', True)
if show_traceback is None:
# XXX: traceback is set to None from Django ?
show_traceback = True
no_traceback = options.get('no_traceback', False)
if no_traceback:
show_traceback = False
silent = options.get('silent', False)
if silent:
verbosity = 0
if len(subdirs) < 1:
print NOTICE("No subdirs to run left.")
return
if len(scripts) < 1:
print ERROR("Script name required.")
return
def run_script(mod, *script_args):
try:
mod.run(*script_args)
except Exception, e:
if silent:
return
if verbosity > 0:
print ERROR("Exception while running run() in '%s'" % mod.__name__)
if show_traceback:
raise
def my_import(mod):
if verbosity > 1:
print NOTICE("Check for %s" % mod)
try:
t = __import__(mod, [], [], [" "])
#if verbosity > 1:
# print NOTICE("Found script %s ..." % mod)
if hasattr(t, "run"):
if verbosity > 1:
print NOTICE2("Found script '%s' ..." % mod)
#if verbosity > 1:
# print NOTICE("found run() in %s. executing..." % mod)
return t
else:
if verbosity > 1:
print ERROR2("Find script '%s' but no run() function found." % mod)
except ImportError:
return False
def find_modules_for_script(script):
""" find script module which contains 'run' attribute """
modules = []
# first look in apps
for app in get_apps():
app_name = app.__name__.split(".")[:-1] # + ['fixtures']
for subdir in subdirs:
mod = my_import(".".join(app_name + [subdir, script]))
if mod:
modules.append(mod)
# try app.DIR.script import
sa = script.split(".")
for subdir in subdirs:
nn = ".".join(sa[:-1] + [subdir, sa[-1]])
mod = my_import(nn)
if mod:
modules.append(mod)
# try direct import
if script.find(".") != -1:
mod = my_import(script)
if mod:
modules.append(mod)
return modules
if options.get('script_args'):
script_args = options['script_args']
else:
script_args = []
for script in scripts:
modules = find_modules_for_script(script)
if not modules:
if verbosity > 0 and not silent:
print ERROR("No module for script '%s' found" % script)
for mod in modules:
if verbosity > 1:
print NOTICE2("Running script '%s' ..." % mod.__name__)
run_script(mod, *script_args)
# Backwards compatibility for Django r9110
if not [opt for opt in Command.option_list if opt.dest == 'verbosity']:
Command.option_list += (
make_option('--verbosity', '-v', action="store", dest="verbosity",
default='1', type='choice', choices=['0', '1', '2'],
help="Verbosity level; 0=minimal output, 1=normal output, 2=all output"),
)
| gpl-2.0 | 1,356,080,805,958,714,000 | 35.556962 | 96 | 0.515409 | false |
oiertwo/vampyr | pdftoexcel.py | 1 | 8194 | __author__ = 'oier'
import os
import numpy as np
from data.parameters import true_params, false_params
import distance as dist
def pdftotext(path):
os.system("pdftotext {data}".format(data=path))
return(path.replace(".pdf",".txt"))
import pandas as pd
def parse(path):
txt = pd.read_table(path, sep='\n', na_values=False, header=None)
for i in txt.index:
try :
if pd.isnull(float(txt.ix[i])) == False:
name = getname(i,txt)
print(name)
print(float(txt.ix[i]))
except :
pass
def getname(index, df):
name = ""
for i in range(0,index):
size = len(df.ix[i].to_string().split())
idxname = " ".join(df.ix[i].to_string().split()[1:size])
if (len( idxname )> 5) and idxname != None and idxname != "NaN":
name = idxname
#print(name)
return (name)
from collections import deque
def getnamedict(path):
dict = {}
numdict = {}
names = deque()
txt = pd.read_table(path, sep='\n', na_values=False, header=None)
name = ""
for i in txt.index:
try :
size = len(txt.ix[i].to_string().split())
nextname = " ".join(txt.ix[i].to_string().split()[1:size])
if (len( nextname )> 5) and \
nextname != None and \
nextname != "NaN" and \
isclean(nextname) and \
validateparam(nextname):
names.append(nextname)
dict[i] = nextname
#print(name)
#print(nextname)
if pd.isnull(float(txt.ix[i])) == False:
number = float(txt.ix[i])
numdict[names.pop()] = number
#print(number)
#print(i)
except :
pass
print(dict.keys())
print(dict.values())
print(numdict.keys())
print(numdict.values())
#organize(dict,numdict)
# print(dict[i])
def organize(names, numbers):
'''
:param names: must be dictionary
:param numbers: must be dictionary
:return: dictionary, dict[name] = number
'''
numbs = dict(numbers)
nams = dict(names)
conn1 = {}
conn2 = {}
array1 = np.array(nams.keys())
for i in numbs.keys():
actual = 100.0
inconn2 = False
key = min(nams.keys(), key=lambda k: abs(k - i))
print(" {} - {} ".format(key,i))
print(" {} - {} ".format(nams[key],numbs[i]))
'''
for j in numbs.keys():
actual = i - j
if ( actual > conn1[i] or conn1[i] == None):
if( conn2[j] == None):
conn1[i] = j
conn2[j] = actual
else:
best = j
inconn2 = True
else if (conn2[j] != None ):
'''
return()
def isclean(word):
w = str(word)
test = True
strg = "_[]*"
bool = True
for i in range(len(strg)):
c = strg[i]
bool = bool or (w.find(c) != -1)
test = test and (bool)
return(test)
def validateparam(word):
t_dist = []
f_dist = []
for i in true_params:
t_dist.append(dist.levenshtein(word,i))
for i in false_params:
f_dist.append(dist.levenshtein(word, i))
print("Word: {}, T: {} , F: {}".format(word, np.min(t_dist), np.min(f_dist[0])))
if( min(t_dist) == 0):
print("TRUE")
return (True)
    if (min(f_dist) == 0):
        print("FALSE")
        return(False)
if ( np.mean(t_dist )< np.mean(f_dist) ):
print("TRUE")
return(True)
print("FALSE")
return(False)
def getmyarray(path, apath):
dict = {}
appearances = {}
names = deque()
with open(path) as f:
txt = f.readlines()
#txt = pd.read_table(path, sep='\n', na_values=False, header=None)
array_txt = pd.read_table(apath, sep='\n', header=None)
name = ""
for i in txt:
actual = i.replace("\n", '')
if(len(actual.strip()) == 0):
continue
try :
number = float(actual)
if (number > 10000000):
continue
try:
appearances[actual] += 1
except:
appearances[actual] = 1
            name = localgetmyarray(path, apath, actual, appearances[actual])
            dict[name] = actual
            print("name: {} numb: {}".format(name, actual))
except :
pass
print(dict.keys())
print(dict.values())
def localgetmyarray(path, apath, word, count):
with open(path) as f:
txt = f.readlines()
#txt = pd.read_table(path, sep='\n', na_values=False, header=None)
f = open(apath)
array_txt_str = f.read()
name = ""
idx = [k.start() for k in re.finditer(word, array_txt_str)][count -1]
opt = len(array_txt_str)
apps ={}
for i in txt:
try :
nextname = i.replace("\n", '')
try :
float(nextname)
except :
if (len( nextname )> 5) and nextname != None and \
nextname != "NaN" and isclean(nextname):
try:
apps[nextname ] += 1
except:
apps[nextname] = 1
id = [k for k in re.finditer(nextname, array_txt_str)][apps[nextname]-1].start()
myopt = idx - id
if (myopt > 0) and (myopt < opt):
opt = myopt
name = nextname
except :
pass
print("optimum: {} number: {} found: {}".format(opt, word, name))
f.close()
return name
#DOWN FROM HERE JAVA+PYTHON PDF TO TEXT:
import re
import extractText as txt
def remove_unwanted(str):
    s = re.sub(r'\[.*?\]', '', str)
    s = s.replace("*", "")
s = s.replace("\n", "")
return (s)
def line_control(str):
    # Return True when the line should be skipped (it cannot be a data line),
    # False when it is worth parsing.
if(len(str) < 15):
return True
if(len(str) == 1):
return True
if(len(str.split(" ")) > 10):
return True
return False
def line_parser(str):
item = ''
valor = ''
dict = {}
sline = str.split(" ")
helper = {}
pos = 0
for schar in sline:
try:
#dict["val"]
if(len(dict.keys()) == 3 and len(sline) > 6):
helper[pos] = dict
dict = {}
pos += 1
dict["val"] #to force failure/raise ofd exception
except:
try:
valor = ''
table = [char for char in schar if '/' in char]
if schar.find('%') != -1:
valor = schar
if len(table) > 0:
valor = schar
if(valor != ''):
dict["val"] = valor
continue
except:
pass
try:
#dict["num"]
if(len(dict.keys()) == 3 and len(sline) > 6):
helper[pos] = dict
dict = {}
pos += 1
dict["num"]
except:
try:
num = float(schar)
if(num > 10000):
return({})
dict["num"] = num
continue
except:
pass
try:
dict["item"] += " " + schar
except:
dict["item"] = schar
helper[pos] = dict
return(helper)
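# Worked sketch (hypothetical input, not from the original): for a report
# line such as "GLUCOSA 95 70/110", line_parser files 'GLUCOSA' under
# 'item', 95.0 under 'num' and '70/110' under 'val' (the '/' marks it as a
# reference range), so it returns {0: {'item': 'GLUCOSA', 'num': 95.0,
# 'val': '70/110'}}.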
def getfromjava(path, dest=''):
if (dest == ''):
d = path.replace(".pdf", ".txt")
txt.extractText(path, d, '')
with open(d) as f:
text = f.readlines()
for line in text:
sline = remove_unwanted(line)
if(line_control(sline) == True):
continue
dict = line_parser(sline)
for i in dict.keys():
if(len(dict[i].keys()) == 3):
print("ITEM: {} NUM: {} VAL: {}".format(dict[i]["item"], dict[i]["num"], dict[i]["val"]))
| mit | -6,777,632,973,228,448,000 | 24.84858 | 105 | 0.461679 | false |
russdill/juniper-vpn-py | tncc.py | 1 | 22051 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import logging
import StringIO
import mechanize
import cookielib
import struct
import socket
import ssl
import base64
import collections
import zlib
import HTMLParser
import netifaces
import urlgrabber
import urllib2
import platform
import json
import datetime
import pyasn1_modules.pem
import pyasn1_modules.rfc2459
import pyasn1.codec.der.decoder
import xml.etree.ElementTree
ssl._create_default_https_context = ssl._create_unverified_context
debug = False
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG if debug else logging.INFO)
MSG_POLICY = 0x58316
MSG_FUNK_PLATFORM = 0x58301
MSG_FUNK = 0xa4c01
# 0013 - Message
def decode_0013(buf, indent):
logging.debug('%scmd 0013 (Message) %d bytes', indent, len(buf))
ret = collections.defaultdict(list)
while (len(buf) >= 12):
length, cmd, out = decode_packet(buf, indent + " ")
buf = buf[length:]
ret[cmd].append(out)
return ret
# 0012 - u32
def decode_0012(buf, indent):
logging.debug('%scmd 0012 (u32) %d bytes', indent, len(buf))
return struct.unpack(">I", buf)
# 0016 - zlib compressed message
def decode_0016(buf, indent):
logging.debug('%scmd 0016 (compressed message) %d bytes', indent, len(buf))
_, compressed = struct.unpack(">I" + str(len(buf) - 4) + "s", buf)
buf = zlib.decompress(compressed)
ret = collections.defaultdict(list)
while (len(buf) >= 12):
length, cmd, out = decode_packet(buf, indent + " ")
buf = buf[length:]
ret[cmd].append(out)
return ret
# 0ce4 - encapsulation
def decode_0ce4(buf, indent):
logging.debug('%scmd 0ce4 (encapsulation) %d bytes', indent, len(buf))
ret = collections.defaultdict(list)
while (len(buf) >= 12):
length, cmd, out = decode_packet(buf, indent + " ")
buf = buf[length:]
ret[cmd].append(out)
return ret
# 0ce5 - string without hex prefixer
def decode_0ce5(buf, indent):
s = struct.unpack(str(len(buf)) + "s", buf)[0]
logging.debug('%scmd 0ce5 (string) %d bytes', indent, len(buf))
s = s.rstrip('\0')
logging.debug('%s', s)
return s
# 0ce7 - string with hex prefixer
def decode_0ce7(buf, indent):
id, s = struct.unpack(">I" + str(len(buf) - 4) + "s", buf)
logging.debug('%scmd 0ce7 (id %08x string) %d bytes', indent, id, len(buf))
if s.startswith('COMPRESSED:'):
typ, length, data = s.split(':', 2)
s = zlib.decompress(data)
s = s.rstrip('\0')
logging.debug('%s', s)
return (id, s)
# 0cf0 - encapsulation
def decode_0cf0(buf, indent):
logging.debug('%scmd 0cf0 (encapsulation) %d bytes', indent, len(buf))
ret = dict()
cmd, _, out = decode_packet(buf, indent + " ")
ret[cmd] = out
return ret
# 0cf1 - string without hex prefixer
def decode_0cf1(buf, indent):
s = struct.unpack(str(len(buf)) + "s", buf)[0]
logging.debug('%scmd 0cf1 (string) %d bytes', indent, len(buf))
s = s.rstrip('\0')
logging.debug('%s', s)
return s
# 0cf3 - u32
def decode_0cf3(buf, indent):
ret = struct.unpack(">I", buf)
logging.debug('%scmd 0cf3 (u32) %d bytes - %d', indent, len(buf), ret[0])
return ret
def decode_packet(buf, indent=""):
cmd, _1, _2, length, _3 = struct.unpack(">IBBHI", buf[:12])
if length < 12:
raise Exception("Invalid packet, cmd %04x, _1 %02x, _2 %02x, length %d" % (cmd, _1, _2, length))
data = buf[12:length]
if length % 4:
length += 4 - (length % 4)
if cmd == 0x0013:
data = decode_0013(data, indent)
elif cmd == 0x0012:
data = decode_0012(data, indent)
elif cmd == 0x0016:
data = decode_0016(data, indent)
elif cmd == 0x0ce4:
data = decode_0ce4(data, indent)
elif cmd == 0x0ce5:
data = decode_0ce5(data, indent)
elif cmd == 0x0ce7:
data = decode_0ce7(data, indent)
elif cmd == 0x0cf0:
data = decode_0cf0(data, indent)
elif cmd == 0x0cf1:
data = decode_0cf1(data, indent)
elif cmd == 0x0cf3:
data = decode_0cf3(data, indent)
else:
logging.debug('%scmd %04x(%02x:%02x) is unknown, length %d', indent, cmd, _1, _2, length)
data = None
return length, cmd, data
def encode_packet(cmd, align, buf):
align = 4
orig_len = len(buf)
if align > 1 and (len(buf) + 12) % align:
buf += struct.pack(str(align - len(buf) % align) + "x")
return struct.pack(">IBBHI", cmd, 0xc0, 0x00, orig_len + 12, 0x0000583) + buf
# 0013 - Message
def encode_0013(buf):
return encode_packet(0x0013, 4, buf)
# 0012 - u32
def encode_0012(i):
return encode_packet(0x0012, 1, struct.pack("<I", i))
# 0ce4 - encapsulation
def encode_0ce4(buf):
return encode_packet(0x0ce4, 4, buf)
# 0ce5 - string without hex prefixer
def encode_0ce5(s):
return encode_packet(0x0ce5, 1, struct.pack(str(len(s)) + "s", s))
# 0ce7 - string with hex prefixer
def encode_0ce7(s, prefix):
s += '\0'
return encode_packet(0x0ce7, 1, struct.pack(">I" + str(len(s)) + "sx",
prefix, s))
# 0cf0 - encapsulation
def encode_0cf0(buf):
return encode_packet(0x0cf0, 4, buf)
# 0cf1 - string without hex prefixer
def encode_0cf1(s):
s += '\0'
    return encode_packet(0x0cf1, 1, struct.pack(str(len(s)) + "s", s))
# 0cf3 - u32
def encode_0cf3(i):
    return encode_packet(0x0cf3, 1, struct.pack("<I", i))
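# Round-trip sketch (not in the original): frame a policy string with the
# encoders above and parse it back with decode_packet.
def _demo_roundtrip():
    raw = encode_0013(encode_0ce4(encode_0ce7('policy request\x00v4', MSG_POLICY)))
    length, cmd, data = decode_packet(raw)
    # cmd is 0x0013; data maps the inner command ids to decoded payloads.
    return length, cmd, data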
class x509cert(object):
@staticmethod
def decode_names(data):
ret = dict()
for i in range(0, len(data)):
for attr in data[i]:
type = str(attr.getComponentByPosition(0).getComponentByName('type'))
value = str(attr.getComponentByPosition(0).getComponentByName('value'))
value = str(pyasn1.codec.der.decoder.decode(value)[0])
try:
ret[type].append(value)
except:
ret[type] = [value]
return ret
@staticmethod
def decode_time(tm):
tm_str = tm.getComponent()._value
tz = 0
if tm_str[-1] == 'Z':
tz = 0
tm_str = tm_str[:-1]
elif '-' in tm_str:
tm_str, tz = tm_str.split('-')
tz = datetime.datetime.strptime(tz, '%H%M')
tz = -(tz.hour * 60 + tz.minute)
elif '+' in tm_str:
tm_str, tz = tm_str.split('+')
tz = datetime.datetime.strptime(tz, '%H%M')
tz = tz.hour * 60 + tz.minute
else:
logging.warn('No timezone in certificate')
if tm.getName() == 'generalTime':
formats = ['%Y%m%d%H%M%S.%f', '%Y%m%d%H%M%S', '%Y%m%d%H%M', '%Y%m%d%H']
elif tm.getName() == 'utcTime':
formats = ['%y%m%d%H%M%S', '%y%m%d%H%M']
else:
raise Exception('Unknown time format')
for fmt in formats:
try:
ret = datetime.datetime.strptime(tm_str, fmt)
ret += datetime.timedelta(minutes=tz)
return ret
except:
pass
raise Exception('Could not parse certificate time')
def __init__(self, cert_file):
with open(cert_file, 'r') as f:
self.data = f.read()
f = StringIO.StringIO(self.data)
substrate = pyasn1_modules.pem.readPemFromFile(f)
cert = pyasn1.codec.der.decoder.decode(substrate, pyasn1_modules.rfc2459.Certificate())[0]
tbs = cert.getComponentByName('tbsCertificate')
self.issuer = self.decode_names(tbs.getComponentByName('issuer'))
validity = tbs.getComponentByName('validity')
self.not_before = self.decode_time(validity.getComponentByName("notBefore"))
self.not_after = self.decode_time(validity.getComponentByName("notAfter"))
self.subject = self.decode_names(tbs.getComponentByName('subject'))
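# Usage sketch (illustrative): inspecting a PEM client certificate with the
# class above; 'client.pem' is a hypothetical path.
#   cert = x509cert('client.pem')
#   cert.subject, cert.not_before, cert.not_after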
class tncc(object):
def __init__(self, vpn_host, device_id=None, funk=None, platform=None, hostname=None, mac_addrs=[], certs=[]):
self.vpn_host = vpn_host
self.path = '/dana-na/'
self.funk = funk
self.platform = platform
self.hostname = hostname
self.mac_addrs = mac_addrs
self.avail_certs = certs
self.deviceid = device_id
self.br = mechanize.Browser()
self.cj = cookielib.LWPCookieJar()
self.br.set_cookiejar(self.cj)
# Browser options
self.br.set_handle_equiv(True)
self.br.set_handle_redirect(True)
self.br.set_handle_referer(True)
self.br.set_handle_robots(False)
# Follows refresh 0 but not hangs on refresh > 0
self.br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),
max_time=1)
# Want debugging messages?
if debug:
self.br.set_debug_http(True)
self.br.set_debug_redirects(True)
self.br.set_debug_responses(True)
self.user_agent = 'Neoteris HC Http'
self.br.addheaders = [('User-agent', self.user_agent)]
def find_cookie(self, name):
for cookie in self.cj:
if cookie.name == name:
return cookie
return None
def set_cookie(self, name, value):
cookie = cookielib.Cookie(version=0, name=name, value=value,
port=None, port_specified=False, domain=self.vpn_host,
domain_specified=True, domain_initial_dot=False, path=self.path,
path_specified=True, secure=True, expires=None, discard=True,
comment=None, comment_url=None, rest=None, rfc2109=False)
self.cj.set_cookie(cookie)
def parse_response(self):
# Read in key/token fields in HTTP response
response = dict()
last_key = ''
for line in self.r.readlines():
line = line.strip()
# Note that msg is too long and gets wrapped, handle it special
if last_key == 'msg' and len(line):
response['msg'] += line
else:
key = ''
try:
key, val = line.split('=', 1)
response[key] = val
except:
pass
last_key = key
return response
def parse_policy_response(self, msg_data):
# The decompressed data is HTMLish, decode it. The value="" of each
# tag is the data we want.
objs = []
class ParamHTMLParser(HTMLParser.HTMLParser):
def handle_starttag(self, tag, attrs):
if tag.lower() == 'param':
for key, value in attrs:
if key.lower() == 'value':
# It's made up of a bunch of key=value pairs separated
# by semicolons
d = dict()
for field in value.split(';'):
field = field.strip()
try:
key, value = field.split('=', 1)
d[key] = value
except:
pass
objs.append(d)
p = ParamHTMLParser()
p.feed(msg_data)
p.close()
return objs
def parse_funk_response(self, msg_data):
e = xml.etree.ElementTree.fromstring(msg_data)
req_certs = dict()
for cert in e.find('AttributeRequest').findall('CertData'):
dns = dict()
cert_id = cert.attrib['Id']
for attr in cert.findall('Attribute'):
name = attr.attrib['Name']
value = attr.attrib['Value']
attr_type = attr.attrib['Type']
if attr_type == 'DN':
dns[name] = dict(n.strip().split('=') for n in value.split(','))
else:
# Unknown attribute type
pass
req_certs[cert_id] = dns
return req_certs
def gen_funk_platform(self):
# We don't know if the xml parser on the other end is fully complaint,
# just format a string like it expects.
msg = "<FunkMessage VendorID='2636' ProductID='1' Version='1' Platform='%s' ClientType='Agentless'> " % self.platform
msg += "<ClientAttributes SequenceID='-1'> "
def add_attr(key, val):
return "<Attribute Name='%s' Value='%s' />" % (key, val)
msg += add_attr('Platform', self.platform)
if self.hostname:
msg += add_attr(self.hostname, 'NETBIOSName') # Reversed
for mac in self.mac_addrs:
msg += add_attr(mac, 'MACAddress') # Reversed
msg += "</ClientAttributes> </FunkMessage>"
return encode_0ce7(msg, MSG_FUNK_PLATFORM)
def gen_funk_present(self):
msg = "<FunkMessage VendorID='2636' ProductID='1' Version='1' Platform='%s' ClientType='Agentless'> " % self.platform
msg += "<Present SequenceID='0'></Present> </FunkMessage>"
return encode_0ce7(msg, MSG_FUNK)
def gen_funk_response(self, certs):
msg = "<FunkMessage VendorID='2636' ProductID='1' Version='1' Platform='%s' ClientType='Agentless'> " % self.platform
msg += "<ClientAttributes SequenceID='0'> "
msg += "<Attribute Name='Platform' Value='%s' />" % self.platform
for name, value in certs.iteritems():
msg += "<Attribute Name='%s' Value='%s' />" % (name, value.data.strip())
msg += "<Attribute Name='%s' Value='%s' />" % (name, value.data.strip())
msg += "</ClientAttributes> </FunkMessage>"
return encode_0ce7(msg, MSG_FUNK)
def gen_policy_request(self):
policy_blocks = collections.OrderedDict({
'policy_request': {
'message_version': '3'
},
'esap': {
'esap_version': 'NOT_AVAILABLE',
'fileinfo': 'NOT_AVAILABLE',
'has_file_versions': 'YES',
'needs_exact_sdk': 'YES',
'opswat_sdk_version': '3'
},
'system_info': {
'os_version': '2.6.2',
'sp_version': '0',
'hc_mode': 'userMode'
}
})
msg = ''
for policy_key, policy_val in policy_blocks.iteritems():
v = ''.join([ '%s=%s;' % (k, v) for k, v in policy_val.iteritems()])
msg += '<parameter name="%s" value="%s">' % (policy_key, v)
return encode_0ce7(msg, 0xa4c18)
def gen_policy_response(self, policy_objs):
# Make a set of policies
policies = set()
for entry in policy_objs:
if 'policy' in entry:
policies.add(entry['policy'])
# Try to determine on policy name whether the response should be OK
# or NOTOK. Default to OK if we don't know, this may need updating.
msg = ''
for policy in policies:
msg += '\npolicy:%s\nstatus:' % policy
if 'Unsupported' in policy or 'Deny' in policy:
msg += 'NOTOK\nerror:Unknown error'
elif 'Required' in policy:
msg += 'OK\n'
else:
# Default action
msg += 'OK\n'
return encode_0ce7(msg, MSG_POLICY)
def get_cookie(self, dspreauth=None, dssignin=None):
if dspreauth is None or dssignin is None:
self.r = self.br.open('https://' + self.vpn_host)
else:
try:
self.cj.set_cookie(dspreauth)
except:
self.set_cookie('DSPREAUTH', dspreauth)
try:
self.cj.set_cookie(dssignin)
except:
self.set_cookie('DSSIGNIN', dssignin)
inner = self.gen_policy_request()
inner += encode_0ce7('policy request\x00v4', MSG_POLICY)
if self.funk:
inner += self.gen_funk_platform()
inner += self.gen_funk_present()
msg_raw = encode_0013(encode_0ce4(inner) + encode_0ce5('Accept-Language: en') + encode_0cf3(1))
logging.debug('Sending packet -')
decode_packet(msg_raw)
post_attrs = {
'connID': '0',
'timestamp': '0',
'msg': base64.b64encode(msg_raw),
'firsttime': '1'
}
if self.deviceid:
post_attrs['deviceid'] = self.deviceid
post_data = ''.join([ '%s=%s;' % (k, v) for k, v in post_attrs.iteritems()])
self.r = self.br.open('https://' + self.vpn_host + self.path + 'hc/tnchcupdate.cgi', post_data)
# Parse the data returned into a key/value dict
response = self.parse_response()
# msg has the stuff we want, it's base64 encoded
logging.debug('Receiving packet -')
msg_raw = base64.b64decode(response['msg'])
_1, _2, msg_decoded = decode_packet(msg_raw)
# Within msg, there is a field of data
sub_strings = msg_decoded[0x0ce4][0][0x0ce7]
# Pull the data out of the 'value' key in the htmlish stuff returned
policy_objs = []
req_certs = dict()
for str_id, sub_str in sub_strings:
if str_id == MSG_POLICY:
policy_objs += self.parse_policy_response(sub_str)
elif str_id == MSG_FUNK:
req_certs = self.parse_funk_response(sub_str)
if debug:
for obj in policy_objs:
if 'policy' in obj:
logging.debug('policy %s', obj['policy'])
for key, val in obj.iteritems():
if key != 'policy':
logging.debug('\t%s %s', key, val)
# Try to locate the required certificates
certs = dict()
for cert_id, req_dns in req_certs.iteritems():
for cert in self.avail_certs:
fail = False
for dn_name, dn_vals in req_dns.iteritems():
for name, val in dn_vals.iteritems():
try:
if dn_name == 'IssuerDN':
assert val in cert.issuer[name]
else:
logging.warn('Unknown DN type %s', str(dn_name))
raise Exception()
except:
fail = True
break
if fail:
break
if not fail:
certs[cert_id] = cert
break
if cert_id not in certs:
logging.warn('Could not find certificate for %s', str(req_dns))
inner = ''
if certs:
inner += self.gen_funk_response(certs)
inner += self.gen_policy_response(policy_objs)
msg_raw = encode_0013(encode_0ce4(inner) + encode_0ce5('Accept-Language: en'))
logging.debug('Sending packet -')
decode_packet(msg_raw)
post_attrs = {
'connID': '1',
'msg': base64.b64encode(msg_raw),
'firsttime': '1'
}
post_data = ''.join([ '%s=%s;' % (k, v) for k, v in post_attrs.iteritems()])
self.r = self.br.open('https://' + self.vpn_host + self.path + 'hc/tnchcupdate.cgi', post_data)
# We have a new DSPREAUTH cookie
return self.find_cookie('DSPREAUTH')
class tncc_server(object):
def __init__(self, s, t):
self.sock = s
self.tncc = t
def process_cmd(self):
        buf = self.sock.recv(1024).decode('ascii')
if not len(buf):
sys.exit(0)
cmd, buf = buf.split('\n', 1)
cmd = cmd.strip()
args = dict()
for n in buf.split('\n'):
n = n.strip()
if len(n):
key, val = n.strip().split('=', 1)
args[key] = val
if cmd == 'start':
cookie = self.tncc.get_cookie(args['Cookie'], args['DSSIGNIN'])
resp = '200\n3\n%s\n\n' % cookie.value
            self.sock.send(resp.encode('ascii'))
elif cmd == 'setcookie':
# FIXME: Support for periodic updates
dsid_value = args['Cookie']
if __name__ == "__main__":
vpn_host = sys.argv[1]
funk = 'TNCC_FUNK' in os.environ and os.environ['TNCC_FUNK'] != '0'
platform = os.environ.get('TNCC_PLATFORM', platform.system() + ' ' + platform.release())
if 'TNCC_HWADDR' in os.environ:
mac_addrs = [n.strip() for n in os.environ['TNCC_HWADDR'].split(',')]
else:
mac_addrs = []
for iface in netifaces.interfaces():
try:
mac = netifaces.ifaddresses(iface)[netifaces.AF_LINK][0]['addr']
assert mac != '00:00:00:00:00:00'
mac_addrs.append(mac)
except:
pass
hostname = os.environ.get('TNCC_HOSTNAME', socket.gethostname())
certs = []
if 'TNCC_CERTS' in os.environ:
now = datetime.datetime.now()
for f in os.environ['TNCC_CERTS'].split(','):
cert = x509cert(f.strip())
if now < cert.not_before:
logging.warn('WARNING: %s is not yet valid', f)
if now > cert.not_after:
logging.warn('WARNING: %s is expired', f)
certs.append(cert)
# \HKEY_CURRENT_USER\Software\Juniper Networks\Device Id
device_id = os.environ.get('TNCC_DEVICE_ID')
t = tncc(vpn_host, device_id, funk, platform, hostname, mac_addrs, certs)
if len(sys.argv) == 4:
dspreauth_value = sys.argv[2]
dssignin_value = sys.argv[3]
        print 'TNCC', dspreauth_value, dssignin_value
        print t.get_cookie(dspreauth_value, dssignin_value).value
else:
sock = socket.fromfd(0, socket.AF_UNIX, socket.SOCK_SEQPACKET)
server = tncc_server(sock, t)
while True:
server.process_cmd()
| lgpl-2.1 | 5,726,034,952,718,490,000 | 33.347352 | 125 | 0.539749 | false |
cjayb/mne-python | mne/datasets/tests/test_datasets.py | 6 | 6158 | import os
from os import path as op
import shutil
import zipfile
import sys
import pytest
from mne import datasets, read_labels_from_annot, write_labels_to_annot
from mne.datasets import testing
from mne.datasets._fsaverage.base import _set_montage_coreg_path
from mne.datasets.utils import _manifest_check_download
from mne.utils import (run_tests_if_main, requires_good_network, modified_env,
get_subjects_dir, ArgvSetter, _pl, use_log_level,
catch_logging, hashfunc)
subjects_dir = op.join(testing.data_path(download=False), 'subjects')
def test_datasets_basic(tmpdir):
"""Test simple dataset functions."""
# XXX 'hf_sef' and 'misc' do not conform to these standards
for dname in ('sample', 'somato', 'spm_face', 'testing', 'opm',
'bst_raw', 'bst_auditory', 'bst_resting', 'multimodal',
'bst_phantom_ctf', 'bst_phantom_elekta', 'kiloword',
'mtrf', 'phantom_4dbti',
'visual_92_categories', 'fieldtrip_cmc'):
if dname.startswith('bst'):
dataset = getattr(datasets.brainstorm, dname)
check_name = 'brainstorm.%s' % (dname,)
else:
dataset = getattr(datasets, dname)
check_name = dname
if dataset.data_path(download=False) != '':
assert isinstance(dataset.get_version(), str)
assert datasets.utils.has_dataset(check_name)
else:
assert dataset.get_version() is None
assert not datasets.utils.has_dataset(check_name)
print('%s: %s' % (dname, datasets.utils.has_dataset(check_name)))
tempdir = str(tmpdir)
# don't let it read from the config file to get the directory,
# force it to look for the default
with modified_env(**{'_MNE_FAKE_HOME_DIR': tempdir, 'SUBJECTS_DIR': None}):
assert (datasets.utils._get_path(None, 'foo', 'bar') ==
op.join(tempdir, 'mne_data'))
assert get_subjects_dir(None) is None
_set_montage_coreg_path()
sd = get_subjects_dir()
assert sd.endswith('MNE-fsaverage-data')
def _fake_fetch_file(url, destination, print_destination=False):
with open(destination, 'w') as fid:
fid.write(url)
@requires_good_network
def test_downloads(tmpdir):
"""Test dataset URL handling."""
# Try actually downloading a dataset
path = datasets._fake.data_path(path=str(tmpdir), update_path=False)
assert op.isfile(op.join(path, 'bar'))
assert datasets._fake.get_version() is None
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_good_network
def test_fetch_parcellations(tmpdir):
"""Test fetching parcellations."""
this_subjects_dir = str(tmpdir)
os.mkdir(op.join(this_subjects_dir, 'fsaverage'))
os.mkdir(op.join(this_subjects_dir, 'fsaverage', 'label'))
os.mkdir(op.join(this_subjects_dir, 'fsaverage', 'surf'))
for hemi in ('lh', 'rh'):
shutil.copyfile(
op.join(subjects_dir, 'fsaverage', 'surf', '%s.white' % hemi),
op.join(this_subjects_dir, 'fsaverage', 'surf', '%s.white' % hemi))
    # speed up by pretending we already have one of them
with open(op.join(this_subjects_dir, 'fsaverage', 'label',
'lh.aparc_sub.annot'), 'wb'):
pass
datasets.fetch_aparc_sub_parcellation(subjects_dir=this_subjects_dir)
with ArgvSetter(('--accept-hcpmmp-license',)):
datasets.fetch_hcp_mmp_parcellation(subjects_dir=this_subjects_dir)
for hemi in ('lh', 'rh'):
assert op.isfile(op.join(this_subjects_dir, 'fsaverage', 'label',
'%s.aparc_sub.annot' % hemi))
# test our annot round-trips here
kwargs = dict(subject='fsaverage', hemi='both', sort=False,
subjects_dir=this_subjects_dir)
labels = read_labels_from_annot(parc='HCPMMP1', **kwargs)
write_labels_to_annot(
labels, parc='HCPMMP1_round',
table_name='./left.fsaverage164.label.gii', **kwargs)
orig = op.join(this_subjects_dir, 'fsaverage', 'label', 'lh.HCPMMP1.annot')
first = hashfunc(orig)
new = orig[:-6] + '_round.annot'
second = hashfunc(new)
assert first == second
_zip_fnames = ['foo/foo.txt', 'foo/bar.txt', 'foo/baz.txt']
def _fake_zip_fetch(url, fname, hash_):
with zipfile.ZipFile(fname, 'w') as zipf:
with zipf.open('foo/', 'w'):
pass
for fname in _zip_fnames:
with zipf.open(fname, 'w'):
pass
@pytest.mark.skipif(sys.version_info < (3, 6),
reason="writing zip files requires python3.6 or higher")
@pytest.mark.parametrize('n_have', range(len(_zip_fnames)))
def test_manifest_check_download(tmpdir, n_have, monkeypatch):
"""Test our manifest downloader."""
monkeypatch.setattr(datasets.utils, '_fetch_file', _fake_zip_fetch)
destination = op.join(str(tmpdir), 'empty')
manifest_path = op.join(str(tmpdir), 'manifest.txt')
with open(manifest_path, 'w') as fid:
for fname in _zip_fnames:
fid.write('%s\n' % fname)
assert n_have in range(len(_zip_fnames) + 1)
assert not op.isdir(destination)
if n_have > 0:
os.makedirs(op.join(destination, 'foo'))
assert op.isdir(op.join(destination, 'foo'))
for fname in _zip_fnames:
assert not op.isfile(op.join(destination, fname))
for fname in _zip_fnames[:n_have]:
with open(op.join(destination, fname), 'w'):
pass
with catch_logging() as log:
with use_log_level(True):
url = hash_ = '' # we mock the _fetch_file so these are not used
_manifest_check_download(manifest_path, destination, url, hash_)
log = log.getvalue()
n_missing = 3 - n_have
assert ('%d file%s missing from' % (n_missing, _pl(n_missing))) in log
for want in ('Extracting missing', 'Successfully '):
if n_missing > 0:
assert want in log
else:
assert want not in log
assert op.isdir(destination)
for fname in _zip_fnames:
assert op.isfile(op.join(destination, fname))
run_tests_if_main()
| bsd-3-clause | -5,556,583,623,025,122,000 | 38.474359 | 79 | 0.620656 | false |
wufangjie/leetcode | 640. Solve the Equation.py | 1 | 1269 | class Solution(object):
def solveEquation(self, equation):
"""
:type equation: str
:rtype: str
"""
left, right = equation.split('=')
a1, b1 = self._parse(left)
a2, b2 = self._parse(right)
if a1 == a2:
if b1 == b2:
return "Infinite solutions"
else:
return "No solution"
else:
return 'x={}'.format((b2 - b1) // (a1 - a2))
@staticmethod
def _parse(experession):
ret = [0, 0]
pre, pre_idx, add_idx, sign = 0, 0, 1, '+'
for i, c in enumerate(experession + '+'):
if c in ('+', '-'):
ret[add_idx] += pre if sign == '+' else -pre
pre, pre_idx, add_idx, sign = 0, i + 1, 1, c
elif c == 'x':
add_idx = 0
if pre_idx == i:
pre = 1
else:
pre = pre * 10 + int(c)
return ret
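# Worked sketch: _parse("2x+3x-6x") accumulates x-coefficients in ret[0] and
# constants in ret[1], yielding [-1, 0]; _parse("x+2") yields [1, 2], so
# "2x+3x-6x=x+2" reduces to -x = x + 2, i.e. x = -1.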
assert Solution().solveEquation("x+5-3+x=6+x-2") == "x=2"
assert Solution().solveEquation("x=x") == "Infinite solutions"
assert Solution().solveEquation("2x=x") == "x=0"
assert Solution().solveEquation("x=x+2") == "No solution"
assert Solution().solveEquation("2x+3x-6x=x+2") == "x=-1"
| gpl-3.0 | -166,488,784,348,331,420 | 31.538462 | 62 | 0.463357 | false |
Pointedstick/ReplicatorG | skein_engines/skeinforge-35/fabmetheus_utilities/fabmetheus_tools/interpret_plugins/gts.py | 6 | 4607 | """
This page is in the table of contents.
The gts.py script is an import translator plugin to get a carving from a gts file.
An import plugin is a script in the interpret_plugins folder which has the function getCarving. It is meant to be run from the interpret tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name.
The getCarving function takes the file name of a gts file and returns the carving.
The GNU Triangulated Surface (.gts) format is described at:
http://gts.sourceforge.net/reference/gts-surfaces.html#GTS-SURFACE-WRITE
Quoted from http://gts.sourceforge.net/reference/gts-surfaces.html#GTS-SURFACE-WRITE
"All the lines beginning with GTS_COMMENTS (#!) are ignored. The first line contains three unsigned integers separated by spaces. The first integer is the number of vertexes, nv, the second is the number of edges, ne and the third is the number of faces, nf.
Follows nv lines containing the x, y and z coordinates of the vertexes. Follows ne lines containing the two indices (starting from one) of the vertexes of each edge. Follows nf lines containing the three ordered indices (also starting from one) of the edges of each face.
The format described above is the least common denominator to all GTS files. Consistent with an object-oriented approach, the GTS file format is extensible. Each of the lines of the file can be extended with user-specific attributes accessible through the read() and write() virtual methods of each of the objects written (surface, vertexes, edges or faces). When read with different object classes, these extra attributes are just ignored."
This example gets a carving for the gts file Screw Holder Bottom.gts. This example is run in a terminal in the folder which contains Screw Holder Bottom.gts and gts.py.
> python
Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31)
[GCC 4.2.1 (SUSE Linux)] on linux2
Type "help", "copyright", "credits" or "license" for more information.
>>> import gts
>>> gts.getCarving()
[11.6000003815, 10.6837882996, 7.80209827423
..
many more lines of the carving
..
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.geometry_tools import face
from fabmetheus_utilities.geometry.solids import trianglemesh
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import gcodec
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GPL 3.0'
def getFromGNUTriangulatedSurfaceText( gnuTriangulatedSurfaceText, triangleMesh ):
"Initialize from a GNU Triangulated Surface Text."
if gnuTriangulatedSurfaceText == '':
return None
lines = archive.getTextLines( gnuTriangulatedSurfaceText )
linesWithoutComments = []
for line in lines:
if len(line) > 0:
firstCharacter = line[0]
if firstCharacter != '#' and firstCharacter != '!':
linesWithoutComments.append(line)
splitLine = linesWithoutComments[0].split()
numberOfVertexes = int( splitLine[0] )
numberOfEdges = int(splitLine[1])
numberOfFaces = int( splitLine[2] )
faceTriples = []
for vertexIndex in xrange( numberOfVertexes ):
line = linesWithoutComments[ vertexIndex + 1 ]
splitLine = line.split()
vertex = Vector3( float( splitLine[0] ), float(splitLine[1]), float( splitLine[2] ) )
triangleMesh.vertexes.append(vertex)
edgeStart = numberOfVertexes + 1
for edgeIndex in xrange( numberOfEdges ):
line = linesWithoutComments[ edgeIndex + edgeStart ]
splitLine = line.split()
vertexIndexes = []
for word in splitLine[ : 2 ]:
vertexIndexes.append( int(word) - 1 )
edge = face.Edge().getFromVertexIndexes( edgeIndex, vertexIndexes )
triangleMesh.edges.append( edge )
faceStart = edgeStart + numberOfEdges
for faceIndex in xrange( numberOfFaces ):
line = linesWithoutComments[ faceIndex + faceStart ]
splitLine = line.split()
edgeIndexes = []
for word in splitLine[ : 3 ]:
edgeIndexes.append( int(word) - 1 )
triangleMesh.faces.append( face.Face().getFromEdgeIndexes( edgeIndexes, triangleMesh.edges, faceIndex ) )
return triangleMesh
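# Parsing sketch (sample data, not from the original file): a minimal
# one-triangle surface for the reader above.
#   sample = '3 3 1\n0 0 0\n1 0 0\n0 1 0\n1 2\n2 3\n3 1\n1 2 3'
#   getFromGNUTriangulatedSurfaceText(sample, trianglemesh.TriangleMesh())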
def getCarving(fileName):
"Get the carving for the gts file."
return getFromGNUTriangulatedSurfaceText( archive.getFileText(fileName), trianglemesh.TriangleMesh() )
| gpl-2.0 | -3,633,897,840,715,864,600 | 48.010638 | 441 | 0.763186 | false |
zooba/PTVS | Python/Tests/TestData/VirtualEnv/env/Lib/encodings/cp852.py | 593 | 35258 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp852',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
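### Usage sketch (illustrative, not part of the generated file): with the
### tables below, codecs.charmap_decode('\x88', 'strict', decoding_table)
### yields u'\u0142' (LATIN SMALL LETTER L WITH STROKE), and charmap_encode
### maps it back once encoding_map is built at the end of the file.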
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT
0x00f2: 0x02db, # OGONEK
0x00f3: 0x02c7, # CARON
0x00f4: 0x02d8, # BREVE
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x02d9, # DOT ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE
u'\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE
u'\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON
u'\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON
u'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON
u'\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON
u'\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE
u'\xd7' # 0x009e -> MULTIPLICATION SIGN
u'\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK
u'\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON
u'\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON
u'\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK
u'\xac' # 0x00aa -> NOT SIGN
u'\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE
u'\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON
u'\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON
u'\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE
u'\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE
u'\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON
u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON
u'\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON
u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA
u'\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON
u'\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON
u'\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON
u'\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE
u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE
u'\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
u'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA
u'\xb4' # 0x00ef -> ACUTE ACCENT
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT
u'\u02db' # 0x00f2 -> OGONEK
u'\u02c7' # 0x00f3 -> CARON
u'\u02d8' # 0x00f4 -> BREVE
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\xb8' # 0x00f7 -> CEDILLA
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\xa8' # 0x00f9 -> DIAERESIS
u'\u02d9' # 0x00fa -> DOT ABOVE
u'\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON
u'\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b8: 0x00f7, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE
0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE
0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON
0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON
0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON
0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE
0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE
0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK
0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON
0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON
0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE
0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE
0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON
0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON
0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE
0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON
0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON
0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE
0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE
0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON
0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA
0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON
0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA
0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA
0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON
0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON
0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE
0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON
0x02c7: 0x00f3, # CARON
0x02d8: 0x00f4, # BREVE
0x02d9: 0x00fa, # DOT ABOVE
0x02db: 0x00f2, # OGONEK
0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
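### Example Usage (illustrative sketch, not part of the original codec module)
# The tables above plug into Python 2's charmap codec machinery; the helper
# below takes both tables as arguments rather than assuming the names they
# are bound to elsewhere in this file.
import codecs
def _example_roundtrip(raw, decoding_table, encoding_map):
    # bytes -> unicode via the position-indexed decoding table
    text, _ = codecs.charmap_decode(raw, 'strict', decoding_table)
    # unicode -> bytes via the codepoint -> byte encoding map
    data, _ = codecs.charmap_encode(text, 'strict', encoding_map)
    return text, data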
| apache-2.0 | 3,126,205,067,138,561,500 | 49.512894 | 97 | 0.608231 | false |
arangodb/arangodb | 3rdParty/rocksdb/6.8/tools/advisor/advisor/db_options_parser.py | 14 | 16518 | # Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
# This source code is licensed under both the GPLv2 (found in the
# COPYING file in the root directory) and Apache 2.0 License
# (found in the LICENSE.Apache file in the root directory).
import copy
from advisor.db_log_parser import DataSource, NO_COL_FAMILY
from advisor.ini_parser import IniParser
import os
class OptionsSpecParser(IniParser):
@staticmethod
def is_new_option(line):
return '=' in line
@staticmethod
def get_section_type(line):
'''
Example section header: [TableOptions/BlockBasedTable "default"]
        Here the section_type returned would be
'TableOptions.BlockBasedTable'
'''
section_path = line.strip()[1:-1].split()[0]
section_type = '.'.join(section_path.split('/'))
return section_type
@staticmethod
def get_section_name(line):
# example: get_section_name('[CFOptions "default"]')
token_list = line.strip()[1:-1].split('"')
# token_list = ['CFOptions', 'default', '']
if len(token_list) < 3:
return None
return token_list[1] # return 'default'
@staticmethod
def get_section_str(section_type, section_name):
# Example:
# Case 1: get_section_str('DBOptions', NO_COL_FAMILY)
# Case 2: get_section_str('TableOptions.BlockBasedTable', 'default')
section_type = '/'.join(section_type.strip().split('.'))
# Case 1: section_type = 'DBOptions'
# Case 2: section_type = 'TableOptions/BlockBasedTable'
section_str = '[' + section_type
if section_name == NO_COL_FAMILY:
# Case 1: '[DBOptions]'
return (section_str + ']')
else:
# Case 2: '[TableOptions/BlockBasedTable "default"]'
return section_str + ' "' + section_name + '"]'
@staticmethod
def get_option_str(key, values):
option_str = key + '='
# get_option_str('db_log_dir', None), returns 'db_log_dir='
if values:
# example:
# get_option_str('max_bytes_for_level_multiplier_additional',
# [1,1,1,1,1,1,1]), returned string:
# 'max_bytes_for_level_multiplier_additional=1:1:1:1:1:1:1'
if isinstance(values, list):
for value in values:
option_str += (str(value) + ':')
option_str = option_str[:-1]
else:
# example: get_option_str('write_buffer_size', 1048576)
# returned string: 'write_buffer_size=1048576'
option_str += str(values)
return option_str
class DatabaseOptions(DataSource):
@staticmethod
def is_misc_option(option_name):
# these are miscellaneous options that are not yet supported by the
# Rocksdb options file, hence they are not prefixed with any section
# name
return '.' not in option_name
@staticmethod
def get_options_diff(opt_old, opt_new):
# type: Dict[option, Dict[col_fam, value]] X 2 ->
# Dict[option, Dict[col_fam, Tuple(old_value, new_value)]]
# note: diff should contain a tuple of values only if they are
# different from each other
options_union = set(opt_old.keys()).union(set(opt_new.keys()))
diff = {}
for opt in options_union:
diff[opt] = {}
# if option in options_union, then it must be in one of the configs
if opt not in opt_old:
for col_fam in opt_new[opt]:
diff[opt][col_fam] = (None, opt_new[opt][col_fam])
elif opt not in opt_new:
for col_fam in opt_old[opt]:
diff[opt][col_fam] = (opt_old[opt][col_fam], None)
else:
for col_fam in opt_old[opt]:
if col_fam in opt_new[opt]:
if opt_old[opt][col_fam] != opt_new[opt][col_fam]:
diff[opt][col_fam] = (
opt_old[opt][col_fam],
opt_new[opt][col_fam]
)
else:
diff[opt][col_fam] = (opt_old[opt][col_fam], None)
for col_fam in opt_new[opt]:
if col_fam in opt_old[opt]:
if opt_old[opt][col_fam] != opt_new[opt][col_fam]:
diff[opt][col_fam] = (
opt_old[opt][col_fam],
opt_new[opt][col_fam]
)
else:
diff[opt][col_fam] = (None, opt_new[opt][col_fam])
if not diff[opt]:
diff.pop(opt)
return diff
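    # Worked example (illustrative): if only the default column family's
    # write buffer changed between the two configs,
    #   get_options_diff(
    #       {'CFOptions.write_buffer_size': {'default': '1048576'}},
    #       {'CFOptions.write_buffer_size': {'default': '2097152'}})
    # returns
    #   {'CFOptions.write_buffer_size': {'default': ('1048576', '2097152')}}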
def __init__(self, rocksdb_options, misc_options=None):
super().__init__(DataSource.Type.DB_OPTIONS)
# The options are stored in the following data structure:
# Dict[section_type, Dict[section_name, Dict[option_name, value]]]
self.options_dict = None
self.column_families = None
# Load the options from the given file to a dictionary.
self.load_from_source(rocksdb_options)
# Setup the miscellaneous options expected to be List[str], where each
# element in the List has the format "<option_name>=<option_value>"
# These options are the ones that are not yet supported by the Rocksdb
# OPTIONS file, so they are provided separately
self.setup_misc_options(misc_options)
def setup_misc_options(self, misc_options):
self.misc_options = {}
if misc_options:
for option_pair_str in misc_options:
option_name = option_pair_str.split('=')[0].strip()
option_value = option_pair_str.split('=')[1].strip()
self.misc_options[option_name] = option_value
def load_from_source(self, options_path):
self.options_dict = {}
with open(options_path, 'r') as db_options:
for line in db_options:
line = OptionsSpecParser.remove_trailing_comment(line)
if not line:
continue
if OptionsSpecParser.is_section_header(line):
curr_sec_type = (
OptionsSpecParser.get_section_type(line)
)
curr_sec_name = OptionsSpecParser.get_section_name(line)
if curr_sec_type not in self.options_dict:
self.options_dict[curr_sec_type] = {}
if not curr_sec_name:
curr_sec_name = NO_COL_FAMILY
self.options_dict[curr_sec_type][curr_sec_name] = {}
# example: if the line read from the Rocksdb OPTIONS file
# is [CFOptions "default"], then the section type is
# CFOptions and 'default' is the name of a column family
# that for this database, so it's added to the list of
# column families stored in this object
if curr_sec_type == 'CFOptions':
if not self.column_families:
self.column_families = []
self.column_families.append(curr_sec_name)
elif OptionsSpecParser.is_new_option(line):
key, value = OptionsSpecParser.get_key_value_pair(line)
self.options_dict[curr_sec_type][curr_sec_name][key] = (
value
)
else:
error = 'Not able to parse line in Options file.'
OptionsSpecParser.exit_with_parse_error(line, error)
def get_misc_options(self):
# these are options that are not yet supported by the Rocksdb OPTIONS
# file, hence they are provided and stored separately
return self.misc_options
def get_column_families(self):
return self.column_families
def get_all_options(self):
# This method returns all the options that are stored in this object as
# a: Dict[<sec_type>.<option_name>: Dict[col_fam, option_value]]
all_options = []
# Example: in the section header '[CFOptions "default"]' read from the
# OPTIONS file, sec_type='CFOptions'
for sec_type in self.options_dict:
for col_fam in self.options_dict[sec_type]:
for opt_name in self.options_dict[sec_type][col_fam]:
option = sec_type + '.' + opt_name
all_options.append(option)
all_options.extend(list(self.misc_options.keys()))
return self.get_options(all_options)
def get_options(self, reqd_options):
# type: List[str] -> Dict[str, Dict[str, Any]]
# List[option] -> Dict[option, Dict[col_fam, value]]
reqd_options_dict = {}
for option in reqd_options:
if DatabaseOptions.is_misc_option(option):
# the option is not prefixed by '<section_type>.' because it is
# not yet supported by the Rocksdb OPTIONS file; so it has to
# be fetched from the misc_options dictionary
if option not in self.misc_options:
continue
if option not in reqd_options_dict:
reqd_options_dict[option] = {}
reqd_options_dict[option][NO_COL_FAMILY] = (
self.misc_options[option]
)
else:
# Example: option = 'TableOptions.BlockBasedTable.block_align'
# then, sec_type = 'TableOptions.BlockBasedTable'
sec_type = '.'.join(option.split('.')[:-1])
# opt_name = 'block_align'
opt_name = option.split('.')[-1]
if sec_type not in self.options_dict:
continue
for col_fam in self.options_dict[sec_type]:
if opt_name in self.options_dict[sec_type][col_fam]:
if option not in reqd_options_dict:
reqd_options_dict[option] = {}
reqd_options_dict[option][col_fam] = (
self.options_dict[sec_type][col_fam][opt_name]
)
return reqd_options_dict
def update_options(self, options):
# An example 'options' object looks like:
# {'DBOptions.max_background_jobs': {NO_COL_FAMILY: 2},
# 'CFOptions.write_buffer_size': {'default': 1048576, 'cf_A': 128000},
# 'bloom_bits': {NO_COL_FAMILY: 4}}
for option in options:
if DatabaseOptions.is_misc_option(option):
# this is a misc_option i.e. an option that is not yet
# supported by the Rocksdb OPTIONS file, so it is not prefixed
# by '<section_type>.' and must be stored in the separate
# misc_options dictionary
if NO_COL_FAMILY not in options[option]:
print(
'WARNING(DatabaseOptions.update_options): not ' +
'updating option ' + option + ' because it is in ' +
'misc_option format but its scope is not ' +
NO_COL_FAMILY + '. Check format of option.'
)
continue
self.misc_options[option] = options[option][NO_COL_FAMILY]
else:
sec_name = '.'.join(option.split('.')[:-1])
opt_name = option.split('.')[-1]
if sec_name not in self.options_dict:
self.options_dict[sec_name] = {}
for col_fam in options[option]:
# if the option is not already present in the dictionary,
# it will be inserted, else it will be updated to the new
# value
if col_fam not in self.options_dict[sec_name]:
self.options_dict[sec_name][col_fam] = {}
self.options_dict[sec_name][col_fam][opt_name] = (
copy.deepcopy(options[option][col_fam])
)
def generate_options_config(self, nonce):
# this method generates a Rocksdb OPTIONS file in the INI format from
# the options stored in self.options_dict
this_path = os.path.abspath(os.path.dirname(__file__))
file_name = '../temp/OPTIONS_' + str(nonce) + '.tmp'
file_path = os.path.join(this_path, file_name)
with open(file_path, 'w') as fp:
for section in self.options_dict:
for col_fam in self.options_dict[section]:
fp.write(
OptionsSpecParser.get_section_str(section, col_fam) +
'\n'
)
for option in self.options_dict[section][col_fam]:
values = self.options_dict[section][col_fam][option]
fp.write(
OptionsSpecParser.get_option_str(option, values) +
'\n'
)
fp.write('\n')
return file_path
def check_and_trigger_conditions(self, conditions):
for cond in conditions:
reqd_options_dict = self.get_options(cond.options)
# This contains the indices of options that are specific to some
# column family and are not database-wide options.
incomplete_option_ix = []
options = []
missing_reqd_option = False
for ix, option in enumerate(cond.options):
if option not in reqd_options_dict:
print(
'WARNING(DatabaseOptions.check_and_trigger): ' +
'skipping condition ' + cond.name + ' because it '
'requires option ' + option + ' but this option is' +
' not available'
)
missing_reqd_option = True
break # required option is absent
if NO_COL_FAMILY in reqd_options_dict[option]:
options.append(reqd_options_dict[option][NO_COL_FAMILY])
else:
options.append(None)
incomplete_option_ix.append(ix)
if missing_reqd_option:
continue
# if all the options are database-wide options
if not incomplete_option_ix:
try:
if eval(cond.eval_expr):
cond.set_trigger({NO_COL_FAMILY: options})
except Exception as e:
print(
'WARNING(DatabaseOptions) check_and_trigger:' + str(e)
)
continue
# for all the options that are not database-wide, we look for their
# values specific to column families
col_fam_options_dict = {}
for col_fam in self.column_families:
present = True
for ix in incomplete_option_ix:
option = cond.options[ix]
if col_fam not in reqd_options_dict[option]:
present = False
break
options[ix] = reqd_options_dict[option][col_fam]
if present:
try:
if eval(cond.eval_expr):
col_fam_options_dict[col_fam] = (
copy.deepcopy(options)
)
except Exception as e:
print(
'WARNING(DatabaseOptions) check_and_trigger: ' +
str(e)
)
# Trigger for an OptionCondition object is of the form:
# Dict[col_fam_name: List[option_value]]
# where col_fam_name is the name of a column family for which
# 'eval_expr' evaluated to True and List[option_value] is the list
# of values of the options specified in the condition's 'options'
# field
if col_fam_options_dict:
cond.set_trigger(col_fam_options_dict)
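# Illustrative helper (an added sketch, not part of the original advisor):
# compare two RocksDB OPTIONS files using only the methods defined above.
# The paths are placeholders supplied by the caller.
def _example_compare_options(old_options_path, new_options_path):
    old_opts = DatabaseOptions(old_options_path).get_all_options()
    new_opts = DatabaseOptions(new_options_path).get_all_options()
    # -> Dict[option, Dict[col_fam, (old_value, new_value)]]
    return DatabaseOptions.get_options_diff(old_opts, new_opts)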
| apache-2.0 | 3,116,506,955,234,241,500 | 45.139665 | 79 | 0.51792 | false |
vlaznev/gui_plotter | plot_window/color_picker.py | 1 | 1419 | __author__ = 'user'
from PyQt4 import Qt, QtGui
class QColorComboBox(QtGui.QComboBox):
ColorNames = ["darkGreen", "green", "gray", "red", "white", "blue", "cyan", "darkMagenta", "yellow",
"darkRed", "black", "magenta"]
ColorRole = Qt.Qt.UserRole + 1
def __init__(self, parent=None):
super(QColorComboBox, self).__init__(parent)
self.fillColors()
def fillColors(self):
size = self.style().pixelMetric(QtGui.QStyle.PM_SmallIconSize)
pixmap = QtGui.QPixmap(size, size)
idx = 0
for colorName in self.ColorNames:
color = QtGui.QColor(colorName)
            self.addItem(colorName, color)  # keep the color itself as the item's user data
pixmap.fill(color)
self.setItemData(idx, pixmap, Qt.Qt.DecorationRole)
self.setItemData(idx, color, self.ColorRole)
idx += 1
def currentColor(self):
idx = self.currentIndex()
if idx >= 0:
return self.itemData(idx, self.ColorRole)
else:
return None
def color(self, index):
return self.itemData(index, self.ColorRole)
def setCurrentColor(self, color):
colorObject = QtGui.QColor(color)
for idx in xrange(self.count()):
if colorObject == self.color(idx):
self.setCurrentIndex(idx)
return
raise ValueError("Color not found: " + str(color))
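# Illustrative usage sketch (assumed, not part of the original module):
# show the picker on its own and print each newly selected color. Note that
# under the classic PyQt4 API, itemData() returns a QVariant, so the value
# from color() may need .toPyObject() before further use.
def _example_usage():
    import sys
    app = QtGui.QApplication(sys.argv)
    picker = QColorComboBox()
    picker.setCurrentColor('red')
    picker.currentIndexChanged.connect(
        lambda idx: sys.stdout.write(str(picker.color(idx)) + '\n'))
    picker.show()
    app.exec_()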
| mit | -48,099,799,798,734,580 | 30.533333 | 104 | 0.582805 | false |
Fritz449/SRLF | models/feed_forward.py | 1 | 5832 | import tensorflow as tf
import os
import sys
sys.path.append(os.path.realpath(".."))
from helpers.layers import denselayer
from models.base_model import BaseModel
import numpy as np
class FeedForward(BaseModel):
def __init__(self, sess, args):
BaseModel.__init__(self, sess)
self.n_hiddens = args['n_hiddens']
self.n_features = args['n_features']
self.critic = args.get('critic')
self.nonlinearity = args.get('nonlin', tf.nn.tanh)
self.state_input = tf.placeholder(tf.float32, shape=(None, self.n_features))
self.value_weights = []
self.value_weights_phs = []
self.create_network()
def create_network(self):
input = self.state_input
mean = tf.get_variable("means", shape=(1, int(input.get_shape()[1])), initializer=tf.constant_initializer(0),
trainable=False)
std = tf.get_variable("stds", shape=(1, int(input.get_shape()[1])), initializer=tf.constant_initializer(1),
trainable=False)
mean_ph = tf.placeholder(tf.float32, shape=mean.get_shape())
std_ph = tf.placeholder(tf.float32, shape=std.get_shape())
self.norm_set_op = [mean.assign(mean_ph), std.assign(std_ph)]
self.norm_phs = [mean_ph, std_ph]
hidden = (input - mean) / (std + 1e-5)
hidden = tf.clip_by_value(hidden, -20, 20)
self.hidden = hidden
for index, n_hidden in enumerate(self.n_hiddens):
hidden, weights = denselayer("hidden_{}".format(index), hidden, n_hidden, self.nonlinearity)
self.weights += weights
self.weights_phs += [tf.placeholder(tf.float32, shape=w.get_shape()) for w in weights]
self.hidden = hidden
hidden = self.hidden
self.value, weights = denselayer("value", hidden, 1)
self.value = tf.reshape(self.value, [-1])
self.value_weights += weights
self.value_weights_phs += [tf.placeholder(tf.float32, shape=w.get_shape()) for w in weights]
class FFDiscrete(FeedForward):
def __init__(self, sess, args):
FeedForward.__init__(self, sess, args)
self.n_actions = args['n_actions']
self.create_output()
for weight, ph in zip(self.weights, self.weights_phs):
self.set_op.append(weight.assign(ph))
def create_output(self):
self.action_probs = []
self.action_logprobs = []
for index, n in enumerate(self.n_actions):
log_probs, weights = denselayer("lob_probs_{}".format(index), self.hidden, n, tf.nn.log_softmax)
self.action_logprobs.append(log_probs)
self.weights += weights
self.weights_phs += [tf.placeholder(tf.float32, shape=w.get_shape()) for w in weights]
self.action_probs.append(tf.exp(self.action_logprobs[index]))
self.sess.run(tf.global_variables_initializer())
def act(self, obs, exploration=True, return_dists=False):
log_probs = self.sess.run(self.action_logprobs, feed_dict={self.state_input: obs})
actions = np.zeros(shape=(len(log_probs),), dtype=np.int32)
        for i in range(len(log_probs)):
if not exploration:
actions[i] = np.argmax(log_probs[i][0])
continue
actions[i] = np.random.choice(np.arange(self.n_actions[i], dtype=np.int32), p=np.exp(log_probs[i][0]))
if return_dists:
return actions, log_probs
return actions
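# Illustrative note (hedged): for n_actions=[3, 2] and a single observation,
# act() above returns one sampled index per action head, e.g.
# array([2, 0], dtype=int32); with exploration=False it takes the argmax of
# each head's log-probabilities instead.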
class FFContinuous(FeedForward):
def __init__(self, sess, args):
FeedForward.__init__(self, sess, args)
self.n_actions = args['n_actions']
self.std = args.get('std', "Const")
self.init_log_std = args.get('init_log_std', 0)
self.create_output()
for weight, ph in zip(self.weights, self.weights_phs):
self.set_op.append(weight.assign(ph))
def create_output(self):
self.action_means, weights = denselayer("means", self.hidden, len(self.n_actions))
self.weights += weights
self.weights_phs += [tf.placeholder(tf.float32, shape=w.get_shape()) for w in weights]
if self.std == "Const":
self.action_log_stds = tf.get_variable("std", shape=(1, len(self.n_actions),),
initializer=tf.constant_initializer(self.init_log_std),
trainable=False)
self.action_stds = tf.exp(self.action_log_stds)
elif self.std == "Param":
self.action_log_stds = tf.get_variable("std", shape=(1, len(self.n_actions)),
initializer=tf.constant_initializer(self.init_log_std))
self.action_stds = tf.exp(self.action_log_stds)
self.weights.append(self.action_log_stds)
self.weights_phs.append(tf.placeholder(tf.float32, shape=(1, len(self.n_actions))))
elif self.std == "Train":
self.action_stds, weights = denselayer("stds", self.hidden, len(self.n_actions), tf.exp)
self.weights += weights
self.weights_phs += [tf.placeholder(tf.float32, shape=w.get_shape()) for w in weights]
else:
raise Exception
self.sess.run(tf.global_variables_initializer())
def act(self, obs, exploration=True, return_dists=False):
means, stds = self.sess.run([self.action_means, self.action_stds], feed_dict={self.state_input: obs})
means = means[0]
stds = stds[0]
if not exploration:
return means
actions = np.zeros(shape=means.shape)
for i in range(actions.shape[0]):
actions[i] = np.random.normal(means[i], stds[i])
if return_dists:
return actions, [means, stds]
return actions
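# Illustrative sketch (assumed usage, not from the original repo): build a
# small continuous-control policy and sample one action. The dict keys
# mirror the 'args' entries read by the constructors above; note that for
# FFContinuous the action dimensionality is len(args['n_actions']).
def _example_continuous_policy(sess):
    policy = FFContinuous(sess, {
        'n_hiddens': [64, 64],  # two hidden layers of 64 units
        'n_features': 8,        # observation dimensionality
        'n_actions': [1, 1],    # a two-dimensional action
        'std': 'Param',         # trainable state-independent log-std
    })
    obs = np.zeros((1, 8), dtype=np.float32)
    return policy.act(obs, exploration=True)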
| apache-2.0 | -8,037,661,740,209,013,000 | 42.522388 | 117 | 0.592078 | false |
Stefan-Schmidt/linux-wpan-next | tools/power/pm-graph/analyze_boot.py | 84 | 31499 | #!/usr/bin/python
#
# Tool for analyzing boot timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Authors:
# Todd Brandt <[email protected]>
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's boot time. It creates an html representation of
# the kernel boot timeline up to the start of the init process.
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
import shutil
from datetime import datetime, timedelta
from subprocess import call, Popen, PIPE
import analyze_suspend as aslib
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues(aslib.SystemValues):
title = 'BootGraph'
version = '2.1'
hostname = 'localhost'
testtime = ''
kernel = ''
dmesgfile = ''
ftracefile = ''
htmlfile = 'bootgraph.html'
outfile = ''
testdir = ''
testdirprefix = 'boot'
embedded = False
testlog = False
dmesglog = False
ftracelog = False
useftrace = False
usecallgraph = False
usedevsrc = True
suspendmode = 'boot'
max_graph_depth = 2
graph_filter = 'do_one_initcall'
reboot = False
manual = False
iscronjob = False
timeformat = '%.6f'
bootloader = 'grub'
blexec = []
def __init__(self):
if('LOG_FILE' in os.environ and 'TEST_RESULTS_IDENTIFIER' in os.environ):
self.embedded = True
self.dmesglog = True
self.outfile = os.environ['LOG_FILE']
self.htmlfile = os.environ['LOG_FILE']
self.hostname = platform.node()
self.testtime = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
if os.path.exists('/proc/version'):
fp = open('/proc/version', 'r')
val = fp.read().strip()
fp.close()
self.kernel = self.kernelVersion(val)
else:
self.kernel = 'unknown'
self.testdir = datetime.now().strftime('boot-%y%m%d-%H%M%S')
def kernelVersion(self, msg):
return msg.split()[2]
def checkFtraceKernelVersion(self):
val = tuple(map(int, self.kernel.split('-')[0].split('.')))
if val >= (4, 10, 0):
return True
return False
def kernelParams(self):
cmdline = 'initcall_debug log_buf_len=32M'
if self.useftrace:
if self.cpucount > 0:
bs = min(self.memtotal / 2, 2*1024*1024) / self.cpucount
else:
bs = 131072
cmdline += ' trace_buf_size=%dK trace_clock=global '\
'trace_options=nooverwrite,funcgraph-abstime,funcgraph-cpu,'\
'funcgraph-duration,funcgraph-proc,funcgraph-tail,'\
'nofuncgraph-overhead,context-info,graph-time '\
'ftrace=function_graph '\
'ftrace_graph_max_depth=%d '\
'ftrace_graph_filter=%s' % \
(bs, self.max_graph_depth, self.graph_filter)
return cmdline
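	# Illustrative output (hedged; buffer size varies with memory and CPU
	# count): with ftrace enabled the returned string looks roughly like
	#   initcall_debug log_buf_len=32M trace_buf_size=131072K
	#   trace_clock=global ... ftrace=function_graph
	#   ftrace_graph_max_depth=2 ftrace_graph_filter=do_one_initcall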
def setGraphFilter(self, val):
master = self.getBootFtraceFilterFunctions()
fs = ''
for i in val.split(','):
func = i.strip()
if func == '':
doError('badly formatted filter function string')
if '[' in func or ']' in func:
doError('loadable module functions not allowed - "%s"' % func)
if ' ' in func:
doError('spaces found in filter functions - "%s"' % func)
if func not in master:
doError('function "%s" not available for ftrace' % func)
if not fs:
fs = func
else:
fs += ','+func
if not fs:
doError('badly formatted filter function string')
self.graph_filter = fs
def getBootFtraceFilterFunctions(self):
self.rootCheck(True)
fp = open(self.tpath+'available_filter_functions')
fulllist = fp.read().split('\n')
fp.close()
list = []
for i in fulllist:
if not i or ' ' in i or '[' in i or ']' in i:
continue
list.append(i)
return list
def myCronJob(self, line):
if '@reboot' not in line:
return False
if 'bootgraph' in line or 'analyze_boot.py' in line or '-cronjob' in line:
return True
return False
def cronjobCmdString(self):
cmdline = '%s -cronjob' % os.path.abspath(sys.argv[0])
args = iter(sys.argv[1:])
for arg in args:
if arg in ['-h', '-v', '-cronjob', '-reboot']:
continue
elif arg in ['-o', '-dmesg', '-ftrace', '-func']:
args.next()
continue
cmdline += ' '+arg
if self.graph_filter != 'do_one_initcall':
cmdline += ' -func "%s"' % self.graph_filter
cmdline += ' -o "%s"' % os.path.abspath(self.testdir)
return cmdline
def manualRebootRequired(self):
cmdline = self.kernelParams()
print 'To generate a new timeline manually, follow these steps:\n'
print '1. Add the CMDLINE string to your kernel command line.'
print '2. Reboot the system.'
print '3. After reboot, re-run this tool with the same arguments but no command (w/o -reboot or -manual).\n'
print 'CMDLINE="%s"' % cmdline
sys.exit()
def getExec(self, cmd):
dirlist = ['/sbin', '/bin', '/usr/sbin', '/usr/bin',
'/usr/local/sbin', '/usr/local/bin']
for path in dirlist:
cmdfull = os.path.join(path, cmd)
if os.path.exists(cmdfull):
return cmdfull
return ''
def blGrub(self):
blcmd = ''
for cmd in ['update-grub', 'grub-mkconfig', 'grub2-mkconfig']:
if blcmd:
break
blcmd = self.getExec(cmd)
if not blcmd:
doError('[GRUB] missing update command')
if not os.path.exists('/etc/default/grub'):
doError('[GRUB] missing /etc/default/grub')
if 'grub2' in blcmd:
cfg = '/boot/grub2/grub.cfg'
else:
cfg = '/boot/grub/grub.cfg'
if not os.path.exists(cfg):
doError('[GRUB] missing %s' % cfg)
if 'update-grub' in blcmd:
self.blexec = [blcmd]
else:
self.blexec = [blcmd, '-o', cfg]
def getBootLoader(self):
if self.bootloader == 'grub':
self.blGrub()
else:
doError('unknown boot loader: %s' % self.bootloader)
sysvals = SystemValues()
# Class: Data
# Description:
# The primary container for test data.
class Data(aslib.Data):
dmesg = {} # root data structure
start = 0.0 # test start
end = 0.0 # test end
dmesgtext = [] # dmesg text file in memory
testnumber = 0
idstr = ''
html_device_id = 0
valid = False
tUserMode = 0.0
boottime = ''
phases = ['kernel', 'user']
do_one_initcall = False
def __init__(self, num):
self.testnumber = num
self.idstr = 'a'
self.dmesgtext = []
self.dmesg = {
'kernel': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0,
'order': 0, 'color': 'linear-gradient(to bottom, #fff, #bcf)'},
'user': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0,
'order': 1, 'color': '#fff'}
}
def deviceTopology(self):
return ''
def newAction(self, phase, name, pid, start, end, ret, ulen):
# new device callback for a specific phase
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = self.dmesg[phase]['list']
length = -1.0
if(start >= 0 and end >= 0):
length = end - start
i = 2
origname = name
while(name in list):
name = '%s[%d]' % (origname, i)
i += 1
list[name] = {'name': name, 'start': start, 'end': end,
'pid': pid, 'length': length, 'row': 0, 'id': devid,
'ret': ret, 'ulen': ulen }
return name
def deviceMatch(self, pid, cg):
if cg.end - cg.start == 0:
return True
for p in data.phases:
list = self.dmesg[p]['list']
for devname in list:
dev = list[devname]
if pid != dev['pid']:
continue
if cg.name == 'do_one_initcall':
if(cg.start <= dev['start'] and cg.end >= dev['end'] and dev['length'] > 0):
dev['ftrace'] = cg
self.do_one_initcall = True
return True
else:
if(cg.start > dev['start'] and cg.end < dev['end']):
if 'ftraces' not in dev:
dev['ftraces'] = []
dev['ftraces'].append(cg)
return True
return False
# ----------------- FUNCTIONS --------------------
# Function: parseKernelLog
# Description:
# parse a kernel log for boot data
def parseKernelLog():
phase = 'kernel'
data = Data(0)
data.dmesg['kernel']['start'] = data.start = ktime = 0.0
sysvals.stamp = {
'time': datetime.now().strftime('%B %d %Y, %I:%M:%S %p'),
'host': sysvals.hostname,
'mode': 'boot', 'kernel': ''}
tp = aslib.TestProps()
devtemp = dict()
if(sysvals.dmesgfile):
lf = open(sysvals.dmesgfile, 'r')
else:
lf = Popen('dmesg', stdout=PIPE).stdout
for line in lf:
line = line.replace('\r\n', '')
# grab the stamp and sysinfo
if re.match(tp.stampfmt, line):
tp.stamp = line
continue
elif re.match(tp.sysinfofmt, line):
tp.sysinfo = line
continue
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(not m):
continue
ktime = float(m.group('ktime'))
if(ktime > 120):
break
msg = m.group('msg')
data.dmesgtext.append(line)
if(ktime == 0.0 and re.match('^Linux version .*', msg)):
if(not sysvals.stamp['kernel']):
sysvals.stamp['kernel'] = sysvals.kernelVersion(msg)
continue
m = re.match('.* setting system clock to (?P<t>.*) UTC.*', msg)
if(m):
bt = datetime.strptime(m.group('t'), '%Y-%m-%d %H:%M:%S')
bt = bt - timedelta(seconds=int(ktime))
data.boottime = bt.strftime('%Y-%m-%d_%H:%M:%S')
sysvals.stamp['time'] = bt.strftime('%B %d %Y, %I:%M:%S %p')
continue
m = re.match('^calling *(?P<f>.*)\+.* @ (?P<p>[0-9]*)', msg)
if(m):
func = m.group('f')
pid = int(m.group('p'))
devtemp[func] = (ktime, pid)
continue
m = re.match('^initcall *(?P<f>.*)\+.* returned (?P<r>.*) after (?P<t>.*) usecs', msg)
if(m):
data.valid = True
data.end = ktime
f, r, t = m.group('f', 'r', 't')
if(f in devtemp):
start, pid = devtemp[f]
data.newAction(phase, f, pid, start, ktime, int(r), int(t))
del devtemp[f]
continue
if(re.match('^Freeing unused kernel memory.*', msg)):
data.tUserMode = ktime
data.dmesg['kernel']['end'] = ktime
data.dmesg['user']['start'] = ktime
phase = 'user'
if tp.stamp:
sysvals.stamp = 0
tp.parseStamp(data, sysvals)
data.dmesg['user']['end'] = data.end
lf.close()
return data
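# Example dmesg lines the parser above matches (illustrative, from a
# typical initcall_debug boot; exact timestamps and names will differ):
#   [    0.235713] calling  pci_driver_init+0x0/0x26 @ 1
#   [    0.236689] initcall pci_driver_init+0x0/0x26 returned 0 after 952 usecs
# Each calling/initcall pair becomes one timeline entry, and the
# "Freeing unused kernel memory" message marks the kernel->user handoff.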
# Function: parseTraceLog
# Description:
# Check if trace is available and copy to a temp file
def parseTraceLog(data):
# parse the trace log
ftemp = dict()
tp = aslib.TestProps()
tp.setTracerType('function_graph')
tf = open(sysvals.ftracefile, 'r')
for line in tf:
if line[0] == '#':
continue
m = re.match(tp.ftrace_line_fmt, line.strip())
if(not m):
continue
m_time, m_proc, m_pid, m_msg, m_dur = \
m.group('time', 'proc', 'pid', 'msg', 'dur')
if float(m_time) > data.end:
break
if(m_time and m_pid and m_msg):
t = aslib.FTraceLine(m_time, m_msg, m_dur)
pid = int(m_pid)
else:
continue
if t.fevent or t.fkprobe:
continue
key = (m_proc, pid)
if(key not in ftemp):
ftemp[key] = []
ftemp[key].append(aslib.FTraceCallGraph(pid))
cg = ftemp[key][-1]
if(cg.addLine(t)):
ftemp[key].append(aslib.FTraceCallGraph(pid))
tf.close()
# add the callgraph data to the device hierarchy
for key in ftemp:
proc, pid = key
for cg in ftemp[key]:
if len(cg.list) < 1 or cg.invalid:
continue
if(not cg.postProcess()):
print('Sanity check failed for %s-%d' % (proc, pid))
continue
# match cg data to devices
if not data.deviceMatch(pid, cg):
print ' BAD: %s %s-%d [%f - %f]' % (cg.name, proc, pid, cg.start, cg.end)
# Function: retrieveLogs
# Description:
# Create copies of dmesg and/or ftrace for later processing
def retrieveLogs():
# check ftrace is configured first
if sysvals.useftrace:
tracer = sysvals.fgetVal('current_tracer').strip()
if tracer != 'function_graph':
doError('ftrace not configured for a boot callgraph')
# create the folder and get dmesg
sysvals.systemInfo(aslib.dmidecode(sysvals.mempath))
sysvals.initTestOutput('boot')
sysvals.writeDatafileHeader(sysvals.dmesgfile)
call('dmesg >> '+sysvals.dmesgfile, shell=True)
if not sysvals.useftrace:
return
# get ftrace
sysvals.writeDatafileHeader(sysvals.ftracefile)
call('cat '+sysvals.tpath+'trace >> '+sysvals.ftracefile, shell=True)
# Function: colorForName
# Description:
# Generate a repeatable color from a list for a given name
def colorForName(name):
list = [
('c1', '#ec9999'),
('c2', '#ffc1a6'),
('c3', '#fff0a6'),
('c4', '#adf199'),
('c5', '#9fadea'),
('c6', '#a699c1'),
('c7', '#ad99b4'),
('c8', '#eaffea'),
('c9', '#dcecfb'),
('c10', '#ffffea')
]
i = 0
total = 0
count = len(list)
while i < len(name):
total += ord(name[i])
i += 1
return list[total % count]
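# Worked example (illustrative): colorForName('sda') sums the character
# ordinals (115+100+97 = 312) and indexes the 10-entry list with
# 312 % 10 = 2, so 'sda' always maps to ('c3', '#fff0a6').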
def cgOverview(cg, minlen):
stats = dict()
large = []
for l in cg.list:
if l.fcall and l.depth == 1:
if l.length >= minlen:
large.append(l)
if l.name not in stats:
stats[l.name] = [0, 0.0]
stats[l.name][0] += (l.length * 1000.0)
stats[l.name][1] += 1
return (large, stats)
# Function: createBootGraph
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createBootGraph(data):
# html function templates
html_srccall = '<div id={6} title="{5}" class="srccall" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;">{0}</div>\n'
html_timetotal = '<table class="time1">\n<tr>'\
'<td class="blue">Init process starts @ <b>{0} ms</b></td>'\
'<td class="blue">Last initcall ends @ <b>{1} ms</b></td>'\
'</tr>\n</table>\n'
# device timeline
devtl = aslib.Timeline(100, 20)
# write the test title and general info header
devtl.createHeader(sysvals)
# Generate the header for this timeline
t0 = data.start
tMax = data.end
tTotal = tMax - t0
if(tTotal == 0):
print('ERROR: No timeline data')
return False
user_mode = '%.0f'%(data.tUserMode*1000)
last_init = '%.0f'%(tTotal*1000)
devtl.html += html_timetotal.format(user_mode, last_init)
# determine the maximum number of rows we need to draw
devlist = []
for p in data.phases:
list = data.dmesg[p]['list']
for devname in list:
d = aslib.DevItem(0, p, list[devname])
devlist.append(d)
devtl.getPhaseRows(devlist, 0, 'start')
devtl.calcTotalRows()
# draw the timeline background
devtl.createZoomBox()
devtl.html += devtl.html_tblock.format('boot', '0', '100', devtl.scaleH)
for p in data.phases:
phase = data.dmesg[p]
length = phase['end']-phase['start']
left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
width = '%.3f' % ((length*100.0)/tTotal)
devtl.html += devtl.html_phase.format(left, width, \
'%.3f'%devtl.scaleH, '%.3f'%devtl.bodyH, \
phase['color'], '')
# draw the device timeline
num = 0
devstats = dict()
for phase in data.phases:
list = data.dmesg[phase]['list']
for devname in sorted(list):
cls, color = colorForName(devname)
dev = list[devname]
info = '@|%.3f|%.3f|%.3f|%d' % (dev['start']*1000.0, dev['end']*1000.0,
dev['ulen']/1000.0, dev['ret'])
devstats[dev['id']] = {'info':info}
dev['color'] = color
height = devtl.phaseRowHeight(0, phase, dev['row'])
top = '%.6f' % ((dev['row']*height) + devtl.scaleH)
left = '%.6f' % (((dev['start']-t0)*100)/tTotal)
width = '%.6f' % (((dev['end']-dev['start'])*100)/tTotal)
length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
devtl.html += devtl.html_device.format(dev['id'],
devname+length+phase+'_mode', left, top, '%.3f'%height,
width, devname, ' '+cls, '')
rowtop = devtl.phaseRowTop(0, phase, dev['row'])
height = '%.6f' % (devtl.rowH / 2)
top = '%.6f' % (rowtop + devtl.scaleH + (devtl.rowH / 2))
if data.do_one_initcall:
if('ftrace' not in dev):
continue
cg = dev['ftrace']
large, stats = cgOverview(cg, 0.001)
devstats[dev['id']]['fstat'] = stats
for l in large:
left = '%f' % (((l.time-t0)*100)/tTotal)
width = '%f' % (l.length*100/tTotal)
title = '%s (%0.3fms)' % (l.name, l.length * 1000.0)
devtl.html += html_srccall.format(l.name, left,
top, height, width, title, 'x%d'%num)
num += 1
continue
if('ftraces' not in dev):
continue
for cg in dev['ftraces']:
left = '%f' % (((cg.start-t0)*100)/tTotal)
width = '%f' % ((cg.end-cg.start)*100/tTotal)
cglen = (cg.end - cg.start) * 1000.0
title = '%s (%0.3fms)' % (cg.name, cglen)
cg.id = 'x%d' % num
devtl.html += html_srccall.format(cg.name, left,
top, height, width, title, dev['id']+cg.id)
num += 1
# draw the time scale, try to make the number of labels readable
devtl.createTimeScale(t0, tMax, tTotal, 'boot')
devtl.html += '</div>\n'
# timeline is finished
devtl.html += '</div>\n</div>\n'
# draw a legend which describes the phases by color
devtl.html += '<div class="legend">\n'
pdelta = 20.0
pmargin = 36.0
for phase in data.phases:
order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
devtl.html += devtl.html_legend.format(order, \
data.dmesg[phase]['color'], phase+'_mode', phase[0])
devtl.html += '</div>\n'
if(sysvals.outfile == sysvals.htmlfile):
hf = open(sysvals.htmlfile, 'a')
else:
hf = open(sysvals.htmlfile, 'w')
# add the css if this is not an embedded run
extra = '\
.c1 {background:rgba(209,0,0,0.4);}\n\
.c2 {background:rgba(255,102,34,0.4);}\n\
.c3 {background:rgba(255,218,33,0.4);}\n\
.c4 {background:rgba(51,221,0,0.4);}\n\
.c5 {background:rgba(17,51,204,0.4);}\n\
.c6 {background:rgba(34,0,102,0.4);}\n\
.c7 {background:rgba(51,0,68,0.4);}\n\
.c8 {background:rgba(204,255,204,0.4);}\n\
.c9 {background:rgba(169,208,245,0.4);}\n\
.c10 {background:rgba(255,255,204,0.4);}\n\
.vt {transform:rotate(-60deg);transform-origin:0 0;}\n\
table.fstat {table-layout:fixed;padding:150px 15px 0 0;font-size:10px;column-width:30px;}\n\
.fstat th {width:55px;}\n\
.fstat td {text-align:left;width:35px;}\n\
.srccall {position:absolute;font-size:10px;z-index:7;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,#CCC,#969696);}\n\
.srccall:hover {color:white;font-weight:bold;border:1px solid white;}\n'
if(not sysvals.embedded):
aslib.addCSS(hf, sysvals, 1, False, extra)
# write the device timeline
hf.write(devtl.html)
# add boot specific html
statinfo = 'var devstats = {\n'
for n in sorted(devstats):
statinfo += '\t"%s": [\n\t\t"%s",\n' % (n, devstats[n]['info'])
if 'fstat' in devstats[n]:
funcs = devstats[n]['fstat']
for f in sorted(funcs, key=funcs.get, reverse=True):
if funcs[f][0] < 0.01 and len(funcs) > 10:
break
statinfo += '\t\t"%f|%s|%d",\n' % (funcs[f][0], f, funcs[f][1])
statinfo += '\t],\n'
statinfo += '};\n'
html = \
'<div id="devicedetailtitle"></div>\n'\
'<div id="devicedetail" style="display:none;">\n'\
'<div id="devicedetail0">\n'
for p in data.phases:
phase = data.dmesg[p]
html += devtl.html_phaselet.format(p+'_mode', '0', '100', phase['color'])
html += '</div>\n</div>\n'\
'<script type="text/javascript">\n'+statinfo+\
'</script>\n'
hf.write(html)
# add the callgraph html
if(sysvals.usecallgraph):
aslib.addCallgraphs(sysvals, hf, data)
# add the dmesg log as a hidden div
if sysvals.dmesglog:
hf.write('<div id="dmesglog" style="display:none;">\n')
for line in data.dmesgtext:
			line = line.replace('<', '&lt;').replace('>', '&gt;')
hf.write(line)
hf.write('</div>\n')
if(not sysvals.embedded):
# write the footer and close
aslib.addScriptCode(hf, [data])
hf.write('</body>\n</html>\n')
else:
# embedded out will be loaded in a page, skip the js
hf.write('<div id=bounds style=display:none>%f,%f</div>' % \
(data.start*1000, data.end*1000))
hf.close()
return True
# Function: updateCron
# Description:
# (restore=False) Set the tool to run automatically on reboot
# (restore=True) Restore the original crontab
def updateCron(restore=False):
if not restore:
sysvals.rootUser(True)
crondir = '/var/spool/cron/crontabs/'
if not os.path.exists(crondir):
crondir = '/var/spool/cron/'
if not os.path.exists(crondir):
doError('%s not found' % crondir)
cronfile = crondir+'root'
backfile = crondir+'root-analyze_boot-backup'
cmd = sysvals.getExec('crontab')
if not cmd:
doError('crontab not found')
# on restore: move the backup cron back into place
if restore:
if os.path.exists(backfile):
shutil.move(backfile, cronfile)
call([cmd, cronfile])
return
# backup current cron and install new one with reboot
if os.path.exists(cronfile):
shutil.move(cronfile, backfile)
else:
fp = open(backfile, 'w')
fp.close()
res = -1
try:
fp = open(backfile, 'r')
op = open(cronfile, 'w')
for line in fp:
if not sysvals.myCronJob(line):
op.write(line)
continue
fp.close()
op.write('@reboot python %s\n' % sysvals.cronjobCmdString())
op.close()
res = call([cmd, cronfile])
except Exception, e:
print 'Exception: %s' % str(e)
shutil.move(backfile, cronfile)
res = -1
if res != 0:
doError('crontab failed')
# Function: updateGrub
# Description:
# update grub.cfg for all kernels with our parameters
def updateGrub(restore=False):
# call update-grub on restore
if restore:
try:
call(sysvals.blexec, stderr=PIPE, stdout=PIPE,
env={'PATH': '.:/sbin:/usr/sbin:/usr/bin:/sbin:/bin'})
except Exception, e:
print 'Exception: %s\n' % str(e)
return
# extract the option and create a grub config without it
sysvals.rootUser(True)
tgtopt = 'GRUB_CMDLINE_LINUX_DEFAULT'
cmdline = ''
grubfile = '/etc/default/grub'
tempfile = '/etc/default/grub.analyze_boot'
shutil.move(grubfile, tempfile)
res = -1
try:
fp = open(tempfile, 'r')
op = open(grubfile, 'w')
cont = False
for line in fp:
line = line.strip()
if len(line) == 0 or line[0] == '#':
continue
opt = line.split('=')[0].strip()
if opt == tgtopt:
cmdline = line.split('=', 1)[1].strip('\\')
if line[-1] == '\\':
cont = True
elif cont:
cmdline += line.strip('\\')
if line[-1] != '\\':
cont = False
else:
op.write('%s\n' % line)
fp.close()
# if the target option value is in quotes, strip them
sp = '"'
val = cmdline.strip()
if val and (val[0] == '\'' or val[0] == '"'):
sp = val[0]
val = val.strip(sp)
cmdline = val
# append our cmd line options
if len(cmdline) > 0:
cmdline += ' '
cmdline += sysvals.kernelParams()
# write out the updated target option
op.write('\n%s=%s%s%s\n' % (tgtopt, sp, cmdline, sp))
op.close()
res = call(sysvals.blexec)
os.remove(grubfile)
except Exception, e:
print 'Exception: %s' % str(e)
res = -1
# cleanup
shutil.move(tempfile, grubfile)
if res != 0:
doError('update grub failed')
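# Worked example (illustrative): if /etc/default/grub contains
#   GRUB_CMDLINE_LINUX_DEFAULT="quiet splash"
# updateGrub() rewrites that line, appending this tool's parameters:
#   GRUB_CMDLINE_LINUX_DEFAULT="quiet splash initcall_debug log_buf_len=32M"
# (plus the ftrace options when enabled) and then runs the boot loader
# update command found by getBootLoader() to regenerate grub.cfg.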
# Function: updateKernelParams
# Description:
# update boot conf for all kernels with our parameters
def updateKernelParams(restore=False):
# find the boot loader
sysvals.getBootLoader()
if sysvals.bootloader == 'grub':
updateGrub(restore)
# Function: doError
# Description:
#	 generic error function for catastrophic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help=False):
if help == True:
printHelp()
print 'ERROR: %s\n' % msg
sys.exit()
# Function: printHelp
# Description:
# print out the help text
def printHelp():
print('')
print('%s v%s' % (sysvals.title, sysvals.version))
print('Usage: bootgraph <options> <command>')
print('')
print('Description:')
print(' This tool reads in a dmesg log of linux kernel boot and')
print(' creates an html representation of the boot timeline up to')
print(' the start of the init process.')
print('')
print(' If no specific command is given the tool reads the current dmesg')
print(' and/or ftrace log and creates a timeline')
print('')
print(' Generates output files in subdirectory: boot-yymmdd-HHMMSS')
print(' HTML output: <hostname>_boot.html')
print(' raw dmesg output: <hostname>_boot_dmesg.txt')
print(' raw ftrace output: <hostname>_boot_ftrace.txt')
print('')
print('Options:')
print(' -h Print this help text')
print(' -v Print the current tool version')
print(' -addlogs Add the dmesg log to the html output')
print(' -o name Overrides the output subdirectory name when running a new test')
print(' default: boot-{date}-{time}')
print(' [advanced]')
print(' -f Use ftrace to add function detail (default: disabled)')
print(' -callgraph Add callgraph detail, can be very large (default: disabled)')
print(' -maxdepth N limit the callgraph data to N call levels (default: 2)')
print(' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)')
print(' -timeprec N Number of significant digits in timestamps (0:S, 3:ms, [6:us])')
print(' -expandcg pre-expand the callgraph data in the html output (default: disabled)')
print(' -func list Limit ftrace to comma-delimited list of functions (default: do_one_initcall)')
print(' -cgfilter S Filter the callgraph output in the timeline')
print(' -bl name Use the following boot loader for kernel params (default: grub)')
print(' -reboot Reboot the machine automatically and generate a new timeline')
print(' -manual Show the steps to generate a new timeline manually (used with -reboot)')
print('')
print('Other commands:')
print(' -flistall Print all functions capable of being captured in ftrace')
print(' -sysinfo Print out system info extracted from BIOS')
print(' [redo]')
print(' -dmesg file Create HTML output using dmesg input (used with -ftrace)')
print(' -ftrace file Create HTML output using ftrace input (used with -dmesg)')
print('')
return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
# loop through the command line arguments
cmd = ''
testrun = True
simplecmds = ['-sysinfo', '-kpupdate', '-flistall', '-checkbl']
args = iter(sys.argv[1:])
for arg in args:
if(arg == '-h'):
printHelp()
sys.exit()
elif(arg == '-v'):
print("Version %s" % sysvals.version)
sys.exit()
elif(arg in simplecmds):
cmd = arg[1:]
elif(arg == '-f'):
sysvals.useftrace = True
elif(arg == '-callgraph'):
sysvals.useftrace = True
sysvals.usecallgraph = True
elif(arg == '-mincg'):
sysvals.mincglen = aslib.getArgFloat('-mincg', args, 0.0, 10000.0)
elif(arg == '-cgfilter'):
try:
val = args.next()
except:
doError('No callgraph functions supplied', True)
sysvals.setDeviceFilter(val)
elif(arg == '-bl'):
try:
val = args.next()
except:
doError('No boot loader name supplied', True)
if val.lower() not in ['grub']:
doError('Unknown boot loader: %s' % val, True)
sysvals.bootloader = val.lower()
elif(arg == '-timeprec'):
sysvals.setPrecision(aslib.getArgInt('-timeprec', args, 0, 6))
elif(arg == '-maxdepth'):
sysvals.max_graph_depth = aslib.getArgInt('-maxdepth', args, 0, 1000)
elif(arg == '-func'):
try:
val = args.next()
except:
doError('No filter functions supplied', True)
sysvals.useftrace = True
sysvals.usecallgraph = True
sysvals.rootCheck(True)
sysvals.setGraphFilter(val)
elif(arg == '-ftrace'):
try:
val = args.next()
except:
doError('No ftrace file supplied', True)
if(os.path.exists(val) == False):
doError('%s does not exist' % val)
testrun = False
sysvals.ftracefile = val
elif(arg == '-addlogs'):
sysvals.dmesglog = True
elif(arg == '-expandcg'):
sysvals.cgexp = True
elif(arg == '-dmesg'):
try:
val = args.next()
except:
doError('No dmesg file supplied', True)
if(os.path.exists(val) == False):
doError('%s does not exist' % val)
if(sysvals.htmlfile == val or sysvals.outfile == val):
doError('Output filename collision')
testrun = False
sysvals.dmesgfile = val
elif(arg == '-o'):
try:
val = args.next()
except:
doError('No subdirectory name supplied', True)
sysvals.testdir = sysvals.setOutputFolder(val)
elif(arg == '-reboot'):
sysvals.reboot = True
elif(arg == '-manual'):
sysvals.reboot = True
sysvals.manual = True
# remaining options are only for cron job use
elif(arg == '-cronjob'):
sysvals.iscronjob = True
else:
doError('Invalid argument: '+arg, True)
# compatibility errors and access checks
if(sysvals.iscronjob and (sysvals.reboot or \
sysvals.dmesgfile or sysvals.ftracefile or cmd)):
doError('-cronjob is meant for batch purposes only')
if(sysvals.reboot and (sysvals.dmesgfile or sysvals.ftracefile)):
doError('-reboot and -dmesg/-ftrace are incompatible')
if cmd or sysvals.reboot or sysvals.iscronjob or testrun:
sysvals.rootCheck(True)
if (testrun and sysvals.useftrace) or cmd == 'flistall':
if not sysvals.verifyFtrace():
doError('Ftrace is not properly enabled')
# run utility commands
sysvals.cpuInfo()
if cmd != '':
if cmd == 'kpupdate':
updateKernelParams()
elif cmd == 'flistall':
for f in sysvals.getBootFtraceFilterFunctions():
print f
elif cmd == 'checkbl':
sysvals.getBootLoader()
print 'Boot Loader: %s\n%s' % (sysvals.bootloader, sysvals.blexec)
elif(cmd == 'sysinfo'):
sysvals.printSystemInfo()
sys.exit()
# reboot: update grub, setup a cronjob, and reboot
if sysvals.reboot:
if (sysvals.useftrace or sysvals.usecallgraph) and \
not sysvals.checkFtraceKernelVersion():
doError('Ftrace functionality requires kernel v4.10 or newer')
if not sysvals.manual:
updateKernelParams()
updateCron()
call('reboot')
else:
sysvals.manualRebootRequired()
sys.exit()
# cronjob: remove the cronjob, grub changes, and disable ftrace
if sysvals.iscronjob:
updateCron(True)
updateKernelParams(True)
try:
sysvals.fsetVal('0', 'tracing_on')
except:
pass
# testrun: generate copies of the logs
if testrun:
retrieveLogs()
else:
sysvals.setOutputFile()
# process the log data
if sysvals.dmesgfile:
data = parseKernelLog()
if(not data.valid):
doError('No initcall data found in %s' % sysvals.dmesgfile)
if sysvals.useftrace and sysvals.ftracefile:
parseTraceLog(data)
else:
doError('dmesg file required')
print(' Host: %s' % sysvals.hostname)
print(' Test time: %s' % sysvals.testtime)
print(' Boot time: %s' % data.boottime)
print('Kernel Version: %s' % sysvals.kernel)
print(' Kernel start: %.3f' % (data.start * 1000))
print('Usermode start: %.3f' % (data.tUserMode * 1000))
print('Last Init Call: %.3f' % (data.end * 1000))
# handle embedded output logs
if(sysvals.outfile and sysvals.embedded):
fp = open(sysvals.outfile, 'w')
fp.write('pass %s initstart %.3f end %.3f boot %s\n' %
(data.valid, data.tUserMode*1000, data.end*1000, data.boottime))
fp.close()
createBootGraph(data)
# if running as root, change output dir owner to sudo_user
if testrun and os.path.isdir(sysvals.testdir) and \
os.getuid() == 0 and 'SUDO_USER' in os.environ:
cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1'
call(cmd.format(os.environ['SUDO_USER'], sysvals.testdir), shell=True)
| gpl-2.0 | -6,030,210,531,176,947,000 | 30.125494 | 222 | 0.640147 | false |
sqlobject/sqlobject | sqlobject/sqlite/sqliteconnection.py | 2 | 16587 | import base64
import os
try:
from _thread import get_ident
except ImportError:
from thread import get_ident
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
from sqlobject import col
from sqlobject import dberrors
from sqlobject.dbconnection import DBAPI, Boolean
sqlite2_Binary = None
class ErrorMessage(str):
def __new__(cls, e):
obj = str.__new__(cls, e.args[0])
obj.code = None
obj.module = e.__module__
obj.exception = e.__class__.__name__
return obj
class SQLiteConnection(DBAPI):
supportTransactions = True
dbName = 'sqlite'
schemes = [dbName]
def __init__(self, filename, autoCommit=1, **kw):
drivers = kw.pop('driver', None) or \
'supersqlite,pysqlite2,sqlite3,sqlite'
for driver in drivers.split(','):
driver = driver.strip()
if not driver:
continue
try:
if driver == 'supersqlite':
from supersqlite import sqlite3 as sqlite
self.using_sqlite2 = True
elif driver in ('sqlite2', 'pysqlite2'):
from pysqlite2 import dbapi2 as sqlite
self.using_sqlite2 = True
elif driver == 'sqlite3':
import sqlite3 as sqlite
self.using_sqlite2 = True
elif driver in ('sqlite', 'sqlite1'):
import sqlite
self.using_sqlite2 = False
else:
raise ValueError(
'Unknown SQLite driver "%s", '
'expected supersqlite, pysqlite2, sqlite3 '
'or sqlite' % driver)
except ImportError:
pass
else:
break
else:
raise ImportError(
'Cannot find an SQLite driver, tried %s' % drivers)
if self.using_sqlite2:
sqlite.encode = base64.b64encode
sqlite.decode = base64.b64decode
self.module = sqlite
self.filename = filename # full path to sqlite-db-file
self._memory = filename == ':memory:'
if self._memory and not self.using_sqlite2:
raise ValueError("You must use sqlite2 to use in-memory databases")
# connection options
opts = {}
if self.using_sqlite2:
if autoCommit:
opts["isolation_level"] = None
global sqlite2_Binary
if sqlite2_Binary is None:
sqlite2_Binary = sqlite.Binary
sqlite.Binary = lambda s: sqlite2_Binary(sqlite.encode(s))
if 'factory' in kw:
factory = kw.pop('factory')
if isinstance(factory, str):
factory = globals()[factory]
opts['factory'] = factory(sqlite)
else:
opts['autocommit'] = Boolean(autoCommit)
if 'encoding' in kw:
opts['encoding'] = kw.pop('encoding')
if 'mode' in kw:
opts['mode'] = int(kw.pop('mode'), 0)
if 'timeout' in kw:
if self.using_sqlite2:
opts['timeout'] = float(kw.pop('timeout'))
else:
opts['timeout'] = int(float(kw.pop('timeout')) * 1000)
if 'check_same_thread' in kw:
opts["check_same_thread"] = Boolean(kw.pop('check_same_thread'))
        # use only one connection for sqlite - supports multiple
# cursors per connection
self._connOptions = opts
self.use_table_info = Boolean(kw.pop("use_table_info", True))
DBAPI.__init__(self, **kw)
self._threadPool = {}
self._threadOrigination = {}
if self._memory:
self.makeMemoryConnection()
@classmethod
def _connectionFromParams(cls, user, password, host, port, path, args):
assert host is None and port is None, (
"SQLite can only be used locally (with a URI like "
"sqlite:/file or sqlite:///file, not sqlite://%s%s)" %
(host, port and ':%r' % port or ''))
assert user is None and password is None, (
"You may not provide usernames or passwords for SQLite "
"databases")
if path == "/:memory:":
path = ":memory:"
return cls(filename=path, **args)
def oldUri(self):
path = self.filename
if path == ":memory:":
path = "/:memory:"
else:
path = "//" + path
return 'sqlite:%s' % path
def uri(self):
path = self.filename
if path == ":memory:":
path = "/:memory:"
else:
if path.startswith('/'):
path = "//" + path
else:
path = "///" + path
path = quote(path)
return 'sqlite:%s' % path
def getConnection(self):
# SQLite can't share connections between threads, and so can't
# pool connections. Since we are isolating threads here, we
# don't have to worry about locking as much.
if self._memory:
conn = self.makeConnection()
self._connectionNumbers[id(conn)] = self._connectionCount
self._connectionCount += 1
return conn
threadid = get_ident()
if (self._pool is not None and threadid in self._threadPool):
conn = self._threadPool[threadid]
del self._threadPool[threadid]
if conn in self._pool:
self._pool.remove(conn)
else:
conn = self.makeConnection()
if self._pool is not None:
self._threadOrigination[id(conn)] = threadid
self._connectionNumbers[id(conn)] = self._connectionCount
self._connectionCount += 1
if self.debug:
s = 'ACQUIRE'
if self._pool is not None:
s += ' pool=[%s]' % ', '.join(
[str(self._connectionNumbers[id(v)]) for v in self._pool])
self.printDebug(conn, s, 'Pool')
return conn
def releaseConnection(self, conn, explicit=False):
if self._memory:
return
threadid = self._threadOrigination.get(id(conn))
DBAPI.releaseConnection(self, conn, explicit=explicit)
if (self._pool is not None and threadid
and threadid not in self._threadPool):
self._threadPool[threadid] = conn
else:
if self._pool and conn in self._pool:
self._pool.remove(conn)
conn.close()
def _setAutoCommit(self, conn, auto):
if self.using_sqlite2:
if auto:
conn.isolation_level = None
else:
conn.isolation_level = ""
else:
conn.autocommit = auto
def _setIsolationLevel(self, conn, level):
if not self.using_sqlite2:
return
conn.isolation_level = level
def makeMemoryConnection(self):
self._memoryConn = self.module.connect(
self.filename, **self._connOptions)
# Convert text data from SQLite to str, not unicode -
# SQLObject converts it to unicode itself.
self._memoryConn.text_factory = str
def makeConnection(self):
if self._memory:
return self._memoryConn
conn = self.module.connect(self.filename, **self._connOptions)
conn.text_factory = str # Convert text data to str, not unicode
return conn
def close(self):
DBAPI.close(self)
self._threadPool = {}
if self._memory:
self._memoryConn.close()
self.makeMemoryConnection()
def _executeRetry(self, conn, cursor, query):
if self.debug:
self.printDebug(conn, query, 'QueryR')
try:
return cursor.execute(query)
except self.module.OperationalError as e:
raise dberrors.OperationalError(ErrorMessage(e))
except self.module.IntegrityError as e:
msg = ErrorMessage(e)
if msg.startswith('column') and msg.endswith('not unique') \
or msg.startswith('UNIQUE constraint failed:'):
raise dberrors.DuplicateEntryError(msg)
else:
raise dberrors.IntegrityError(msg)
except self.module.InternalError as e:
raise dberrors.InternalError(ErrorMessage(e))
except self.module.ProgrammingError as e:
raise dberrors.ProgrammingError(ErrorMessage(e))
except self.module.DataError as e:
raise dberrors.DataError(ErrorMessage(e))
except self.module.NotSupportedError as e:
raise dberrors.NotSupportedError(ErrorMessage(e))
except self.module.DatabaseError as e:
raise dberrors.DatabaseError(ErrorMessage(e))
except self.module.InterfaceError as e:
raise dberrors.InterfaceError(ErrorMessage(e))
except self.module.Warning as e:
raise Warning(ErrorMessage(e))
except self.module.Error as e:
raise dberrors.Error(ErrorMessage(e))
def _queryInsertID(self, conn, soInstance, id, names, values):
table = soInstance.sqlmeta.table
idName = soInstance.sqlmeta.idName
c = conn.cursor()
if id is not None:
names = [idName] + names
values = [id] + values
q = self._insertSQL(table, names, values)
if self.debug:
self.printDebug(conn, q, 'QueryIns')
self._executeRetry(conn, c, q)
# lastrowid is a DB-API extension from "PEP 0249":
if id is None:
id = int(c.lastrowid)
c.close()
if self.debugOutput:
self.printDebug(conn, id, 'QueryIns', 'result')
return id
def _insertSQL(self, table, names, values):
if not names:
assert not values
# INSERT INTO table () VALUES () isn't allowed in
# SQLite (though it is in other databases)
return ("INSERT INTO %s VALUES (NULL)" % table)
else:
return DBAPI._insertSQL(self, table, names, values)
@classmethod
def _queryAddLimitOffset(cls, query, start, end):
if not start:
return "%s LIMIT %i" % (query, end)
if not end:
return "%s LIMIT 0 OFFSET %i" % (query, start)
return "%s LIMIT %i OFFSET %i" % (query, end - start, start)
def createColumn(self, soClass, col):
return col.sqliteCreateSQL()
def createReferenceConstraint(self, soClass, col):
return None
def createIDColumn(self, soClass):
return self._createIDColumn(soClass.sqlmeta)
def _createIDColumn(self, sqlmeta):
if sqlmeta.idType == str:
return '%s TEXT PRIMARY KEY' % sqlmeta.idName
return '%s INTEGER PRIMARY KEY AUTOINCREMENT' % sqlmeta.idName
def joinSQLType(self, join):
return 'INT NOT NULL'
def tableExists(self, tableName):
result = self.queryOne(
"SELECT tbl_name FROM sqlite_master "
"WHERE type='table' AND tbl_name = '%s'" % tableName)
# turn it into a boolean:
return not not result
def createIndexSQL(self, soClass, index):
return index.sqliteCreateIndexSQL(soClass)
def addColumn(self, tableName, column):
self.query('ALTER TABLE %s ADD COLUMN %s' %
(tableName,
column.sqliteCreateSQL()))
self.query('VACUUM')
def delColumn(self, sqlmeta, column):
self.recreateTableWithoutColumn(sqlmeta, column)
def recreateTableWithoutColumn(self, sqlmeta, column):
new_name = sqlmeta.table + '_ORIGINAL'
self.query('ALTER TABLE %s RENAME TO %s' % (sqlmeta.table, new_name))
cols = [self._createIDColumn(sqlmeta)] + \
[self.createColumn(None, col)
for col in sqlmeta.columnList if col.name != column.name]
cols = ",\n".join([" %s" % c for c in cols])
self.query('CREATE TABLE %s (\n%s\n)' % (sqlmeta.table, cols))
all_columns = ', '.join(
[sqlmeta.idName] + [col.dbName for col in sqlmeta.columnList])
self.query('INSERT INTO %s (%s) SELECT %s FROM %s' % (
sqlmeta.table, all_columns, all_columns, new_name))
self.query('DROP TABLE %s' % new_name)
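    # Sketch of the statement sequence for dropping column "age" from table
    # "person" (identifiers hypothetical; this workaround exists because
    # SQLite historically had no ALTER TABLE ... DROP COLUMN):
    #   ALTER TABLE person RENAME TO person_ORIGINAL
    #   CREATE TABLE person (<id column>, <remaining columns>)
    #   INSERT INTO person (<cols>) SELECT <cols> FROM person_ORIGINAL
    #   DROP TABLE person_ORIGINAL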
def columnsFromSchema(self, tableName, soClass):
if self.use_table_info:
return self._columnsFromSchemaTableInfo(tableName, soClass)
else:
return self._columnsFromSchemaParse(tableName, soClass)
def _columnsFromSchemaTableInfo(self, tableName, soClass):
colData = self.queryAll("PRAGMA table_info(%s)" % tableName)
results = []
for index, field, t, nullAllowed, default, key in colData:
if field == soClass.sqlmeta.idName:
continue
colClass, kw = self.guessClass(t)
if default == 'NULL':
nullAllowed = True
default = None
kw['name'] = soClass.sqlmeta.style.dbColumnToPythonAttr(field)
kw['dbName'] = field
kw['notNone'] = not nullAllowed
kw['default'] = default
# @@ skip key...
# @@ skip extra...
results.append(colClass(**kw))
return results
def _columnsFromSchemaParse(self, tableName, soClass):
colData = self.queryOne(
"SELECT sql FROM sqlite_master WHERE type='table' AND name='%s'" %
tableName)
if not colData:
raise ValueError(
'The table %s was not found in the database. Load failed.' %
tableName)
colData = colData[0].split('(', 1)[1].strip()[:-2]
while True:
start = colData.find('(')
if start == -1:
break
end = colData.find(')', start)
if end == -1:
break
colData = colData[:start] + colData[end + 1:]
results = []
for colDesc in colData.split(','):
parts = colDesc.strip().split(' ', 2)
field = parts[0].strip()
# skip comments
if field.startswith('--'):
continue
# get rid of enclosing quotes
if field[0] == field[-1] == '"':
field = field[1:-1]
if field == getattr(soClass.sqlmeta, 'idName', 'id'):
continue
colClass, kw = self.guessClass(parts[1].strip())
if len(parts) == 2:
index_info = ''
else:
index_info = parts[2].strip().upper()
kw['name'] = soClass.sqlmeta.style.dbColumnToPythonAttr(field)
kw['dbName'] = field
import re
nullble = re.search(r'(\b\S*)\sNULL', index_info)
default = re.search(
r"DEFAULT\s((?:\d[\dA-FX.]*)|(?:'[^']*')|(?:#[^#]*#))",
index_info)
kw['notNone'] = nullble and nullble.group(1) == 'NOT'
kw['default'] = default and default.group(1)
# @@ skip key...
# @@ skip extra...
results.append(colClass(**kw))
return results
def guessClass(self, t):
t = t.upper()
if t.find('INT') >= 0:
return col.IntCol, {}
elif t.find('TEXT') >= 0 or t.find('CHAR') >= 0 or t.find('CLOB') >= 0:
return col.StringCol, {'length': 2 ** 32 - 1}
elif t.find('BLOB') >= 0:
return col.BLOBCol, {"length": 2 ** 32 - 1}
elif t.find('REAL') >= 0 or t.find('FLOAT') >= 0:
return col.FloatCol, {}
elif t.find('DECIMAL') >= 0:
return col.DecimalCol, {'size': None, 'precision': None}
elif t.find('BOOL') >= 0:
return col.BoolCol, {}
else:
return col.Col, {}
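    # Examples (sketch) of the type mapping above:
    #   guessClass('INTEGER')      -> (col.IntCol, {})
    #   guessClass('VARCHAR(255)') -> (col.StringCol, {'length': 2 ** 32 - 1})
    #   guessClass('NUMERIC')      -> (col.Col, {})   # generic fallback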
def listTables(self):
return [v[0] for v in self.queryAll(
"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name")]
def listDatabases(self):
# The pragma returns a list of (index, name, filename)
return [v[1] for v in self.queryAll("PRAGMA database_list")]
def createEmptyDatabase(self):
if self._memory:
return
open(self.filename, 'w').close()
def dropDatabase(self):
if self._memory:
return
os.unlink(self.filename)
| lgpl-2.1 | -5,854,149,150,050,310,000 | 36.358108 | 79 | 0.544764 | false |
OrmondHugh/2048Bot | src/src/utility.py | 1 | 3044 | import readBoard as rb
import State
import copy
def getPossibilities(state):
possibilities = []
#Loop through each tile to find the spaces that can be filled by a spawning tile
for i in range(4):
for j in range(4):
#If we find an empty space, add a child to children where a two is spawned, and a child where a four is spawned
			if state.board[i][j] == ' ':
for value in range(2,5,2):
child = copy.deepcopy(state)
if value == 4:
						child.fourSpawned = True
child.board[i][j] = value
possibilities.append(child)
#Assign the probability of each state occuring to it's object
for node in possibilities:
if node.fourSpawned:
node.probability = 0.1 / len(possibilities)
else:
node.probability = 0.9 / len(possibilities)
return possibilities
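# Worked example of the weighting above: with 4 empty cells the list holds
# 8 children, so each 2-spawn child gets probability 0.9/8 = 0.1125 and each
# 4-spawn child 0.1/8 = 0.0125. These sum to 0.5, a constant scale factor,
# so the relative ranking of moves by expected value is unaffected.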
def getUtility(board, parentBoard):
utility = 0
#Count the empty spaces
emptySpacesCount = 0
for i in range(4):
for j in range(4):
if board[i][j] == ' ':
emptySpacesCount = emptySpacesCount + 1
	#A full board is very likely to either be a losing state, or be close to one
	if emptySpacesCount == 0:
		return 0
	#60 of the total utility is allocated to how clear the board is.
	#A full 60 is awarded if there are at least 7 clear squares; if there are fewer,
	#utility is added based on how much of the board is clear
	elif emptySpacesCount >= 7:
		utility = utility + 60.0
	else:
		utility = utility + 60.0*(emptySpacesCount/7)
	#Find the biggest tile. If it is in the top right, add 40 to utility
biggest = 0
for i in range(4):
for j in range(4):
if board[i][j] != ' ' and board[i][j] > biggest:
biggest = board[i][j]
if board[0][3] == biggest:
utility = utility + 40.0
#If we also have a full top line of different values, add more utility
	if board[0][2] != ' ' and board[0][2] != board[0][3]:
		utility = utility + 10.0
	if board[0][1] != ' ' and board[0][1] != board[0][2]:
		utility = utility + 10.0
	if board[0][0] != ' ' and board[0][0] != board[0][1]:
		utility = utility + 10.0
#Give utility for making the main tiles at the top bigger
if board[0][3] == parentBoard[0][3] * 2:
utility = utility + 35.0
if board[0][2] == parentBoard[0][2] * 2:
utility = utility + 30.0
if board[0][1] != ' ' and board[0][1] == parentBoard[0][1] * 2:
utility = utility + 25.0
if board[0][0] != ' ' and board[0][0] == parentBoard[0][0] * 2:
utility = utility + 15.0
return utility
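# Worked example (hypothetical board): 8 empty squares (+60), the biggest
# tile in the top-right corner (+40), a full top row of distinct values
# (+10 three times), and board[0][3] doubled since the parent board (+35)
# gives a utility of 60 + 40 + 30 + 35 = 165.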
def getExpectedValue(state, possibilities):
EV = 0
for child in possibilities:
EV = EV + (child.probability * getUtility(child.board, state.board))
return EV
if __name__ == '__main__':
currentBoard = State.State(rb.readBoard(), '', False, 0, 0)
children = currentBoard.getChildren()
for child in children:
		child.EV = getExpectedValue(child, getPossibilities(child))
print("Direction: ", child.path)
print("Utility: ", child.EV) | mit | 3,313,365,850,199,771,600 | 26.46729 | 114 | 0.624179 | false |
mcfletch/AutobahnPython | autobahn/autobahn/websocket/interfaces.py | 10 | 11716 | ###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
__all__ = ['IWebSocketChannel',
'IWebSocketChannelFrameApi',
'IWebSocketChannelStreamingApi']
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class IWebSocketChannel(object):
"""
A WebSocket channel is a bidirectional, full-duplex, ordered, reliable message channel
over a WebSocket connection as specified in RFC6455.
This interface defines a message-based API to WebSocket plus auxiliary hooks
and methods.
"""
@abc.abstractmethod
def onConnect(self, requestOrResponse):
"""
Callback fired during WebSocket opening handshake when a client connects (with
request from client) or when server connection established (with response from
server).
:param requestOrResponse: Connection request or response.
:type requestOrResponse: Instance of :class:`autobahn.websocket.protocol.ConnectionRequest`
or :class:`autobahn.websocket.protocol.ConnectionResponse`.
"""
@abc.abstractmethod
def onOpen(self):
"""
Callback fired when the initial WebSocket opening handshake was completed.
You now can send and receive WebSocket messages.
"""
@abc.abstractmethod
def sendMessage(self, payload, isBinary = False, fragmentSize = None, sync = False, doNotCompress = False):
"""
Send a WebSocket message.
      You can send text or binary messages, and optionally specify a payload fragment size.
When the latter is given, the payload will be split up into WebSocket frames each with
payload length `<= fragmentSize`.
:param payload: The message payload.
:type payload: bytes
:param isBinary: `True` iff payload is binary, else the payload must be UTF-8 encoded text.
:type isBinary: bool
:param fragmentSize: Fragment message into WebSocket fragments of this size.
:type fragmentSize: int
:param sync: Iff `True`, try to force data onto the wire immediately. Note: do NOT use
this normally unless you know what you are doing. Performance likely will
suffer significantly. This feature is mainly here for use by Autobahn|Testsuite.
:type sync: bool
:param doNotCompress: Iff `True`, never compress this message. This only applies to
Hybi-Mode and only when WebSocket compression has been negotiated on
                            the WebSocket connection. Use this when you know the payload is
                            incompressible (e.g. encrypted or already compressed).
:type doNotCompress: bool
"""
@abc.abstractmethod
def onMessage(self, payload, isBinary):
"""
Callback fired when a complete WebSocket message was received.
:param payload: Message payload (UTF-8 encoded text or binary). Can also be empty when
the WebSocket message contained no payload.
:type payload: bytes
:param isBinary: `True` iff payload is binary, else the payload is UTF-8 encoded text.
:type isBinary: bool
"""
@abc.abstractmethod
def sendClose(self, code = None, reason = None):
"""
Starts a WebSocket closing handshake tearing down the WebSocket connection.
:param code: An optional close status code (`1000` for normal close or `3000-4999` for
application specific close).
:type code: int
:param reason: An optional close reason (a string that when present, a status
code MUST also be present).
:type reason: str
"""
@abc.abstractmethod
def onClose(self, wasClean, code, reason):
"""
Callback fired when the WebSocket connection has been closed (WebSocket closing
handshake has been finished or the connection was closed uncleanly).
:param wasClean: True, iff the WebSocket connection was closed cleanly.
:type wasClean: bool
:param code: None or close status code (as sent by the WebSocket peer).
:type code: int
:param reason: None or close reason (as sent by the WebSocket peer).
:type reason: str
"""
@abc.abstractmethod
def sendPreparedMessage(self, preparedMsg):
"""
Send a message that was previously prepared with :func:`autobahn.websocket.protocol.WebSocketFactory.prepareMessage`.
      :param preparedMsg: A previously prepared message.
      :type preparedMsg: Instance of :class:`autobahn.websocket.protocol.PreparedMessage`.
"""
@abc.abstractmethod
def sendPing(self, payload = None):
"""
Send a WebSocket ping to the peer.
      A peer is expected to pong back the payload as soon as "practical". When more than
one ping is outstanding at a peer, the peer may elect to respond only to the last ping.
:param payload: An (optional) arbitrary payload of length `<126` octets.
:type payload: bytes
"""
@abc.abstractmethod
def onPing(self, payload):
"""
Callback fired when a WebSocket ping was received. A default implementation responds
by sending a WebSocket pong.
:param payload: Payload of ping (when there was any). Can be arbitrary, up to `125` octets.
:type payload: bytes
"""
@abc.abstractmethod
def sendPong(self, payload = None):
"""
Send a WebSocket pong to the peer.
A WebSocket pong may be sent unsolicited. This serves as a unidirectional heartbeat.
A response to an unsolicited pong is "not expected".
:param payload: An (optional) arbitrary payload of length < 126 octets.
:type payload: bytes
"""
@abc.abstractmethod
def onPong(self, payload):
"""
Callback fired when a WebSocket pong was received. A default implementation does nothing.
:param payload: Payload of pong (when there was any). Can be arbitrary, up to 125 octets.
:type payload: bytes
"""
class IWebSocketChannelFrameApi(IWebSocketChannel):
"""
Frame-based API to a WebSocket channel.
"""
@abc.abstractmethod
def onMessageBegin(self, isBinary):
"""
Callback fired when receiving of a new WebSocket message has begun.
:param isBinary: `True` iff payload is binary, else the payload is UTF-8 encoded text.
:type isBinary: bool
"""
@abc.abstractmethod
def onMessageFrame(self, payload):
"""
Callback fired when a complete WebSocket message frame for a previously begun
WebSocket message has been received.
:param payload: Message frame payload (a list of chunks received).
:type payload: list of bytes
"""
@abc.abstractmethod
def onMessageEnd(self):
"""
Callback fired when a WebSocket message has been completely received (the last
WebSocket frame for that message has been received).
"""
@abc.abstractmethod
def beginMessage(self, isBinary = False, doNotCompress = False):
"""
Begin sending a new WebSocket message.
:param isBinary: `True` iff payload is binary, else the payload must be UTF-8 encoded text.
:type isBinary: bool
:param doNotCompress: Iff `True`, never compress this message. This only applies to
Hybi-Mode and only when WebSocket compression has been negotiated on
the WebSocket connection. Use when you know the payload
uncompressible (e.g. encrypted or already compressed).
:type doNotCompress: bool
"""
@abc.abstractmethod
def sendMessageFrame(self, payload, sync = False):
"""
When a message has been previously begun, send a complete message frame in one go.
:param payload: The message frame payload. When sending a text message, the payload must
be UTF-8 encoded already.
:type payload: bytes
:param sync: Iff `True`, try to force data onto the wire immediately. Note: do NOT use
this normally unless you know what you are doing. Performance likely will
suffer significantly. This feature is mainly here for use by Autobahn|Testsuite.
:type sync: bool
"""
@abc.abstractmethod
def endMessage(self):
"""
End a message previously begun message. No more frames may be sent (for that message).
You have to begin a new message before sending again.
"""
class IWebSocketChannelStreamingApi(IWebSocketChannelFrameApi):
"""
Streaming API to a WebSocket channel.
"""
@abc.abstractmethod
def onMessageFrameBegin(self, length):
"""
Callback fired when receiving a new message frame has begun.
A default implementation will prepare to buffer message frame data.
:param length: Payload length of message frame which is subsequently received.
:type length: int
"""
@abc.abstractmethod
def onMessageFrameData(self, payload):
"""
Callback fired when receiving data within a previously begun message frame.
A default implementation will buffer data for frame.
:param payload: Partial payload for message frame.
:type payload: bytes
"""
@abc.abstractmethod
def onMessageFrameEnd(self):
"""
Callback fired when a previously begun message frame has been completely received.
A default implementation will flatten the buffered frame data and
fire `onMessageFrame`.
"""
@abc.abstractmethod
def beginMessageFrame(self, length):
"""
Begin sending a new message frame.
:param length: Length of the frame which is to be started. Must be `>= 0` and `<= 2^63`.
:type length: int
"""
@abc.abstractmethod
def sendMessageFrameData(self, payload, sync = False):
"""
Send out data when within a message frame (message was begun, frame was begun).
Note that the frame is automatically ended when enough data has been sent.
In other words, there is no `endMessageFrame`, since you have begun the frame
specifying the frame length, which implicitly defined the frame end. This is different
      from messages, which you begin *and* end explicitly, since a message can contain
an unlimited number of frames.
:param payload: Frame payload to send.
:type payload: bytes
:param sync: Iff `True`, try to force data onto the wire immediately. Note: do NOT use
this normally unless you know what you are doing. Performance likely will
suffer significantly. This feature is mainly here for use by Autobahn|Testsuite.
:type sync: bool
      :returns: int -- When the currently sent message frame is still incomplete,
                       returns octets remaining to be sent. When the frame is complete,
                       returns `0`. A negative return value indicates the amount of
                       unconsumed data in the payload argument.
"""
| apache-2.0 | 7,840,253,424,867,178,000 | 35.049231 | 123 | 0.659696 | false |
jgelens/AutobahnTestSuite | autobahntestsuite/autobahntestsuite/case/case7_1_3.py | 14 | 1670 | ###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case7_1_3(Case):
DESCRIPTION = """Send a ping after close message"""
EXPECTATION = """Clean close with normal code, no pong."""
def init(self):
self.suppressClose = True
def onConnectionLost(self, failedByMe):
Case.onConnectionLost(self, failedByMe)
if self.behaviorClose == Case.WRONG_CODE:
self.behavior = Case.FAILED
self.passed = False
self.result = self.resultClose
def onOpen(self):
payload = "Hello World!"
self.expected[Case.OK] = []
self.expectedClose = {"closedByMe":True,"closeCode":[self.p.CLOSE_STATUS_CODE_NORMAL],"requireClean":True}
#self.p.sendFrame(opcode = 1, payload = payload)
self.p.sendClose(self.p.CLOSE_STATUS_CODE_NORMAL)
self.p.sendFrame(opcode = 9)
self.p.killAfter(1)
| apache-2.0 | 7,240,432,347,486,264,000 | 33.531915 | 112 | 0.590419 | false |
KyleMao/simple-ner-ie | .ropeproject/config.py | 387 | 3461 | # The default ``config.py``
def set_prefs(prefs):
"""This function is called before opening the project"""
# Specify which files and folders to ignore in the project.
# Changes to ignored resources are not added to the history and
# VCSs. Also they are not returned in `Project.get_files()`.
# Note that ``?`` and ``*`` match all characters but slashes.
# '*.pyc': matches 'test.pyc' and 'pkg/test.pyc'
# 'mod*.pyc': matches 'test/mod1.pyc' but not 'mod/1.pyc'
# '.svn': matches 'pkg/.svn' and all of its children
# 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o'
# 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o'
prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject',
'.hg', '.svn', '_svn', '.git']
# Specifies which files should be considered python files. It is
# useful when you have scripts inside your project. Only files
# ending with ``.py`` are considered to be python files by
# default.
#prefs['python_files'] = ['*.py']
# Custom source folders: By default rope searches the project
# for finding source folders (folders that should be searched
# for finding modules). You can add paths to that list. Note
# that rope guesses project source folders correctly most of the
# time; use this if you have any problems.
# The folders should be relative to project root and use '/' for
# separating folders regardless of the platform rope is running on.
# 'src/my_source_folder' for instance.
#prefs.add('source_folders', 'src')
# You can extend python path for looking up modules
#prefs.add('python_path', '~/python/')
# Should rope save object information or not.
prefs['save_objectdb'] = True
prefs['compress_objectdb'] = False
# If `True`, rope analyzes each module when it is being saved.
prefs['automatic_soa'] = True
# The depth of calls to follow in static object analysis
prefs['soa_followed_calls'] = 0
# If `False` when running modules or unit tests "dynamic object
# analysis" is turned off. This makes them much faster.
prefs['perform_doa'] = True
# Rope can check the validity of its object DB when running.
prefs['validate_objectdb'] = True
# How many undos to hold?
prefs['max_history_items'] = 32
# Shows whether to save history across sessions.
prefs['save_history'] = True
prefs['compress_history'] = False
# Set the number spaces used for indenting. According to
# :PEP:`8`, it is best to use 4 spaces. Since most of rope's
# unit-tests use 4 spaces it is more reliable, too.
prefs['indent_size'] = 4
# Builtin and c-extension modules that are allowed to be imported
# and inspected by rope.
prefs['extension_modules'] = []
# Add all standard c-extensions to extension_modules list.
prefs['import_dynload_stdmods'] = True
# If `True` modules with syntax errors are considered to be empty.
# The default value is `False`; When `False` syntax errors raise
# `rope.base.exceptions.ModuleSyntaxError` exception.
prefs['ignore_syntax_errors'] = False
# If `True`, rope ignores unresolvable imports. Otherwise, they
# appear in the importing namespace.
prefs['ignore_bad_imports'] = False
def project_opened(project):
"""This function is called after opening the project"""
# Do whatever you like here!
| mit | 1,864,530,390,412,672,500 | 39.717647 | 71 | 0.661947 | false |
myerpengine/odoo | addons/product_extended/__init__.py | 374 | 1068 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_extended
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,024,408,698,511,193,600 | 45.434783 | 79 | 0.61985 | false |
mediathread/mdtprint | app/bower_components/phantom/src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/text_format.py | 261 | 21737 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains routines for printing protocol messages in text format."""
__author__ = '[email protected] (Kenton Varda)'
import cStringIO
import re
from collections import deque
from google.protobuf.internal import type_checkers
from google.protobuf import descriptor
__all__ = [ 'MessageToString', 'PrintMessage', 'PrintField',
'PrintFieldValue', 'Merge' ]
# Infinity and NaN are not explicitly supported by Python pre-2.6, and
# float('inf') does not work on Windows (pre-2.6).
_INFINITY = 1e10000 # overflows, thus will actually be infinity.
_NAN = _INFINITY * 0
class ParseError(Exception):
"""Thrown in case of ASCII parsing error."""
def MessageToString(message, as_utf8=False, as_one_line=False):
out = cStringIO.StringIO()
PrintMessage(message, out, as_utf8=as_utf8, as_one_line=as_one_line)
result = out.getvalue()
out.close()
if as_one_line:
return result.rstrip()
return result
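# Round-trip sketch (assumes a generated message class `Person` with fields
# `name` and `id`; the class is hypothetical):
#   msg = Person(name='Ada', id=1)
#   text = MessageToString(msg)   # 'name: "Ada"\nid: 1\n'
#   clone = Person()
#   Merge(text, clone)            # clone now equals msg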
def PrintMessage(message, out, indent=0, as_utf8=False, as_one_line=False):
for field, value in message.ListFields():
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
for element in value:
PrintField(field, element, out, indent, as_utf8, as_one_line)
else:
PrintField(field, value, out, indent, as_utf8, as_one_line)
def PrintField(field, value, out, indent=0, as_utf8=False, as_one_line=False):
"""Print a single field name/value pair. For repeated fields, the value
should be a single element."""
  out.write(' ' * indent)
if field.is_extension:
out.write('[')
if (field.containing_type.GetOptions().message_set_wire_format and
field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.message_type == field.extension_scope and
field.label == descriptor.FieldDescriptor.LABEL_OPTIONAL):
out.write(field.message_type.full_name)
else:
out.write(field.full_name)
out.write(']')
elif field.type == descriptor.FieldDescriptor.TYPE_GROUP:
# For groups, use the capitalized name.
out.write(field.message_type.name)
else:
out.write(field.name)
if field.cpp_type != descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
# The colon is optional in this case, but our cross-language golden files
# don't include it.
out.write(': ')
PrintFieldValue(field, value, out, indent, as_utf8, as_one_line)
if as_one_line:
out.write(' ')
else:
out.write('\n')
def PrintFieldValue(field, value, out, indent=0,
as_utf8=False, as_one_line=False):
"""Print a single field value (not including name). For repeated fields,
the value should be a single element."""
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
if as_one_line:
out.write(' { ')
PrintMessage(value, out, indent, as_utf8, as_one_line)
out.write('}')
else:
out.write(' {\n')
PrintMessage(value, out, indent + 2, as_utf8, as_one_line)
out.write(' ' * indent + '}')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
out.write(field.enum_type.values_by_number[value].name)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
out.write('\"')
if type(value) is unicode:
out.write(_CEscape(value.encode('utf-8'), as_utf8))
else:
out.write(_CEscape(value, as_utf8))
out.write('\"')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
if value:
out.write("true")
else:
out.write("false")
else:
out.write(str(value))
def Merge(text, message):
"""Merges an ASCII representation of a protocol message into a message.
Args:
text: Message ASCII representation.
message: A protocol buffer message to merge into.
Raises:
ParseError: On ASCII parsing problems.
"""
tokenizer = _Tokenizer(text)
while not tokenizer.AtEnd():
_MergeField(tokenizer, message)
def _MergeField(tokenizer, message):
"""Merges a single protocol message field into a message.
Args:
tokenizer: A tokenizer to parse the field name and values.
message: A protocol message to record the data.
Raises:
ParseError: In case of ASCII parsing problems.
"""
message_descriptor = message.DESCRIPTOR
if tokenizer.TryConsume('['):
name = [tokenizer.ConsumeIdentifier()]
while tokenizer.TryConsume('.'):
name.append(tokenizer.ConsumeIdentifier())
name = '.'.join(name)
if not message_descriptor.is_extendable:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" does not have extensions.' %
message_descriptor.full_name)
field = message.Extensions._FindExtensionByName(name)
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" not registered.' % name)
elif message_descriptor != field.containing_type:
raise tokenizer.ParseErrorPreviousToken(
'Extension "%s" does not extend message type "%s".' % (
name, message_descriptor.full_name))
tokenizer.Consume(']')
else:
name = tokenizer.ConsumeIdentifier()
field = message_descriptor.fields_by_name.get(name, None)
# Group names are expected to be capitalized as they appear in the
# .proto file, which actually matches their type names, not their field
# names.
if not field:
field = message_descriptor.fields_by_name.get(name.lower(), None)
if field and field.type != descriptor.FieldDescriptor.TYPE_GROUP:
field = None
if (field and field.type == descriptor.FieldDescriptor.TYPE_GROUP and
field.message_type.name != name):
field = None
if not field:
raise tokenizer.ParseErrorPreviousToken(
'Message type "%s" has no field named "%s".' % (
message_descriptor.full_name, name))
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
tokenizer.TryConsume(':')
if tokenizer.TryConsume('<'):
end_token = '>'
else:
tokenizer.Consume('{')
end_token = '}'
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
sub_message = message.Extensions[field].add()
else:
sub_message = getattr(message, field.name).add()
else:
if field.is_extension:
sub_message = message.Extensions[field]
else:
sub_message = getattr(message, field.name)
sub_message.SetInParent()
while not tokenizer.TryConsume(end_token):
if tokenizer.AtEnd():
raise tokenizer.ParseErrorPreviousToken('Expected "%s".' % (end_token))
_MergeField(tokenizer, sub_message)
else:
_MergeScalarField(tokenizer, message, field)
def _MergeScalarField(tokenizer, message, field):
"""Merges a single protocol message scalar field into a message.
Args:
tokenizer: A tokenizer to parse the field value.
message: A protocol message to record the data.
field: The descriptor of the field to be merged.
Raises:
ParseError: In case of ASCII parsing problems.
RuntimeError: On runtime errors.
"""
tokenizer.Consume(':')
value = None
if field.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_SFIXED32):
value = tokenizer.ConsumeInt32()
elif field.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_SINT64,
descriptor.FieldDescriptor.TYPE_SFIXED64):
value = tokenizer.ConsumeInt64()
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_FIXED32):
value = tokenizer.ConsumeUint32()
elif field.type in (descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_FIXED64):
value = tokenizer.ConsumeUint64()
elif field.type in (descriptor.FieldDescriptor.TYPE_FLOAT,
descriptor.FieldDescriptor.TYPE_DOUBLE):
value = tokenizer.ConsumeFloat()
elif field.type == descriptor.FieldDescriptor.TYPE_BOOL:
value = tokenizer.ConsumeBool()
elif field.type == descriptor.FieldDescriptor.TYPE_STRING:
value = tokenizer.ConsumeString()
elif field.type == descriptor.FieldDescriptor.TYPE_BYTES:
value = tokenizer.ConsumeByteString()
elif field.type == descriptor.FieldDescriptor.TYPE_ENUM:
# Enum can be specified by a number (the enum value), or by
# a string literal (the enum name).
enum_descriptor = field.enum_type
if tokenizer.LookingAtInteger():
number = tokenizer.ConsumeInt32()
enum_value = enum_descriptor.values_by_number.get(number, None)
if enum_value is None:
raise tokenizer.ParseErrorPreviousToken(
'Enum type "%s" has no value with number %d.' % (
enum_descriptor.full_name, number))
else:
identifier = tokenizer.ConsumeIdentifier()
enum_value = enum_descriptor.values_by_name.get(identifier, None)
if enum_value is None:
raise tokenizer.ParseErrorPreviousToken(
'Enum type "%s" has no value named %s.' % (
enum_descriptor.full_name, identifier))
value = enum_value.number
else:
raise RuntimeError('Unknown field type %d' % field.type)
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if field.is_extension:
message.Extensions[field].append(value)
else:
getattr(message, field.name).append(value)
else:
if field.is_extension:
message.Extensions[field] = value
else:
setattr(message, field.name, value)
class _Tokenizer(object):
"""Protocol buffer ASCII representation tokenizer.
This class handles the lower level string parsing by splitting it into
meaningful tokens.
It was directly ported from the Java protocol buffer API.
"""
_WHITESPACE = re.compile('(\\s|(#.*$))+', re.MULTILINE)
_TOKEN = re.compile(
'[a-zA-Z_][0-9a-zA-Z_+-]*|' # an identifier
'[0-9+-][0-9a-zA-Z_.+-]*|' # a number
'\"([^\"\n\\\\]|\\\\.)*(\"|\\\\?$)|' # a double-quoted string
'\'([^\'\n\\\\]|\\\\.)*(\'|\\\\?$)') # a single-quoted string
_IDENTIFIER = re.compile('\w+')
_INTEGER_CHECKERS = [type_checkers.Uint32ValueChecker(),
type_checkers.Int32ValueChecker(),
type_checkers.Uint64ValueChecker(),
type_checkers.Int64ValueChecker()]
_FLOAT_INFINITY = re.compile('-?inf(inity)?f?', re.IGNORECASE)
_FLOAT_NAN = re.compile("nanf?", re.IGNORECASE)
def __init__(self, text_message):
self._text_message = text_message
self._position = 0
self._line = -1
self._column = 0
self._token_start = None
self.token = ''
self._lines = deque(text_message.split('\n'))
self._current_line = ''
self._previous_line = 0
self._previous_column = 0
self._SkipWhitespace()
self.NextToken()
def AtEnd(self):
"""Checks the end of the text was reached.
Returns:
True iff the end was reached.
"""
return self.token == ''
def _PopLine(self):
while len(self._current_line) <= self._column:
if not self._lines:
self._current_line = ''
return
self._line += 1
self._column = 0
self._current_line = self._lines.popleft()
def _SkipWhitespace(self):
while True:
self._PopLine()
match = self._WHITESPACE.match(self._current_line, self._column)
if not match:
break
length = len(match.group(0))
self._column += length
def TryConsume(self, token):
"""Tries to consume a given piece of text.
Args:
token: Text to consume.
Returns:
True iff the text was consumed.
"""
if self.token == token:
self.NextToken()
return True
return False
def Consume(self, token):
"""Consumes a piece of text.
Args:
token: Text to consume.
Raises:
ParseError: If the text couldn't be consumed.
"""
if not self.TryConsume(token):
raise self._ParseError('Expected "%s".' % token)
def LookingAtInteger(self):
"""Checks if the current token is an integer.
Returns:
True iff the current token is an integer.
"""
if not self.token:
return False
c = self.token[0]
return (c >= '0' and c <= '9') or c == '-' or c == '+'
def ConsumeIdentifier(self):
"""Consumes protocol message field identifier.
Returns:
Identifier string.
Raises:
ParseError: If an identifier couldn't be consumed.
"""
result = self.token
if not self._IDENTIFIER.match(result):
raise self._ParseError('Expected identifier.')
self.NextToken()
return result
def ConsumeInt32(self):
"""Consumes a signed 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 32bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=True, is_long=False)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeUint32(self):
"""Consumes an unsigned 32bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 32bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=False, is_long=False)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeInt64(self):
"""Consumes a signed 64bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If a signed 64bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=True, is_long=True)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeUint64(self):
"""Consumes an unsigned 64bit integer number.
Returns:
The integer parsed.
Raises:
ParseError: If an unsigned 64bit integer couldn't be consumed.
"""
try:
result = self._ParseInteger(self.token, is_signed=False, is_long=True)
except ValueError, e:
raise self._IntegerParseError(e)
self.NextToken()
return result
def ConsumeFloat(self):
"""Consumes an floating point number.
Returns:
The number parsed.
Raises:
ParseError: If a floating point number couldn't be consumed.
"""
text = self.token
if self._FLOAT_INFINITY.match(text):
self.NextToken()
if text.startswith('-'):
return -_INFINITY
return _INFINITY
if self._FLOAT_NAN.match(text):
self.NextToken()
return _NAN
try:
result = float(text)
except ValueError, e:
raise self._FloatParseError(e)
self.NextToken()
return result
def ConsumeBool(self):
"""Consumes a boolean value.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
if self.token in ('true', 't', '1'):
self.NextToken()
return True
elif self.token in ('false', 'f', '0'):
self.NextToken()
return False
else:
raise self._ParseError('Expected "true" or "false".')
def ConsumeString(self):
"""Consumes a string value.
Returns:
The string parsed.
Raises:
ParseError: If a string value couldn't be consumed.
"""
bytes = self.ConsumeByteString()
try:
return unicode(bytes, 'utf-8')
except UnicodeDecodeError, e:
raise self._StringParseError(e)
def ConsumeByteString(self):
"""Consumes a byte array value.
Returns:
The array parsed (as a string).
Raises:
ParseError: If a byte array value couldn't be consumed.
"""
list = [self._ConsumeSingleByteString()]
while len(self.token) > 0 and self.token[0] in ('\'', '"'):
list.append(self._ConsumeSingleByteString())
return "".join(list)
def _ConsumeSingleByteString(self):
"""Consume one token of a string literal.
String literals (whether bytes or text) can come in multiple adjacent
tokens which are automatically concatenated, like in C or Python. This
method only consumes one token.
"""
text = self.token
if len(text) < 1 or text[0] not in ('\'', '"'):
      raise self._ParseError('Expected string.')
if len(text) < 2 or text[-1] != text[0]:
raise self._ParseError('String missing ending quote.')
try:
result = _CUnescape(text[1:-1])
except ValueError, e:
raise self._ParseError(str(e))
self.NextToken()
return result
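  # Example (sketch): for the input  foo: "ab" 'cd'  the two adjacent quoted
  # tokens are concatenated, so ConsumeByteString() returns 'abcd'.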
def _ParseInteger(self, text, is_signed=False, is_long=False):
"""Parses an integer.
Args:
text: The text to parse.
is_signed: True if a signed integer must be parsed.
is_long: True if a long integer must be parsed.
Returns:
The integer value.
Raises:
      ValueError: Thrown iff the text is not a valid integer.
"""
pos = 0
if text.startswith('-'):
pos += 1
base = 10
if text.startswith('0x', pos) or text.startswith('0X', pos):
base = 16
elif text.startswith('0', pos):
base = 8
# Do the actual parsing. Exception handling is propagated to caller.
result = int(text, base)
# Check if the integer is sane. Exceptions handled by callers.
checker = self._INTEGER_CHECKERS[2 * int(is_long) + int(is_signed)]
checker.CheckValue(result)
return result
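  # Examples (sketch): '0x1f' is parsed with base 16 -> 31, '010' with
  # base 8 -> 8, '-7' with base 10 -> -7; the selected ValueChecker then
  # rejects values outside the 32/64-bit signed/unsigned range.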
def ParseErrorPreviousToken(self, message):
"""Creates and *returns* a ParseError for the previously read token.
Args:
message: A message to set for the exception.
Returns:
A ParseError instance.
"""
return ParseError('%d:%d : %s' % (
self._previous_line + 1, self._previous_column + 1, message))
def _ParseError(self, message):
"""Creates and *returns* a ParseError for the current token."""
return ParseError('%d:%d : %s' % (
self._line + 1, self._column - len(self.token) + 1, message))
def _IntegerParseError(self, e):
return self._ParseError('Couldn\'t parse integer: ' + str(e))
def _FloatParseError(self, e):
return self._ParseError('Couldn\'t parse number: ' + str(e))
def _StringParseError(self, e):
return self._ParseError('Couldn\'t parse string: ' + str(e))
def NextToken(self):
"""Reads the next meaningful token."""
self._previous_line = self._line
self._previous_column = self._column
self._column += len(self.token)
self._SkipWhitespace()
if not self._lines and len(self._current_line) <= self._column:
self.token = ''
return
match = self._TOKEN.match(self._current_line, self._column)
if match:
token = match.group(0)
self.token = token
else:
self.token = self._current_line[self._column]
# text.encode('string_escape') does not seem to satisfy our needs as it
# encodes unprintable characters using two-digit hex escapes whereas our
# C++ unescaping function allows hex escapes to be any length. So,
# "\0011".encode('string_escape') ends up being "\\x011", which will be
# decoded in C++ as a single-character string with char code 0x11.
def _CEscape(text, as_utf8):
def escape(c):
o = ord(c)
if o == 10: return r"\n" # optional escape
if o == 13: return r"\r" # optional escape
if o == 9: return r"\t" # optional escape
if o == 39: return r"\'" # optional escape
if o == 34: return r'\"' # necessary escape
if o == 92: return r"\\" # necessary escape
# necessary escapes
if not as_utf8 and (o >= 127 or o < 32): return "\\%03o" % o
return c
return "".join([escape(c) for c in text])
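# Editor's illustration: _CEscape('a\n\x01"', as_utf8=False) returns the text
# a\n\001\" -- printable bytes pass through, control/high bytes get octal
# escapes when not emitting UTF-8.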
_CUNESCAPE_HEX = re.compile('\\\\x([0-9a-fA-F]{2}|[0-9a-fA-F])')
def _CUnescape(text):
def ReplaceHex(m):
return chr(int(m.group(0)[2:], 16))
# This is required because the 'string_escape' encoding doesn't
# allow single-digit hex escapes (like '\xf').
result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
return result.decode('string_escape')
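# Editor's illustration: _CUnescape(r'\x41\xf\n') -> 'A\x0f\n'; the regex
# pre-pass turns one- and two-digit hex escapes into raw characters before
# 'string_escape' decodes the remaining backslash escapes.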
| mit | 5,885,096,438,276,071,000 | 30.457308 | 79 | 0.655426 | false |
zofuthan/airmozilla | airmozilla/surveys/views.py | 9 | 3959 | from collections import defaultdict
from django.shortcuts import get_object_or_404, render, redirect
from django.db import transaction
from django import forms
from .models import Survey, Question, Answer
@transaction.atomic
def load(request, id):
survey = get_object_or_404(Survey, id=id, active=True)
context = {'survey': survey}
questions = Question.objects.filter(survey=survey)
answers = Answer.objects.filter(question__in=questions)
if request.method == 'POST' and request.POST.get('resetmine'):
answers.filter(user=request.user).delete()
return redirect('surveys:load', survey.id)
show_answers = True # default
if request.user.is_authenticated():
# don't show answers if this is POST
your_answers = answers.filter(user=request.user)
if request.method == 'POST' or not your_answers:
show_answers = False
else:
your_answers = answers.none()
if show_answers:
# make a map of question -> [answers]
_answers = defaultdict(list)
for answer in answers:
_answers[answer.question].append(answer)
questions_dicts = []
for question in questions:
if not question.question:
continue
item = {
'label': question.question['question'], # ugly
'choices': []
}
choices = defaultdict(int)
total_answers = 0
for answer in _answers[question]:
choices[answer.answer['answer']] += 1
total_answers += 1
try:
your_answer = your_answers.get(question=question)
except Answer.DoesNotExist:
your_answer = None
for choice in question.question['choices']:
try:
percent = 100.0 * choices[choice] / total_answers
except ZeroDivisionError:
percent = 0.0
choice_item = {
'number': choices[choice],
'percent': percent,
'answer': choice,
'your_answer': (
your_answer and choice == your_answer.answer['answer']
)
}
item['choices'].append(choice_item)
questions_dicts.append(item)
context['questions'] = questions_dicts
context['answers'] = answers
return render(request, 'surveys/answers.html', context)
if request.method == 'POST':
form = forms.Form(request.POST)
else:
form = forms.Form()
for question in questions:
if not question.question:
# it's empty
continue
q = question.question
if q.get('question') and q.get('choices'):
field = forms.ChoiceField(q.get('question'))
field.label = q.get('question')
field.widget = forms.widgets.RadioSelect()
field.choices = [
(x, x) for x in q.get('choices')
]
field.required = False
form.fields[str(question.id)] = field
if request.method == 'POST':
if form.is_valid():
# delete any previous answers
Answer.objects.filter(
question=question,
user=request.user
).delete()
for question_id, answer in form.cleaned_data.items():
if not answer:
continue
question = questions.get(id=question_id)
Answer.objects.create(
question=question,
user=request.user,
answer={
'answer': answer
}
)
return redirect('surveys:load', survey.id)
context['form'] = form
return render(request, 'surveys/questions.html', context)
| bsd-3-clause | 156,609,386,087,001,900 | 32.837607 | 78 | 0.532458 | false |
waseem18/bedrock | vendor-local/packages/chardet/chardet/langthaimodel.py | 235 | 11298 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import constants
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for Thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = ( \
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences: 7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = ( \
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = { \
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': constants.False,
'charsetName': "TIS-620"
}
| mpl-2.0 | 2,781,892,771,507,009,000 | 55.49 | 74 | 0.541069 | false |
idea4bsd/idea4bsd | python/helpers/pydev/tests_pydevd_python/performance_check.py | 12 | 6676 | import debugger_unittest
import sys
import re
import os
CHECK_BASELINE, CHECK_REGULAR, CHECK_CYTHON = 'baseline', 'regular', 'cython'
class PerformanceWriterThread(debugger_unittest.AbstractWriterThread):
CHECK = None
debugger_unittest.AbstractWriterThread.get_environ # overrides
def get_environ(self):
env = os.environ.copy()
if self.CHECK == CHECK_BASELINE:
env['PYTHONPATH'] = r'X:\PyDev.Debugger.baseline'
elif self.CHECK == CHECK_CYTHON:
env['PYDEVD_USE_CYTHON'] = 'YES'
elif self.CHECK == CHECK_REGULAR:
env['PYDEVD_USE_CYTHON'] = 'NO'
else:
raise AssertionError("Don't know what to check.")
return env
debugger_unittest.AbstractWriterThread.get_pydevd_file # overrides
def get_pydevd_file(self):
if self.CHECK == CHECK_BASELINE:
return os.path.abspath(os.path.join(r'X:\PyDev.Debugger.baseline', 'pydevd.py'))
dirname = os.path.dirname(__file__)
dirname = os.path.dirname(dirname)
return os.path.abspath(os.path.join(dirname, 'pydevd.py'))
class WriterThreadPerformance1(PerformanceWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_performance_1.py')
BENCHMARK_NAME = 'method_calls_with_breakpoint'
def run(self):
self.start_socket()
self.write_add_breakpoint(17, 'method')
self.write_make_initial_run()
self.finished_ok = True
class WriterThreadPerformance2(PerformanceWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_performance_1.py')
BENCHMARK_NAME = 'method_calls_without_breakpoint'
def run(self):
self.start_socket()
self.write_make_initial_run()
self.finished_ok = True
class WriterThreadPerformance3(PerformanceWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_performance_1.py')
BENCHMARK_NAME = 'method_calls_with_step_over'
def run(self):
self.start_socket()
self.write_add_breakpoint(26, None)
self.write_make_initial_run()
thread_id, frame_id, line = self.wait_for_breakpoint_hit('111', True)
self.write_step_over(thread_id)
thread_id, frame_id, line = self.wait_for_breakpoint_hit('108', True)
self.write_run_thread(thread_id)
self.finished_ok = True
class WriterThreadPerformance4(PerformanceWriterThread):
TEST_FILE = debugger_unittest._get_debugger_test_file('_performance_1.py')
BENCHMARK_NAME = 'method_calls_with_exception_breakpoint'
def run(self):
self.start_socket()
self.write_add_exception_breakpoint('ValueError')
self.write_make_initial_run()
self.finished_ok = True
class CheckDebuggerPerformance(debugger_unittest.DebuggerRunner):
def get_command_line(self):
return [sys.executable]
def _get_time_from_result(self, result):
stdout = ''.join(result['stdout'])
match = re.search('TotalTime>>((\d|\.)+)<<', stdout)
time_taken = match.group(1)
return float(time_taken)
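    # Editor's note: the benchmark scripts under test are expected to print a
    # marker such as 'TotalTime>>1.23<<' on stdout; that float is what gets
    # extracted here.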
def obtain_results(self, writer_thread_class):
time_when_debugged = self._get_time_from_result(self.check_case(writer_thread_class))
args = self.get_command_line()
args.append(writer_thread_class.TEST_FILE)
regular_time = self._get_time_from_result(self.run_process(args, writer_thread=None))
simple_trace_time = self._get_time_from_result(self.run_process(args+['--regular-trace'], writer_thread=None))
print(writer_thread_class.BENCHMARK_NAME, time_when_debugged, regular_time, simple_trace_time)
if 'SPEEDTIN_AUTHORIZATION_KEY' in os.environ:
SPEEDTIN_AUTHORIZATION_KEY = os.environ['SPEEDTIN_AUTHORIZATION_KEY']
# sys.path.append(r'X:\speedtin\pyspeedtin')
import pyspeedtin # If the authorization key is there, pyspeedtin must be available
import pydevd
pydevd_cython_project_id, pydevd_pure_python_project_id = 6, 7
if writer_thread_class.CHECK == CHECK_BASELINE:
project_ids = (pydevd_cython_project_id, pydevd_pure_python_project_id)
elif writer_thread_class.CHECK == CHECK_REGULAR:
project_ids = (pydevd_pure_python_project_id,)
elif writer_thread_class.CHECK == CHECK_CYTHON:
project_ids = (pydevd_cython_project_id,)
else:
raise AssertionError('Wrong check: %s' % (writer_thread_class.CHECK))
for project_id in project_ids:
api = pyspeedtin.PySpeedTinApi(authorization_key=SPEEDTIN_AUTHORIZATION_KEY, project_id=project_id)
benchmark_name = writer_thread_class.BENCHMARK_NAME
if writer_thread_class.CHECK == CHECK_BASELINE:
version = '0.0.1_baseline'
return # No longer commit the baseline (it's immutable right now).
else:
                    version = pydevd.__version__
commit_id, branch, commit_date = api.git_commit_id_branch_and_date_from_path(pydevd.__file__)
api.add_benchmark(benchmark_name)
api.add_measurement(
benchmark_name,
value=time_when_debugged,
version=version,
released=False,
branch=branch,
commit_id=commit_id,
commit_date=commit_date,
)
api.commit()
def check_performance1(self):
self.obtain_results(WriterThreadPerformance1)
def check_performance2(self):
self.obtain_results(WriterThreadPerformance2)
def check_performance3(self):
self.obtain_results(WriterThreadPerformance3)
def check_performance4(self):
self.obtain_results(WriterThreadPerformance4)
if __name__ == '__main__':
debugger_unittest.SHOW_WRITES_AND_READS = False
debugger_unittest.SHOW_OTHER_DEBUG_INFO = False
debugger_unittest.SHOW_STDOUT = False
for check in (
# CHECK_BASELINE, -- Checks against the version checked out at X:\PyDev.Debugger.baseline.
CHECK_REGULAR,
CHECK_CYTHON
):
PerformanceWriterThread.CHECK = check
print('Checking: %s' % (check,))
check_debugger_performance = CheckDebuggerPerformance()
check_debugger_performance.check_performance1()
check_debugger_performance.check_performance2()
check_debugger_performance.check_performance3()
check_debugger_performance.check_performance4()
| apache-2.0 | 342,902,274,577,849,000 | 37.148571 | 118 | 0.63601 | false |
HyperBaton/ansible | lib/ansible/modules/network/f5/bigip_sys_daemon_log_tmm.py | 38 | 13357 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_sys_daemon_log_tmm
short_description: Manage BIG-IP tmm daemon log settings
description:
- Manage BIG-IP tmm log settings.
version_added: 2.8
options:
arp_log_level:
description:
- Specifies the lowest level of ARP messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- error
- informational
- notice
- warning
http_compression_log_level:
description:
- Specifies the lowest level of HTTP compression messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- error
- informational
- notice
- warning
http_log_level:
description:
- Specifies the lowest level of HTTP messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- error
- informational
- notice
- warning
ip_log_level:
description:
- Specifies the lowest level of IP address messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- informational
- notice
- warning
irule_log_level:
description:
- Specifies the lowest level of iRule messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- error
- informational
- notice
- warning
layer4_log_level:
description:
- Specifies the lowest level of Layer 4 messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- informational
- notice
net_log_level:
description:
- Specifies the lowest level of network messages from the tmm daemon
to include in the system log.
type: str
choices:
- critical
- debug
- error
- informational
- notice
- warning
os_log_level:
description:
- Specifies the lowest level of operating system messages from the tmm daemon
to include in the system log.
type: str
choices:
- alert
- critical
- debug
- emergency
- error
- informational
- notice
- warning
pva_log_level:
description:
- Specifies the lowest level of PVA messages from the tmm daemon
to include in the system log.
type: str
choices:
- debug
- informational
- notice
ssl_log_level:
description:
- Specifies the lowest level of SSL messages from the tmm daemon
to include in the system log.
type: str
choices:
- alert
- critical
- debug
- emergency
- error
- informational
- notice
- warning
state:
description:
- The state of the log level on the system. When C(present), guarantees
that an existing log level is set to C(value).
type: str
choices:
- present
default: present
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Set SSL log level to debug
bigip_sys_daemon_log_tmm:
provider:
password: secret
server: lb.mydomain.com
user: admin
ssl_log_level: debug
delegate_to: localhost
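# Editor's sketch (hypothetical values; same option set as documented above)
- name: Set HTTP and iRule log levels
  bigip_sys_daemon_log_tmm:
    provider:
      password: secret
      server: lb.mydomain.com
      user: admin
    http_log_level: debug
    irule_log_level: informational
  delegate_to: localhost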
'''
RETURN = r'''
arp_log_level:
description: Lowest level of ARP messages from the tmm daemon to log.
returned: changed
type: str
sample: error
http_compression_log_level:
description: Lowest level of HTTP compression messages from the tmm daemon to log.
returned: changed
type: str
sample: debug
http_log_level:
description: Lowest level of HTTP messages from the tmm daemon to log.
returned: changed
type: str
sample: notice
ip_log_level:
description: Lowest level of IP address messages from the tmm daemon to log.
returned: changed
type: str
sample: warning
irule_log_level:
description: Lowest level of iRule messages from the tmm daemon to log.
returned: changed
type: str
sample: error
layer4_log_level:
description: Lowest level of Layer 4 messages from the tmm daemon to log.
returned: changed
type: str
sample: notice
net_log_level:
description: Lowest level of network messages from the tmm daemon to log.
returned: changed
type: str
sample: critical
os_log_level:
description: Lowest level of operating system messages from the tmm daemon to log.
returned: changed
type: str
sample: critical
pva_log_level:
description: Lowest level of PVA messages from the tmm daemon to log.
returned: changed
type: str
sample: debug
ssl_log_level:
description: Lowest level of SSL messages from the tmm daemon to log.
returned: changed
type: str
sample: critical
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
class Parameters(AnsibleF5Parameters):
api_map = {
'arpLogLevel': 'arp_log_level',
'httpCompressionLogLevel': 'http_compression_log_level',
'httpLogLevel': 'http_log_level',
'ipLogLevel': 'ip_log_level',
'iruleLogLevel': 'irule_log_level',
'layer4LogLevel': 'layer4_log_level',
'netLogLevel': 'net_log_level',
'osLogLevel': 'os_log_level',
'pvaLogLevel': 'pva_log_level',
'sslLogLevel': 'ssl_log_level',
}
api_attributes = [
'arpLogLevel',
'httpCompressionLogLevel',
'httpLogLevel',
'ipLogLevel',
'iruleLogLevel',
'layer4LogLevel',
'netLogLevel',
'osLogLevel',
'pvaLogLevel',
'sslLogLevel',
]
returnables = [
'arp_log_level',
'http_compression_log_level',
'http_log_level',
'ip_log_level',
'irule_log_level',
'layer4_log_level',
'net_log_level',
'os_log_level',
'pva_log_level',
'ssl_log_level',
]
updatables = [
'arp_log_level',
'http_compression_log_level',
'http_log_level',
'ip_log_level',
'irule_log_level',
'layer4_log_level',
'net_log_level',
'os_log_level',
'pva_log_level',
'ssl_log_level',
]
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def exec_module(self):
result = dict()
changed = self.present()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
return self.update()
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/daemon-log-settings/tmm".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/daemon-log-settings/tmm".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.choices_min = ['debug', 'informational', 'notice']
self.choices_common = self.choices_min + ['warning', 'error']
self.choices_all = self.choices_common + ['alert', 'critical', 'emergency']
argument_spec = dict(
arp_log_level=dict(
choices=self.choices_common
),
http_compression_log_level=dict(
choices=self.choices_common
),
http_log_level=dict(
choices=self.choices_common
),
ip_log_level=dict(
choices=self.choices_min + ['warning']
),
irule_log_level=dict(
choices=self.choices_common
),
layer4_log_level=dict(
choices=self.choices_min
),
net_log_level=dict(
choices=self.choices_common + ['critical']
),
os_log_level=dict(
choices=self.choices_all
),
pva_log_level=dict(
choices=self.choices_min
),
ssl_log_level=dict(
choices=self.choices_all
),
state=dict(default='present', choices=['present'])
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 | 6,186,121,262,179,803,000 | 26.093306 | 91 | 0.595493 | false |
Hasimir/brython | www/src/Lib/test/test_with.py | 83 | 26527 | #!/usr/bin/env python3
"""Unit tests for the with statement specified in PEP 343."""
__author__ = "Mike Bland"
__email__ = "mbland at acm dot org"
import sys
import unittest
from collections import deque
from contextlib import _GeneratorContextManager, contextmanager
from test.support import run_unittest
class MockContextManager(_GeneratorContextManager):
def __init__(self, func, *args, **kwds):
super().__init__(func, *args, **kwds)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return _GeneratorContextManager.__enter__(self)
def __exit__(self, type, value, traceback):
self.exit_called = True
self.exit_args = (type, value, traceback)
return _GeneratorContextManager.__exit__(self, type,
value, traceback)
def mock_contextmanager(func):
def helper(*args, **kwds):
return MockContextManager(func, *args, **kwds)
return helper
class MockResource(object):
def __init__(self):
self.yielded = False
self.stopped = False
@mock_contextmanager
def mock_contextmanager_generator():
mock = MockResource()
try:
mock.yielded = True
yield mock
finally:
mock.stopped = True
class Nested(object):
def __init__(self, *managers):
self.managers = managers
self.entered = None
def __enter__(self):
if self.entered is not None:
raise RuntimeError("Context is not reentrant")
self.entered = deque()
vars = []
try:
for mgr in self.managers:
vars.append(mgr.__enter__())
self.entered.appendleft(mgr)
except:
if not self.__exit__(*sys.exc_info()):
raise
return vars
def __exit__(self, *exc_info):
# Behave like nested with statements
# first in, last out
# New exceptions override old ones
ex = exc_info
for mgr in self.entered:
try:
if mgr.__exit__(*ex):
ex = (None, None, None)
except:
ex = sys.exc_info()
self.entered = None
if ex is not exc_info:
raise ex[0](ex[1]).with_traceback(ex[2])
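# Editor's illustration: Nested(cm_a, cm_b) used as
#   with Nested(cm_a, cm_b) as (a, b): ...
# is meant to behave like
#   with cm_a as a:
#       with cm_b as b: ...
# including first-in/last-out __exit__ order and newer exceptions
# overriding older ones.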
class MockNested(Nested):
def __init__(self, *managers):
Nested.__init__(self, *managers)
self.enter_called = False
self.exit_called = False
self.exit_args = None
def __enter__(self):
self.enter_called = True
return Nested.__enter__(self)
def __exit__(self, *exc_info):
self.exit_called = True
self.exit_args = exc_info
return Nested.__exit__(self, *exc_info)
class FailureTestCase(unittest.TestCase):
def testNameError(self):
def fooNotDeclared():
with foo: pass
self.assertRaises(NameError, fooNotDeclared)
def testEnterAttributeError(self):
class LacksEnter(object):
def __exit__(self, type, value, traceback):
pass
def fooLacksEnter():
foo = LacksEnter()
with foo: pass
self.assertRaises(AttributeError, fooLacksEnter)
def testExitAttributeError(self):
class LacksExit(object):
def __enter__(self):
pass
def fooLacksExit():
foo = LacksExit()
with foo: pass
self.assertRaises(AttributeError, fooLacksExit)
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, '', 'single')
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def testAssignmentToNoneError(self):
self.assertRaisesSyntaxError('with mock as None:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None):\n'
' pass')
def testAssignmentToEmptyTupleError(self):
self.assertRaisesSyntaxError(
'with mock as ():\n'
' pass')
def testAssignmentToTupleOnlyContainingNoneError(self):
self.assertRaisesSyntaxError('with mock as None,:\n pass')
self.assertRaisesSyntaxError(
'with mock as (None,):\n'
' pass')
def testAssignmentToTupleContainingNoneError(self):
self.assertRaisesSyntaxError(
'with mock as (foo, None, bar):\n'
' pass')
def testEnterThrows(self):
class EnterThrows(object):
def __enter__(self):
raise RuntimeError("Enter threw")
def __exit__(self, *args):
pass
def shouldThrow():
ct = EnterThrows()
self.foo = None
with ct as self.foo:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertEqual(self.foo, None)
def testExitThrows(self):
class ExitThrows(object):
def __enter__(self):
return
def __exit__(self, *args):
raise RuntimeError(42)
def shouldThrow():
with ExitThrows():
pass
self.assertRaises(RuntimeError, shouldThrow)
class ContextmanagerAssertionMixin(object):
def setUp(self):
self.TEST_EXCEPTION = RuntimeError("test exception")
def assertInWithManagerInvariants(self, mock_manager):
self.assertTrue(mock_manager.enter_called)
self.assertFalse(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, None)
def assertAfterWithManagerInvariants(self, mock_manager, exit_args):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
self.assertEqual(mock_manager.exit_args, exit_args)
def assertAfterWithManagerInvariantsNoError(self, mock_manager):
self.assertAfterWithManagerInvariants(mock_manager,
(None, None, None))
def assertInWithGeneratorInvariants(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertFalse(mock_generator.stopped)
def assertAfterWithGeneratorInvariantsNoError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
def raiseTestException(self):
raise self.TEST_EXCEPTION
def assertAfterWithManagerInvariantsWithError(self, mock_manager,
exc_type=None):
self.assertTrue(mock_manager.enter_called)
self.assertTrue(mock_manager.exit_called)
if exc_type is None:
self.assertEqual(mock_manager.exit_args[1], self.TEST_EXCEPTION)
exc_type = type(self.TEST_EXCEPTION)
self.assertEqual(mock_manager.exit_args[0], exc_type)
# Test the __exit__ arguments. Issue #7853
self.assertIsInstance(mock_manager.exit_args[1], exc_type)
self.assertIsNot(mock_manager.exit_args[2], None)
def assertAfterWithGeneratorInvariantsWithError(self, mock_generator):
self.assertTrue(mock_generator.yielded)
self.assertTrue(mock_generator.stopped)
class NonexceptionalTestCase(unittest.TestCase, ContextmanagerAssertionMixin):
def testInlineGeneratorSyntax(self):
with mock_contextmanager_generator():
pass
def testUnboundGenerator(self):
mock = mock_contextmanager_generator()
with mock:
pass
self.assertAfterWithManagerInvariantsNoError(mock)
def testInlineGeneratorBoundSyntax(self):
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
# FIXME: In the future, we'll try to keep the bound names from leaking
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToExistingVariable(self):
foo = None
with mock_contextmanager_generator() as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithGeneratorInvariantsNoError(foo)
def testInlineGeneratorBoundToDottedVariable(self):
with mock_contextmanager_generator() as self.foo:
self.assertInWithGeneratorInvariants(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.foo)
def testBoundGenerator(self):
mock = mock_contextmanager_generator()
with mock as foo:
self.assertInWithGeneratorInvariants(foo)
self.assertInWithManagerInvariants(mock)
self.assertAfterWithGeneratorInvariantsNoError(foo)
self.assertAfterWithManagerInvariantsNoError(mock)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
with mock_a as foo:
mock_b = mock_contextmanager_generator()
with mock_b as bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(foo)
self.assertInWithGeneratorInvariants(bar)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsNoError(bar)
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(foo)
self.assertAfterWithManagerInvariantsNoError(mock_a)
self.assertAfterWithGeneratorInvariantsNoError(foo)
class NestedNonexceptionalTestCase(unittest.TestCase,
ContextmanagerAssertionMixin):
def testSingleArgInlineGeneratorSyntax(self):
with Nested(mock_contextmanager_generator()):
pass
def testSingleArgBoundToNonTuple(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as foo:
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToSingleElementParenthesizedList(self):
m = mock_contextmanager_generator()
# This will bind all the arguments to nested() into a single list
# assigned to foo.
with Nested(m) as (foo):
self.assertInWithManagerInvariants(m)
self.assertAfterWithManagerInvariantsNoError(m)
def testSingleArgBoundToMultipleElementTupleError(self):
def shouldThrowValueError():
with Nested(mock_contextmanager_generator()) as (foo, bar):
pass
self.assertRaises(ValueError, shouldThrowValueError)
def testSingleArgUnbound(self):
mock_contextmanager = mock_contextmanager_generator()
mock_nested = MockNested(mock_contextmanager)
with mock_nested:
self.assertInWithManagerInvariants(mock_contextmanager)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(mock_contextmanager)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgUnbound(self):
m = mock_contextmanager_generator()
n = mock_contextmanager_generator()
o = mock_contextmanager_generator()
mock_nested = MockNested(m, n, o)
with mock_nested:
self.assertInWithManagerInvariants(m)
self.assertInWithManagerInvariants(n)
self.assertInWithManagerInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithManagerInvariantsNoError(m)
self.assertAfterWithManagerInvariantsNoError(n)
self.assertAfterWithManagerInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
def testMultipleArgBound(self):
mock_nested = MockNested(mock_contextmanager_generator(),
mock_contextmanager_generator(), mock_contextmanager_generator())
with mock_nested as (m, n, o):
self.assertInWithGeneratorInvariants(m)
self.assertInWithGeneratorInvariants(n)
self.assertInWithGeneratorInvariants(o)
self.assertInWithManagerInvariants(mock_nested)
self.assertAfterWithGeneratorInvariantsNoError(m)
self.assertAfterWithGeneratorInvariantsNoError(n)
self.assertAfterWithGeneratorInvariantsNoError(o)
self.assertAfterWithManagerInvariantsNoError(mock_nested)
class ExceptionalTestCase(ContextmanagerAssertionMixin, unittest.TestCase):
def testSingleResource(self):
cm = mock_contextmanager_generator()
def shouldThrow():
with cm as self.resource:
self.assertInWithManagerInvariants(cm)
self.assertInWithGeneratorInvariants(self.resource)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm)
self.assertAfterWithGeneratorInvariantsWithError(self.resource)
def testExceptionNormalized(self):
cm = mock_contextmanager_generator()
def shouldThrow():
with cm as self.resource:
# Note this relies on the fact that 1 // 0 produces an exception
# that is not normalized immediately.
1 // 0
self.assertRaises(ZeroDivisionError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm, ZeroDivisionError)
def testNestedSingleStatements(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsWithError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsWithError(self.bar)
def testMultipleResourcesInSingleStatement(self):
cm_a = mock_contextmanager_generator()
cm_b = mock_contextmanager_generator()
mock_nested = MockNested(cm_a, cm_b)
def shouldThrow():
with mock_nested as (self.resource_a, self.resource_b):
self.assertInWithManagerInvariants(cm_a)
self.assertInWithManagerInvariants(cm_b)
self.assertInWithManagerInvariants(mock_nested)
self.assertInWithGeneratorInvariants(self.resource_a)
self.assertInWithGeneratorInvariants(self.resource_b)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(cm_a)
self.assertAfterWithManagerInvariantsWithError(cm_b)
self.assertAfterWithManagerInvariantsWithError(mock_nested)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_a)
self.assertAfterWithGeneratorInvariantsWithError(self.resource_b)
def testNestedExceptionBeforeInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
self.bar = None
def shouldThrow():
with mock_a as self.foo:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithGeneratorInvariants(self.foo)
self.raiseTestException()
with mock_b as self.bar:
pass
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
# The inner statement stuff should never have been touched
self.assertEqual(self.bar, None)
self.assertFalse(mock_b.enter_called)
self.assertFalse(mock_b.exit_called)
self.assertEqual(mock_b.exit_args, None)
def testNestedExceptionAfterInnerStatement(self):
mock_a = mock_contextmanager_generator()
mock_b = mock_contextmanager_generator()
def shouldThrow():
with mock_a as self.foo:
with mock_b as self.bar:
self.assertInWithManagerInvariants(mock_a)
self.assertInWithManagerInvariants(mock_b)
self.assertInWithGeneratorInvariants(self.foo)
self.assertInWithGeneratorInvariants(self.bar)
self.raiseTestException()
self.assertRaises(RuntimeError, shouldThrow)
self.assertAfterWithManagerInvariantsWithError(mock_a)
self.assertAfterWithManagerInvariantsNoError(mock_b)
self.assertAfterWithGeneratorInvariantsWithError(self.foo)
self.assertAfterWithGeneratorInvariantsNoError(self.bar)
def testRaisedStopIteration1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration2(self):
# From bug 1462485
class cm(object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise StopIteration("from with")
self.assertRaises(StopIteration, shouldThrow)
def testRaisedStopIteration3(self):
# Another variant where the exception hasn't been instantiated
# From bug 1705170
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise next(iter([]))
self.assertRaises(StopIteration, shouldThrow)
def testRaisedGeneratorExit1(self):
# From bug 1462485
@contextmanager
def cm():
yield
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testRaisedGeneratorExit2(self):
# From bug 1462485
class cm (object):
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
def shouldThrow():
with cm():
raise GeneratorExit("from with")
self.assertRaises(GeneratorExit, shouldThrow)
def testErrorsInBool(self):
# issue4589: __exit__ return code may raise an exception
# when looking at its truth value.
class cm(object):
def __init__(self, bool_conversion):
class Bool:
def __bool__(self):
return bool_conversion()
self.exit_result = Bool()
def __enter__(self):
return 3
def __exit__(self, a, b, c):
return self.exit_result
def trueAsBool():
with cm(lambda: True):
self.fail("Should NOT see this")
trueAsBool()
def falseAsBool():
with cm(lambda: False):
self.fail("Should raise")
self.assertRaises(AssertionError, falseAsBool)
def failAsBool():
with cm(lambda: 1//0):
self.fail("Should NOT see this")
self.assertRaises(ZeroDivisionError, failAsBool)
class NonLocalFlowControlTestCase(unittest.TestCase):
def testWithBreak(self):
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
break
counter += 100 # Not reached
self.assertEqual(counter, 11)
def testWithContinue(self):
counter = 0
while True:
counter += 1
if counter > 2:
break
with mock_contextmanager_generator():
counter += 10
continue
counter += 100 # Not reached
self.assertEqual(counter, 12)
def testWithReturn(self):
def foo():
counter = 0
while True:
counter += 1
with mock_contextmanager_generator():
counter += 10
return counter
counter += 100 # Not reached
self.assertEqual(foo(), 11)
def testWithYield(self):
def gen():
with mock_contextmanager_generator():
yield 12
yield 13
x = list(gen())
self.assertEqual(x, [12, 13])
def testWithRaise(self):
counter = 0
try:
counter += 1
with mock_contextmanager_generator():
counter += 10
raise RuntimeError
counter += 100 # Not reached
except RuntimeError:
self.assertEqual(counter, 11)
else:
self.fail("Didn't raise RuntimeError")
class AssignmentTargetTestCase(unittest.TestCase):
def testSingleComplexTarget(self):
targets = {1: [0, 1, 2]}
with mock_contextmanager_generator() as targets[1][0]:
self.assertEqual(list(targets.keys()), [1])
self.assertEqual(targets[1][0].__class__, MockResource)
with mock_contextmanager_generator() as list(targets.values())[0][1]:
self.assertEqual(list(targets.keys()), [1])
self.assertEqual(targets[1][1].__class__, MockResource)
with mock_contextmanager_generator() as targets[2]:
keys = list(targets.keys())
keys.sort()
self.assertEqual(keys, [1, 2])
class C: pass
blah = C()
with mock_contextmanager_generator() as blah.foo:
self.assertEqual(hasattr(blah, "foo"), True)
def testMultipleComplexTargets(self):
class C:
def __enter__(self): return 1, 2, 3
def __exit__(self, t, v, tb): pass
targets = {1: [0, 1, 2]}
with C() as (targets[1][0], targets[1][1], targets[1][2]):
self.assertEqual(targets, {1: [1, 2, 3]})
with C() as (list(targets.values())[0][2], list(targets.values())[0][1], list(targets.values())[0][0]):
self.assertEqual(targets, {1: [3, 2, 1]})
with C() as (targets[1], targets[2], targets[3]):
self.assertEqual(targets, {1: 1, 2: 2, 3: 3})
class B: pass
blah = B()
with C() as (blah.one, blah.two, blah.three):
self.assertEqual(blah.one, 1)
self.assertEqual(blah.two, 2)
self.assertEqual(blah.three, 3)
class ExitSwallowsExceptionTestCase(unittest.TestCase):
def testExitTrueSwallowsException(self):
class AfricanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return True
try:
with AfricanSwallow():
1/0
except ZeroDivisionError:
self.fail("ZeroDivisionError should have been swallowed")
def testExitFalseDoesntSwallowException(self):
class EuropeanSwallow:
def __enter__(self): pass
def __exit__(self, t, v, tb): return False
try:
with EuropeanSwallow():
1/0
except ZeroDivisionError:
pass
else:
self.fail("ZeroDivisionError should have been raised")
class NestedWith(unittest.TestCase):
class Dummy(object):
def __init__(self, value=None, gobble=False):
if value is None:
value = self
self.value = value
self.gobble = gobble
self.enter_called = False
self.exit_called = False
def __enter__(self):
self.enter_called = True
return self.value
def __exit__(self, *exc_info):
self.exit_called = True
self.exc_info = exc_info
if self.gobble:
return True
class InitRaises(object):
def __init__(self): raise RuntimeError()
class EnterRaises(object):
def __enter__(self): raise RuntimeError()
def __exit__(self, *exc_info): pass
class ExitRaises(object):
def __enter__(self): pass
def __exit__(self, *exc_info): raise RuntimeError()
def testNoExceptions(self):
with self.Dummy() as a, self.Dummy() as b:
self.assertTrue(a.enter_called)
self.assertTrue(b.enter_called)
self.assertTrue(a.exit_called)
self.assertTrue(b.exit_called)
def testExceptionInExprList(self):
try:
with self.Dummy() as a, self.InitRaises():
pass
except:
pass
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
def testExceptionInEnter(self):
try:
with self.Dummy() as a, self.EnterRaises():
self.fail('body of bad with executed')
except RuntimeError:
pass
else:
self.fail('RuntimeError not reraised')
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
def testExceptionInExit(self):
body_executed = False
with self.Dummy(gobble=True) as a, self.ExitRaises():
body_executed = True
self.assertTrue(a.enter_called)
self.assertTrue(a.exit_called)
self.assertTrue(body_executed)
self.assertNotEqual(a.exc_info[0], None)
def testEnterReturnsTuple(self):
with self.Dummy(value=(1,2)) as (a1, a2), \
self.Dummy(value=(10, 20)) as (b1, b2):
self.assertEqual(1, a1)
self.assertEqual(2, a2)
self.assertEqual(10, b1)
self.assertEqual(20, b2)
def test_main():
run_unittest(FailureTestCase, NonexceptionalTestCase,
NestedNonexceptionalTestCase, ExceptionalTestCase,
NonLocalFlowControlTestCase,
AssignmentTargetTestCase,
ExitSwallowsExceptionTestCase,
NestedWith)
if __name__ == '__main__':
test_main()
| bsd-3-clause | 8,240,573,627,408,270,000 | 34.275266 | 111 | 0.613375 | false |
APCVSRepo/sdl_core | src/3rd_party-static/jsoncpp/scons-tools/globtool.py | 256 | 1667 | import fnmatch
import os
def generate( env ):
def Glob( env, includes = None, excludes = None, dir = '.' ):
"""Adds Glob( includes = Split( '*' ), excludes = None, dir = '.')
helper function to environment.
      Globs the file-system files found in dir.
      includes: list of file name patterns included in the return list when matched.
      excludes: list of file name patterns excluded from the return list.
Example:
sources = env.Glob( ("*.cpp", '*.h'), "~*.cpp", "#src" )
"""
def filterFilename(path):
abs_path = os.path.join( dir, path )
if not os.path.isfile(abs_path):
return 0
fn = os.path.basename(path)
match = 0
for include in includes:
if fnmatch.fnmatchcase( fn, include ):
match = 1
break
if match == 1 and not excludes is None:
for exclude in excludes:
if fnmatch.fnmatchcase( fn, exclude ):
match = 0
break
return match
if includes is None:
includes = ('*',)
elif type(includes) in ( type(''), type(u'') ):
includes = (includes,)
if type(excludes) in ( type(''), type(u'') ):
excludes = (excludes,)
dir = env.Dir(dir).abspath
paths = os.listdir( dir )
def makeAbsFileNode( path ):
return env.File( os.path.join( dir, path ) )
nodes = filter( filterFilename, paths )
return map( makeAbsFileNode, nodes )
from SCons.Script import Environment
Environment.Glob = Glob
def exists(env):
"""
Tool always exists.
"""
return True
| bsd-3-clause | -6,193,818,490,681,867,000 | 30.45283 | 84 | 0.54769 | false |
yangleo/cloud-github | openstack_dashboard/dashboards/admin/networks/agents/tests.py | 9 | 7633 | # Copyright 2012 NEC Corporation
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
NETWORKS_DETAIL_URL = 'horizon:admin:networks:detail'
class NetworkAgentTests(test.BaseAdminViewTests):
@test.create_stubs({api.neutron: ('agent_list',
'network_get',
'list_dhcp_agent_hosting_networks',)})
def test_agent_add_get(self):
network = self.networks.first()
api.neutron.agent_list(IsA(http.HttpRequest), agent_type='DHCP agent')\
.AndReturn(self.agents.list())
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest),
network.id)\
.AndReturn(self.agents.list())
self.mox.ReplayAll()
url = reverse('horizon:admin:networks:adddhcpagent',
args=[network.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'admin/networks/agents/add.html')
@test.create_stubs({api.neutron: ('agent_list',
'network_get',
'list_dhcp_agent_hosting_networks',
'add_network_to_dhcp_agent',)})
def test_agent_add_post(self):
network = self.networks.first()
agent_id = self.agents.first().id
api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest),
network.id)\
.AndReturn([self.agents.list()[1]])
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
api.neutron.agent_list(IsA(http.HttpRequest), agent_type='DHCP agent')\
.AndReturn(self.agents.list())
api.neutron.add_network_to_dhcp_agent(IsA(http.HttpRequest),
agent_id, network.id)\
.AndReturn(True)
self.mox.ReplayAll()
form_data = {'network_id': network.id,
'network_name': network.name,
'agent': agent_id}
url = reverse('horizon:admin:networks:adddhcpagent',
args=[network.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse(NETWORKS_DETAIL_URL, args=[network.id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('agent_list',
'network_get',
'list_dhcp_agent_hosting_networks',
'add_network_to_dhcp_agent',)})
def test_agent_add_post_exception(self):
network = self.networks.first()
agent_id = self.agents.first().id
api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest),
network.id)\
.AndReturn([self.agents.list()[1]])
api.neutron.network_get(IsA(http.HttpRequest), network.id)\
.AndReturn(network)
api.neutron.agent_list(IsA(http.HttpRequest), agent_type='DHCP agent')\
.AndReturn(self.agents.list())
api.neutron.add_network_to_dhcp_agent(IsA(http.HttpRequest),
agent_id, network.id)\
.AndRaise(self.exceptions.neutron)
self.mox.ReplayAll()
form_data = {'network_id': network.id,
'network_name': network.name,
'agent': agent_id}
url = reverse('horizon:admin:networks:adddhcpagent',
args=[network.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redir_url = reverse(NETWORKS_DETAIL_URL, args=[network.id])
self.assertRedirectsNoFollow(res, redir_url)
@test.create_stubs({api.neutron: ('subnet_list',
'port_list',
'list_dhcp_agent_hosting_networks',
'is_extension_supported',
'remove_network_from_dhcp_agent',)})
def test_agent_delete(self):
network_id = self.networks.first().id
agent_id = self.agents.first().id
api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest),
network_id).\
AndReturn(self.agents.list())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.neutron.remove_network_from_dhcp_agent(IsA(http.HttpRequest),
agent_id, network_id)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(False)
self.mox.ReplayAll()
form_data = {'action': 'agents__delete__%s' % agent_id}
url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
@test.create_stubs({api.neutron: ('subnet_list',
'port_list',
'list_dhcp_agent_hosting_networks',
'is_extension_supported',
'remove_network_from_dhcp_agent',)})
def test_agent_delete_exception(self):
network_id = self.networks.first().id
agent_id = self.agents.first().id
api.neutron.list_dhcp_agent_hosting_networks(IsA(http.HttpRequest),
network_id).\
AndReturn(self.agents.list())
api.neutron.subnet_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.subnets.first()])
api.neutron.port_list(IsA(http.HttpRequest), network_id=network_id)\
.AndReturn([self.ports.first()])
api.neutron.remove_network_from_dhcp_agent(IsA(http.HttpRequest),
agent_id, network_id)\
.AndRaise(self.exceptions.neutron)
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'mac-learning')\
.AndReturn(False)
self.mox.ReplayAll()
form_data = {'action': 'agents__delete__%s' % agent_id}
url = reverse(NETWORKS_DETAIL_URL, args=[network_id])
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
| apache-2.0 | 5,939,888,068,796,748,000 | 45.542683 | 79 | 0.55129 | false |
xtao/code | tools/update_issue.py | 3 | 1349 | # -*- coding: utf-8 -*-
from vilya.libs.store import store
def main():
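    # Backfill issues.target_id from the project_issues and team_issues
    # join tables, for 'project'- and 'team'-typed issues respectively.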
rs = store.execute("select id, type "
"from issues "
"where type='project'")
for r in rs:
id, _ = r
rs1 = store.execute("select id, project_id, issue_id "
"from project_issues "
"where issue_id=%s",
id)
if rs1 and rs1[0]:
_, target_id, _ = rs1[0]
store.execute("update issues "
"set target_id=%s "
"where id=%s",
(target_id, id))
store.commit()
rs = store.execute("select id, type "
"from issues "
"where type='team'")
for r in rs:
id, _ = r
rs1 = store.execute("select id, team_id, issue_id "
"from team_issues "
"where issue_id=%s",
id)
if rs1 and rs1[0]:
_, target_id, _ = rs1[0]
store.execute("update issues "
"set target_id=%s "
"where id=%s",
(target_id, id))
store.commit()
if __name__ == "__main__":
main()
| bsd-3-clause | -7,259,733,509,700,299,000 | 29.659091 | 62 | 0.368421 | false |
yglazko/socorro | webapp-django/bin/linting.py | 9 | 1558 | #!/usr/bin/env python
"""
Use like this:
find somedir | xargs flake8 | python linting.py
or:
flake8 somedir | python linting.py
or:
git ls-files somedir | python linting.py
"""
import os
import sys
# Enter any part of a warning that we deem OK.
# It can be a pep8 warning error code or any other part of a string.
#
# NOTE! Be as specific as you possibly can!
# Only blanket whole files if you desperately have to
#
EXCEPTIONS = (
# has a exceptional use of `...import *`
'settings/base.py:4:',
# has a well known `...import *` trick that we like
'settings/__init__.py',
# ignore south migrations
'/migrations/',
# all downloaded libs to be ignored
'/js/lib/',
# See https://bugzilla.mozilla.org/show_bug.cgi?id=997270
'/js/jquery/',
'/js/flot',
'/js/timeago/',
'jquery.tablesorter.min.js',
'async-local-storage-with-Promise.min.js',
'underscore-min.js',
'moment.min.js',
'jquery.metadata.js',
)
EXTENSIONS_ONLY = (
'.py',
# commented out until we clean up our .js files
# See https://bugzilla.mozilla.org/show_bug.cgi?id=997272
# '.js'
)
def main():
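    # Read flake8 output (or bare file names) from stdin; any line that
    # survives the extension filter and the EXCEPTIONS list counts as an
    # error to report.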
errors = 0
for line in sys.stdin:
if not line.strip():
continue
_, ext = os.path.splitext(line.split(':')[0])
if ext not in EXTENSIONS_ONLY:
continue
if [f for f in EXCEPTIONS if f in line]:
continue
errors += 1
sys.stderr.write(line)
return errors
if __name__ == '__main__':
sys.exit(main())
| mpl-2.0 | -4,406,283,696,547,520,000 | 19.773333 | 68 | 0.600128 | false |
DrPantera/gsiege | resistencia/tests/test_round.py | 3 | 5377 | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# This file is part of Resistencia Cadiz 1812.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Copyright (C) 2010, Pablo Recio Quijano
#----------------------------------------------------------------------
"""
Contains the class that specializes the round class of the competition, to
be used in the tests.
"""
import csv
import os.path
import os
from guadaboard import guada_board
#from resistencia import xdg
from resistencia.contest import round as contest_round
class TestRound(contest_round.Round):
"""
    This class includes the same values as a contest round.
    In the __init__ method, teams is a tuple: the first value is a list of
    matches, and the second is the translator that maps each key to the
    real team.
"""
def __init__ (self, teams, num_turns = 150,
log_file=None, player = 0, logFolder = None):
#player must be 0 or 1
contest_round.Round.__init__(self, teams[0], teams[1],
num_turns)
self.player_team = player
self.log_file = log_file
self.logFolder = logFolder
self.round_stats = {}
self.round_stats['wins'] = 0
self.round_stats['looses'] = 0
self.round_stats['draws'] = 0
self.round_stats['turns_winning'] = 0
self.round_stats['turns_losing'] = 0
self.round_stats['num_pieces'] = 0
self.round_stats['val_pieces'] = 0
self.round_stats['max_death'] = 0
print "Creating test round"
def get_round_stats(self):
"""
        Return the dictionary of stats for the test round.
"""
if self.completed:
return self.round_stats
else:
raise contest_round.RoundError('Not all games played')
def _merge_stats(self, match_stats):
"""
Merge the stats of a match with the general stats.
"""
for k in match_stats:
self.round_stats[k] = self.round_stats[k] + match_stats[k]
def play_match(self, fast=None, cant_draw=None):
"""
        Run a simulation of the next game in the round.
"""
teams_keys = {}
teams_keys['a'] = self.round[self.next_game][0][0]
teams_keys['b'] = self.round[self.next_game][0][1]
team_a = (self.translator[teams_keys['a']],)
team_b = (self.translator[teams_keys['b']],)
logFileName = [""]
result, stats = guada_board.run(team_a, team_b, fast=True,
get_stats=True,
number_turns=self.num_turns,
logNameReference = logFileName)
# print "LogName from outside:", logFileName[0]
# print "Basename", os.path.basename(logFileName[0])
# print "LogFolder", self.logFolder
os.rename(logFileName[0], os.path.join(self.logFolder, os.path.basename(logFileName[0])))
stats_writer = csv.writer(open(self.log_file, 'a'), delimiter=',')#,
#quotechar='|', quoting=csv.QUOTE_MINIMAL)
key_result = ''
player_stats = stats[self.player_team]
number_of_turns = 0
key = ''
team = ''
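        # Decode the raw result as used here: guada_board.run() returns +1
        # when side A (player index 0) wins and -1 when side B wins, so
        # key_result is 'A' for a win by the tracked player, 'B' for a loss
        # and 'D' for a draw.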
if self.player_team == 0:
key = teams_keys['b']
team = 'A'
if result == 1:
number_of_turns = player_stats['turns_winning']
key_result = 'A'
elif result == -1:
number_of_turns = player_stats['turns_losing']
key_result = 'B'
else:
key_result = 'D'
else:
key = teams_keys['a']
team = 'B'
if result == -1:
number_of_turns = player_stats['turns_winning']
key_result = 'A'
elif result == 1:
number_of_turns = player_stats['turns_losing']
key_result = 'B'
else:
key_result = 'D'
write_results = [key, team, key_result, number_of_turns,
player_stats['num_pieces'],
player_stats['val_pieces'],
player_stats['max_death']]
#print write_results
stats_writer.writerow(write_results)
self.round[self.next_game] = (self.round[self.next_game][0],
True, result)
self.next_game = self.next_game + 1
self.completed = (self.next_game == self.number_games)
self._merge_stats(stats[self.player_team])
return (self.round[self.next_game-1][0],
self.round[self.next_game-1][2])
| gpl-3.0 | -8,459,068,154,937,609,000 | 34.375 | 97 | 0.540264 | false |
blackzw/openwrt_sdk_dev1 | staging_dir/host/lib/python2.7/compileall.py | 144 | 7763 | """Module/script to byte-compile all .py files to .pyc (or .pyo) files.
When called as a script with arguments, this compiles the directories
given as arguments recursively; the -l option prevents it from
recursing into directories.
Without arguments, if compiles all modules on sys.path, without
recursing into subdirectories. (Even though it should do so for
packages -- for now, you'll have to deal with packages separately.)
See module py_compile for details of the actual byte-compilation.
"""
import os
import sys
import py_compile
import struct
import imp
__all__ = ["compile_dir","compile_file","compile_path"]
def compile_dir(dir, maxlevels=10, ddir=None,
force=0, rx=None, quiet=0):
"""Byte-compile all modules in the given directory tree.
Arguments (only dir is required):
dir: the directory to byte-compile
maxlevels: maximum recursion level (default 10)
ddir: the directory that will be prepended to the path to the
file as it is compiled into each byte-code file.
force: if 1, force compilation, even if timestamps are up-to-date
quiet: if 1, be quiet during compilation
"""
if not quiet:
print 'Listing', dir, '...'
try:
names = os.listdir(dir)
except os.error:
print "Can't list", dir
names = []
names.sort()
success = 1
for name in names:
fullname = os.path.join(dir, name)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if not os.path.isdir(fullname):
if not compile_file(fullname, ddir, force, rx, quiet):
success = 0
elif maxlevels > 0 and \
name != os.curdir and name != os.pardir and \
os.path.isdir(fullname) and \
not os.path.islink(fullname):
if not compile_dir(fullname, maxlevels - 1, dfile, force, rx,
quiet):
success = 0
return success
def compile_file(fullname, ddir=None, force=0, rx=None, quiet=0):
"""Byte-compile one file.
Arguments (only fullname is required):
fullname: the file to byte-compile
ddir: if given, the directory name compiled in to the
byte-code file.
force: if 1, force compilation, even if timestamps are up-to-date
quiet: if 1, be quiet during compilation
"""
success = 1
name = os.path.basename(fullname)
if ddir is not None:
dfile = os.path.join(ddir, name)
else:
dfile = None
if rx is not None:
mo = rx.search(fullname)
if mo:
return success
if os.path.isfile(fullname):
head, tail = name[:-3], name[-3:]
if tail == '.py':
if not force:
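                # A byte-code file is considered current when its first
                # 8 bytes (magic number + source mtime) match what we
                # expect for this source file.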
try:
mtime = int(os.stat(fullname).st_mtime)
expect = struct.pack('<4sl', imp.get_magic(), mtime)
cfile = fullname + (__debug__ and 'c' or 'o')
with open(cfile, 'rb') as chandle:
actual = chandle.read(8)
if expect == actual:
return success
except IOError:
pass
if not quiet:
print 'Compiling', fullname, '...'
try:
ok = py_compile.compile(fullname, None, dfile, True)
except py_compile.PyCompileError,err:
if quiet:
print 'Compiling', fullname, '...'
print err.msg
success = 0
except IOError, e:
print "Sorry", e
success = 0
else:
if ok == 0:
success = 0
return success
def compile_path(skip_curdir=1, maxlevels=0, force=0, quiet=0):
"""Byte-compile all module on sys.path.
Arguments (all optional):
skip_curdir: if true, skip current directory (default true)
maxlevels: max recursion level (default 0)
force: as for compile_dir() (default 0)
quiet: as for compile_dir() (default 0)
"""
success = 1
for dir in sys.path:
if (not dir or dir == os.curdir) and skip_curdir:
print 'Skipping current directory'
else:
success = success and compile_dir(dir, maxlevels, None,
force, quiet=quiet)
return success
def expand_args(args, flist):
"""read names in flist and append to args"""
expanded = args[:]
if flist:
try:
if flist == '-':
fd = sys.stdin
else:
fd = open(flist)
while 1:
line = fd.readline()
if not line:
break
expanded.append(line[:-1])
except IOError:
print "Error reading file list %s" % flist
raise
return expanded
def main():
"""Script main program."""
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'lfqd:x:i:')
except getopt.error, msg:
print msg
print "usage: python compileall.py [-l] [-f] [-q] [-d destdir] " \
"[-x regexp] [-i list] [directory|file ...]"
print
print "arguments: zero or more file and directory names to compile; " \
"if no arguments given, "
print " defaults to the equivalent of -l sys.path"
print
print "options:"
print "-l: don't recurse into subdirectories"
print "-f: force rebuild even if timestamps are up-to-date"
print "-q: output only error messages"
print "-d destdir: directory to prepend to file paths for use in " \
"compile-time tracebacks and in"
print " runtime tracebacks in cases where the source " \
"file is unavailable"
print "-x regexp: skip files matching the regular expression regexp; " \
"the regexp is searched for"
print " in the full path of each file considered for " \
"compilation"
print "-i file: add all the files and directories listed in file to " \
"the list considered for"
print ' compilation; if "-", names are read from stdin'
sys.exit(2)
maxlevels = 10
ddir = None
force = 0
quiet = 0
rx = None
flist = None
for o, a in opts:
if o == '-l': maxlevels = 0
if o == '-d': ddir = a
if o == '-f': force = 1
if o == '-q': quiet = 1
if o == '-x':
import re
rx = re.compile(a)
if o == '-i': flist = a
if ddir:
if len(args) != 1 and not os.path.isdir(args[0]):
print "-d destdir require exactly one directory argument"
sys.exit(2)
success = 1
try:
if args or flist:
try:
if flist:
args = expand_args(args, flist)
except IOError:
success = 0
if success:
for arg in args:
if os.path.isdir(arg):
if not compile_dir(arg, maxlevels, ddir,
force, rx, quiet):
success = 0
else:
if not compile_file(arg, ddir, force, rx, quiet):
success = 0
else:
success = compile_path()
except KeyboardInterrupt:
print "\n[interrupted]"
success = 0
return success
if __name__ == '__main__':
exit_status = int(not main())
sys.exit(exit_status)
| gpl-2.0 | -8,446,471,044,155,858,000 | 33.198238 | 80 | 0.526343 | false |
chromium/chromium | third_party/blink/tools/blinkpy/w3c/test_importer.py | 5 | 28640 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Fetches a copy of the latest state of a W3C test repository and commits.
If this script is given the argument --auto-update, it will also:
1. Upload a CL.
2. Trigger try jobs and wait for them to complete.
3. Make any changes that are required for new failing tests.
4. Attempt to land the CL.
"""
import argparse
import datetime
import json
import logging
import re
from blinkpy.common.net.git_cl import GitCL
from blinkpy.common.net.network_transaction import NetworkTimeout
from blinkpy.common.path_finder import PathFinder
from blinkpy.common.system.executive import ScriptError
from blinkpy.common.system.log_utils import configure_logging
from blinkpy.w3c.android_wpt_expectations_updater import AndroidWPTExpectationsUpdater
from blinkpy.w3c.chromium_exportable_commits import exportable_commits_over_last_n_commits
from blinkpy.w3c.common import read_credentials, is_testharness_baseline, is_file_exportable, WPT_GH_URL
from blinkpy.w3c.directory_owners_extractor import DirectoryOwnersExtractor
from blinkpy.w3c.import_notifier import ImportNotifier
from blinkpy.w3c.local_wpt import LocalWPT
from blinkpy.w3c.test_copier import TestCopier
from blinkpy.w3c.wpt_expectations_updater import WPTExpectationsUpdater
from blinkpy.w3c.wpt_github import WPTGitHub
from blinkpy.w3c.wpt_manifest import WPTManifest, BASE_MANIFEST_NAME
from blinkpy.web_tests.port.base import Port
from blinkpy.web_tests.models.test_expectations import TestExpectations
# Settings for how often to check try job results and how long to wait.
POLL_DELAY_SECONDS = 2 * 60
TIMEOUT_SECONDS = 210 * 60
# Sheriff calendar URL, used for getting the ecosystem infra sheriff to cc.
ROTATIONS_URL = 'https://chrome-ops-rotation-proxy.appspot.com/current/grotation:chrome-ecosystem-infra'
SHERIFF_EMAIL_FALLBACK = '[email protected]'
RUBBER_STAMPER_BOT = '[email protected]'
_log = logging.getLogger(__file__)
class TestImporter(object):
def __init__(self, host, wpt_github=None, wpt_manifests=None):
self.host = host
self.wpt_github = wpt_github
self.executive = host.executive
self.fs = host.filesystem
self.finder = PathFinder(self.fs)
self.chromium_git = self.host.git(self.finder.chromium_base())
self.dest_path = self.finder.path_from_web_tests('external', 'wpt')
# A common.net.git_cl.GitCL instance.
self.git_cl = None
# Another Git instance with local WPT as CWD, which can only be
# instantiated after the working directory is created.
self.wpt_git = None
# The WPT revision we are importing and the one imported last time.
self.wpt_revision = None
self.last_wpt_revision = None
# A set of rebaselined tests and a dictionary of new test expectations
# mapping failing tests to platforms to
# wpt_expectations_updater.SimpleTestResult.
self.rebaselined_tests = set()
self.new_test_expectations = {}
# New override expectations for Android targets. A dictionary mapping
        # products to a dictionary that maps test names to test expectation
        # lines.
self.new_override_expectations = {}
self.verbose = False
args = ['--clean-up-affected-tests-only',
'--clean-up-test-expectations']
self._expectations_updater = WPTExpectationsUpdater(
self.host, args, wpt_manifests)
args = [
'--android-product',
'android_weblayer'
]
self._android_expectations_updater = AndroidWPTExpectationsUpdater(
self.host, args, wpt_manifests)
def main(self, argv=None):
# TODO(robertma): Test this method! Split it to make it easier to test
# if necessary.
options = self.parse_args(argv)
self.verbose = options.verbose
log_level = logging.DEBUG if self.verbose else logging.INFO
configure_logging(logging_level=log_level, include_time=True)
# Having the full output when executive.run_command fails is useful when
# investigating a failed import, as all we have are logs.
self.executive.error_output_limit = None
if options.auto_update and options.auto_upload:
_log.error(
'--auto-upload and --auto-update cannot be used together.')
return 1
if not self.checkout_is_okay():
return 1
credentials = read_credentials(self.host, options.credentials_json)
gh_user = credentials.get('GH_USER')
gh_token = credentials.get('GH_TOKEN')
if not gh_user or not gh_token:
_log.warning('You have not set your GitHub credentials. This '
'script may fail with a network error when making '
'an API request to GitHub.')
_log.warning('See https://chromium.googlesource.com/chromium/src'
'/+/main/docs/testing/web_platform_tests.md'
'#GitHub-credentials for instructions on how to set '
'your credentials up.')
self.wpt_github = self.wpt_github or WPTGitHub(self.host, gh_user,
gh_token)
self.git_cl = GitCL(
self.host, auth_refresh_token_json=options.auth_refresh_token_json)
_log.debug('Noting the current Chromium revision.')
chromium_revision = self.chromium_git.latest_git_commit()
# Instantiate Git after local_wpt.fetch() to make sure the path exists.
local_wpt = LocalWPT(self.host, gh_token=gh_token)
local_wpt.fetch()
self.wpt_git = self.host.git(local_wpt.path)
if options.revision is not None:
_log.info('Checking out %s', options.revision)
self.wpt_git.run(['checkout', options.revision])
_log.debug('Noting the revision we are importing.')
self.wpt_revision = self.wpt_git.latest_git_commit()
self.last_wpt_revision = self._get_last_imported_wpt_revision()
import_commit = 'wpt@%s' % self.wpt_revision
_log.info('Importing %s to Chromium %s', import_commit,
chromium_revision)
if options.ignore_exportable_commits:
commit_message = self._commit_message(chromium_revision,
import_commit)
else:
commits = self.apply_exportable_commits_locally(local_wpt)
if commits is None:
_log.error('Could not apply some exportable commits cleanly.')
_log.error('Aborting import to prevent clobbering commits.')
return 1
commit_message = self._commit_message(
chromium_revision,
import_commit,
locally_applied_commits=commits)
self._clear_out_dest_path()
_log.info('Copying the tests from the temp repo to the destination.')
test_copier = TestCopier(self.host, local_wpt.path)
test_copier.do_import()
# TODO(robertma): Implement `add --all` in Git (it is different from `commit --all`).
self.chromium_git.run(['add', '--all', self.dest_path])
# Remove expectations for tests that were deleted and rename tests in
# expectations for renamed tests. This requires the old WPT manifest, so
# must happen before we regenerate it.
self._expectations_updater.cleanup_test_expectations_files()
self._generate_manifest()
# TODO(crbug.com/800570 robertma): Re-enable it once we fix the bug.
# self._delete_orphaned_baselines()
if not self.chromium_git.has_working_directory_changes():
_log.info('Done: no changes to import.')
return 0
if self._only_wpt_manifest_changed():
_log.info('Only manifest was updated; skipping the import.')
return 0
with self._expectations_updater.prepare_smoke_tests(self.chromium_git):
self._commit_changes(commit_message)
_log.info('Changes imported and committed.')
if not options.auto_upload and not options.auto_update:
return 0
self._upload_cl()
_log.info('Issue: %s', self.git_cl.run(['issue']).strip())
if not self.update_expectations_for_cl():
return 1
if not options.auto_update:
return 0
if not self.run_commit_queue_for_cl():
return 1
if not self.send_notifications(local_wpt, options.auto_file_bugs,
options.monorail_auth_json):
return 1
return 0
def update_expectations_for_cl(self):
"""Performs the expectation-updating part of an auto-import job.
This includes triggering try jobs and waiting; then, if applicable,
writing new baselines and TestExpectation lines, committing, and
uploading a new patchset.
This assumes that there is CL associated with the current branch.
Returns True if everything is OK to continue, or False on failure.
"""
_log.info('Triggering try jobs for updating expectations.')
self.git_cl.trigger_try_jobs(self.blink_try_bots())
cl_status = self.git_cl.wait_for_try_jobs(
poll_delay_seconds=POLL_DELAY_SECONDS,
timeout_seconds=TIMEOUT_SECONDS)
if not cl_status:
_log.error('No initial try job results, aborting.')
self.git_cl.run(['set-close'])
return False
if cl_status.status == 'closed':
_log.error('The CL was closed, aborting.')
return False
_log.info('All jobs finished.')
try_results = cl_status.try_job_results
if try_results and self.git_cl.some_failed(try_results):
self.fetch_new_expectations_and_baselines()
self.fetch_wpt_override_expectations()
if self.chromium_git.has_working_directory_changes():
self._generate_manifest()
message = 'Update test expectations and baselines.'
self._commit_changes(message)
self._upload_patchset(message)
return True
def run_commit_queue_for_cl(self):
"""Triggers CQ and either commits or aborts; returns True on success."""
_log.info('Triggering CQ try jobs.')
self.git_cl.run(['try'])
cl_status = self.git_cl.wait_for_try_jobs(
poll_delay_seconds=POLL_DELAY_SECONDS,
timeout_seconds=TIMEOUT_SECONDS,
cq_only=True)
if not cl_status:
self.git_cl.run(['set-close'])
_log.error('Timed out waiting for CQ; aborting.')
return False
if cl_status.status == 'closed':
_log.error('The CL was closed; aborting.')
return False
_log.info('All jobs finished.')
cq_try_results = cl_status.try_job_results
if not cq_try_results:
_log.error('No CQ try results found in try results')
self.git_cl.run(['set-close'])
return False
if not self.git_cl.all_success(cq_try_results):
_log.error('CQ appears to have failed; aborting.')
self.git_cl.run(['set-close'])
return False
_log.info(
'CQ appears to have passed; sending to the rubber-stamper bot for '
'CR+1 and commit.')
_log.info(
'If the rubber-stamper bot rejects the CL, you either need to '
'modify the benign file patterns, or manually CR+1 and land the '
'import yourself if it touches code files. See https://chromium.'
'googlesource.com/infra/infra/+/refs/heads/main/go/src/infra/'
'appengine/rubber-stamper/README.md')
# `--send-mail` is required to take the CL out of WIP mode.
self.git_cl.run([
'upload', '-f', '--send-mail', '--enable-auto-submit',
'--reviewers', RUBBER_STAMPER_BOT
])
if self.git_cl.wait_for_closed_status():
_log.info('Update completed.')
return True
_log.error('Cannot submit CL; aborting.')
try:
self.git_cl.run(['set-close'])
except ScriptError as e:
if e.output and 'Conflict: change is merged' in e.output:
_log.error('CL is already merged; treating as success.')
return True
else:
raise e
return False
def blink_try_bots(self):
"""Returns the collection of builders used for updating expectations."""
return self.host.builders.filter_builders(is_try=True)
def parse_args(self, argv):
parser = argparse.ArgumentParser()
parser.description = __doc__
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='log extra details that may be helpful when debugging')
parser.add_argument(
'--ignore-exportable-commits',
action='store_true',
help='do not check for exportable commits that would be clobbered')
parser.add_argument('-r', '--revision', help='target wpt revision')
parser.add_argument(
'--auto-upload',
action='store_true',
help='upload a CL, update expectations, but do NOT trigger CQ')
parser.add_argument(
'--auto-update',
action='store_true',
help='upload a CL, update expectations, and trigger CQ')
parser.add_argument(
'--auto-file-bugs',
action='store_true',
help='file new failures automatically to crbug.com')
parser.add_argument(
'--auth-refresh-token-json',
help='authentication refresh token JSON file used for try jobs, '
'generally not necessary on developer machines')
parser.add_argument(
'--credentials-json',
help='A JSON file with GitHub credentials, '
'generally not necessary on developer machines')
parser.add_argument(
'--monorail-auth-json',
help='A JSON file containing the private key of a service account '
'to access Monorail (crbug.com), only needed when '
'--auto-file-bugs is used')
return parser.parse_args(argv)
def checkout_is_okay(self):
if self.chromium_git.has_working_directory_changes():
_log.warning('Checkout is dirty; aborting.')
return False
# TODO(robertma): Add a method in Git to query a range of commits.
local_commits = self.chromium_git.run(
['log', '--oneline', 'origin/main..HEAD'])
if local_commits:
_log.warning('Checkout has local commits before import.')
return True
def apply_exportable_commits_locally(self, local_wpt):
"""Applies exportable Chromium changes to the local WPT repo.
The purpose of this is to avoid clobbering changes that were made in
Chromium but not yet merged upstream. By applying these changes to the
local copy of web-platform-tests before copying files over, we make
it so that the resulting change in Chromium doesn't undo the
previous Chromium change.
Args:
A LocalWPT instance for our local copy of WPT.
Returns:
A list of commits applied (could be empty), or None if any
of the patches could not be applied cleanly.
"""
commits = self.exportable_but_not_exported_commits(local_wpt)
for commit in commits:
_log.info('Applying exportable commit locally:')
_log.info(commit.url())
_log.info('Subject: %s', commit.subject().strip())
# Log a note about the corresponding PR.
# This might not be necessary, and could potentially be removed.
pull_request = self.wpt_github.pr_for_chromium_commit(commit)
if pull_request:
_log.info('PR: %spull/%d', WPT_GH_URL, pull_request.number)
else:
_log.warning('No pull request found.')
error = local_wpt.apply_patch(commit.format_patch())
if error:
_log.error('Commit cannot be applied cleanly:')
_log.error(error)
return None
self.wpt_git.commit_locally_with_message(
'Applying patch %s' % commit.sha)
return commits
def exportable_but_not_exported_commits(self, local_wpt):
"""Returns a list of commits that would be clobbered by importer.
The list contains all exportable but not exported commits, not filtered
by whether they can apply cleanly.
"""
# The errors returned by exportable_commits_over_last_n_commits are
# irrelevant and ignored here, because it tests patches *individually*
# while the importer tries to reapply these patches *cumulatively*.
commits, _ = exportable_commits_over_last_n_commits(
self.host,
local_wpt,
self.wpt_github,
require_clean=False,
verify_merged_pr=True)
return commits
def _generate_manifest(self):
"""Generates MANIFEST.json for imported tests.
Runs the (newly-updated) manifest command if it's found, and then
stages the generated MANIFEST.json in the git index, ready to commit.
"""
_log.info('Generating MANIFEST.json')
WPTManifest.generate_manifest(self.host.port_factory.get(),
self.dest_path)
manifest_path = self.fs.join(self.dest_path, 'MANIFEST.json')
assert self.fs.exists(manifest_path)
manifest_base_path = self.fs.normpath(
self.fs.join(self.dest_path, '..', BASE_MANIFEST_NAME))
self.copyfile(manifest_path, manifest_base_path)
self.chromium_git.add_list([manifest_base_path])
def _clear_out_dest_path(self):
"""Removes all files that are synced with upstream from Chromium WPT.
Instead of relying on TestCopier to overwrite these files, cleaning up
first ensures if upstream deletes some files, we also delete them.
"""
_log.info('Cleaning out tests from %s.', self.dest_path)
should_remove = lambda fs, dirname, basename: (
is_file_exportable(fs.relpath(fs.join(dirname, basename), self.finder.chromium_base())))
files_to_delete = self.fs.files_under(
self.dest_path, file_filter=should_remove)
for subpath in files_to_delete:
self.remove(self.finder.path_from_web_tests('external', subpath))
def _commit_changes(self, commit_message):
_log.info('Committing changes.')
self.chromium_git.commit_locally_with_message(commit_message)
def _only_wpt_manifest_changed(self):
changed_files = self.chromium_git.changed_files()
wpt_base_manifest = self.fs.relpath(
self.fs.join(self.dest_path, '..', BASE_MANIFEST_NAME),
self.finder.chromium_base())
return changed_files == [wpt_base_manifest]
def _commit_message(self,
chromium_commit_sha,
import_commit_sha,
locally_applied_commits=None):
message = 'Import {}\n\nUsing wpt-import in Chromium {}.\n'.format(
import_commit_sha, chromium_commit_sha)
if locally_applied_commits:
message += 'With Chromium commits locally applied on WPT:\n'
message += '\n'.join(
str(commit) for commit in locally_applied_commits)
message += '\nNo-Export: true'
return message
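    # For illustration, a message built above looks like:
    #   Import wpt@<import-sha>
    #
    #   Using wpt-import in Chromium <chromium-sha>.
    #   With Chromium commits locally applied on WPT:
    #   <one line per locally applied commit>
    #
    #   No-Export: true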
def _delete_orphaned_baselines(self):
_log.info('Deleting any orphaned baselines.')
is_baseline_filter = lambda fs, dirname, basename: is_testharness_baseline(basename)
baselines = self.fs.files_under(
self.dest_path, file_filter=is_baseline_filter)
# Note about possible refactoring:
# - the manifest path could be factored out to a common location, and
# - the logic for reading the manifest could be factored out from here
# and the Port class.
manifest_path = self.finder.path_from_web_tests(
'external', 'wpt', 'MANIFEST.json')
manifest = WPTManifest(self.fs.read_text_file(manifest_path))
wpt_urls = manifest.all_urls()
# Currently baselines for tests with query strings are merged,
# so that the tests foo.html?r=1 and foo.html?r=2 both have the same
# baseline, foo-expected.txt.
# TODO(qyearsley): Remove this when this behavior is fixed.
wpt_urls = [url.split('?')[0] for url in wpt_urls]
wpt_dir = self.finder.path_from_web_tests('external', 'wpt')
for full_path in baselines:
rel_path = self.fs.relpath(full_path, wpt_dir)
if not self._has_corresponding_test(rel_path, wpt_urls):
self.fs.remove(full_path)
def _has_corresponding_test(self, rel_path, wpt_urls):
# TODO(qyearsley): Ensure that this works with platform baselines and
# virtual baselines, and add unit tests.
base = '/' + rel_path.replace('-expected.txt', '')
return any(
(base + ext) in wpt_urls for ext in Port.supported_file_extensions)
def copyfile(self, source, destination):
_log.debug('cp %s %s', source, destination)
self.fs.copyfile(source, destination)
def remove(self, dest):
_log.debug('rm %s', dest)
self.fs.remove(dest)
def _upload_patchset(self, message):
self.git_cl.run(['upload', '--bypass-hooks', '-f', '-t', message])
def _upload_cl(self):
_log.info('Uploading change list.')
directory_owners = self.get_directory_owners()
description = self._cl_description(directory_owners)
sheriff_email = self.sheriff_email()
temp_file, temp_path = self.fs.open_text_tempfile()
temp_file.write(description)
temp_file.close()
self.git_cl.run([
'upload',
'--bypass-hooks',
'-f',
'--message-file',
temp_path,
'--cc',
sheriff_email,
])
self.fs.remove(temp_path)
def get_directory_owners(self):
"""Returns a mapping of email addresses to owners of changed tests."""
_log.info('Gathering directory owners emails to CC.')
changed_files = self.chromium_git.changed_files()
extractor = DirectoryOwnersExtractor(self.host)
return extractor.list_owners(changed_files)
def _cl_description(self, directory_owners):
"""Returns a CL description string.
Args:
directory_owners: A dict of tuples of owner names to lists of directories.
"""
# TODO(robertma): Add a method in Git for getting the commit body.
description = self.chromium_git.run(['log', '-1', '--format=%B'])
description += (
'Note to sheriffs: This CL imports external tests and adds\n'
'expectations for those tests; if this CL is large and causes\n'
'a few new failures, please fix the failures by adding new\n'
'lines to TestExpectations rather than reverting. See:\n'
'https://chromium.googlesource.com'
'/chromium/src/+/main/docs/testing/web_platform_tests.md\n\n')
if directory_owners:
description += self._format_directory_owners(
directory_owners) + '\n\n'
# Prevent FindIt from auto-reverting import CLs.
description += 'NOAUTOREVERT=true\n'
# Move any No-Export tag to the end of the description.
description = description.replace('No-Export: true', '')
description = description.replace('\n\n\n\n', '\n\n')
description += 'No-Export: true\n'
# Add the wptrunner MVP tryjobs as blocking trybots, to catch any test
# changes or infrastructure changes from upstream.
#
# If this starts blocking the importer unnecessarily, revert
# https://chromium-review.googlesource.com/c/chromium/src/+/2451504
description += (
'Cq-Include-Trybots: luci.chromium.try:linux-wpt-identity-fyi-rel,'
'linux-wpt-input-fyi-rel')
return description
@staticmethod
def _format_directory_owners(directory_owners):
message_lines = ['Directory owners for changes in this CL:']
for owner_tuple, directories in sorted(directory_owners.items()):
message_lines.append(', '.join(owner_tuple) + ':')
message_lines.extend(' ' + d for d in directories)
return '\n'.join(message_lines)
def sheriff_email(self):
"""Returns the sheriff email address to cc.
This tries to fetch the current ecosystem infra sheriff, but falls back
in case of error.
"""
email = ''
try:
email = self._fetch_ecosystem_infra_sheriff_email()
except (IOError, KeyError, ValueError) as error:
_log.error('Exception while fetching current sheriff: %s', error)
return email or SHERIFF_EMAIL_FALLBACK
def _fetch_ecosystem_infra_sheriff_email(self):
try:
content = self.host.web.get_binary(ROTATIONS_URL)
except NetworkTimeout:
_log.error('Cannot fetch %s', ROTATIONS_URL)
return ''
data = json.loads(content)
if not data.get('emails'):
_log.error(
'No email found for current sheriff. Retrieved content: %s',
content)
return ''
return data['emails'][0]
def fetch_new_expectations_and_baselines(self):
"""Modifies expectation lines and baselines based on try job results.
Assuming that there are some try job results available, this
adds new expectation lines to TestExpectations and downloads new
baselines based on the try job results.
This is the same as invoking the `wpt-update-expectations` script.
"""
_log.info('Adding test expectations lines to TestExpectations.')
self.rebaselined_tests, self.new_test_expectations = (
self._expectations_updater.update_expectations())
def fetch_wpt_override_expectations(self):
"""Modifies WPT Override expectations based on try job results.
Assuming that there are some try job results available, this
adds new expectation lines to WPT Override Expectation files,
e.g. WebLayerWPTOverrideExpectations
This is the same as invoking the `wpt-update-expectations` script.
"""
_log.info('Adding test expectations lines to Override Expectations.')
_, self.new_override_expectations = (
self._android_expectations_updater.update_expectations())
def _get_last_imported_wpt_revision(self):
"""Finds the last imported WPT revision."""
# TODO(robertma): Only match commit subjects.
output = self.chromium_git.most_recent_log_matching(
'^Import wpt@', self.finder.chromium_base())
# No line-start anchor (^) below because of the formatting of output.
result = re.search(r'Import wpt@(\w+)', output)
if result:
return result.group(1)
else:
_log.error('Cannot find last WPT import.')
return None
def send_notifications(self, local_wpt, auto_file_bugs,
monorail_auth_json):
issue = self.git_cl.run(['status', '--field=id']).strip()
patchset = self.git_cl.run(['status', '--field=patch']).strip()
# Construct the notifier here so that any errors won't affect the import.
notifier = ImportNotifier(self.host, self.chromium_git, local_wpt)
notifier.main(
self.last_wpt_revision,
self.wpt_revision,
self.rebaselined_tests,
self.new_test_expectations,
self.new_override_expectations,
issue,
patchset,
dry_run=not auto_file_bugs,
service_account_key_json=monorail_auth_json)
return True
| bsd-3-clause | 8,107,221,156,276,999,000 | 40.871345 | 104 | 0.617493 | false |
aliclark/libquic | src/third_party/protobuf/objectivec/DevTools/pddm_tests.py | 66 | 16785 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2015 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for pddm.py."""
import io
import unittest
import pddm
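# The fixtures below exercise the PDDM macro syntax, e.g.:
#   PDDM-DEFINE name(arg1, arg2)
#   body text, where arg options such as arg1$u expand with a transformation
#   (here: uppercase the first character)
#   PDDM-DEFINE-END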
class TestParsingMacros(unittest.TestCase):
def testParseEmpty(self):
f = io.StringIO(u'')
result = pddm.MacroCollection(f)
self.assertEqual(len(result._macros), 0)
def testParseOne(self):
f = io.StringIO(u"""PDDM-DEFINE foo( )
body""")
result = pddm.MacroCollection(f)
self.assertEqual(len(result._macros), 1)
macro = result._macros.get('foo')
self.assertIsNotNone(macro)
self.assertEquals(macro.name, 'foo')
self.assertEquals(macro.args, tuple())
self.assertEquals(macro.body, 'body')
def testParseGeneral(self):
# Tests multiple defines, spaces in all places, etc.
f = io.StringIO(u"""
PDDM-DEFINE noArgs( )
body1
body2
PDDM-DEFINE-END
PDDM-DEFINE oneArg(foo)
body3
PDDM-DEFINE twoArgs( bar_ , baz )
body4
body5""")
result = pddm.MacroCollection(f)
self.assertEqual(len(result._macros), 3)
macro = result._macros.get('noArgs')
self.assertIsNotNone(macro)
self.assertEquals(macro.name, 'noArgs')
self.assertEquals(macro.args, tuple())
self.assertEquals(macro.body, 'body1\nbody2\n')
macro = result._macros.get('oneArg')
self.assertIsNotNone(macro)
self.assertEquals(macro.name, 'oneArg')
self.assertEquals(macro.args, ('foo',))
self.assertEquals(macro.body, 'body3')
macro = result._macros.get('twoArgs')
self.assertIsNotNone(macro)
self.assertEquals(macro.name, 'twoArgs')
self.assertEquals(macro.args, ('bar_', 'baz'))
self.assertEquals(macro.body, 'body4\nbody5')
# Add into existing collection
f = io.StringIO(u"""
PDDM-DEFINE another(a,b,c)
body1
body2""")
result.ParseInput(f)
self.assertEqual(len(result._macros), 4)
macro = result._macros.get('another')
self.assertIsNotNone(macro)
self.assertEquals(macro.name, 'another')
self.assertEquals(macro.args, ('a', 'b', 'c'))
self.assertEquals(macro.body, 'body1\nbody2')
def testParseDirectiveIssues(self):
test_list = [
# Unknown directive
(u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINED foo\nbaz',
'Hit a line with an unknown directive: '),
# End without begin
(u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINE-END\nPDDM-DEFINE-END\n',
'Got DEFINE-END directive without an active macro: '),
# Line not in macro block
(u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINE-END\nmumble\n',
'Hit a line that wasn\'t a directive and no open macro definition: '),
# Redefine macro
(u'PDDM-DEFINE foo()\nbody\nPDDM-DEFINE foo(a)\nmumble\n',
'Attempt to redefine macro: '),
]
for idx, (input_str, expected_prefix) in enumerate(test_list, 1):
f = io.StringIO(input_str)
try:
result = pddm.MacroCollection(f)
self.fail('Should throw exception, entry %d' % idx)
except pddm.PDDMError as e:
self.assertTrue(e.message.startswith(expected_prefix),
'Entry %d failed: %r' % (idx, e))
def testParseBeginIssues(self):
test_list = [
# 1. No name
(u'PDDM-DEFINE\nmumble',
'Failed to parse macro definition: '),
# 2. No name (with spaces)
(u'PDDM-DEFINE \nmumble',
'Failed to parse macro definition: '),
# 3. No open paren
(u'PDDM-DEFINE foo\nmumble',
'Failed to parse macro definition: '),
# 4. No close paren
(u'PDDM-DEFINE foo(\nmumble',
'Failed to parse macro definition: '),
# 5. No close paren (with args)
(u'PDDM-DEFINE foo(a, b\nmumble',
'Failed to parse macro definition: '),
# 6. No name before args
(u'PDDM-DEFINE (a, b)\nmumble',
'Failed to parse macro definition: '),
# 7. No name before args
(u'PDDM-DEFINE foo bar(a, b)\nmumble',
'Failed to parse macro definition: '),
# 8. Empty arg name
(u'PDDM-DEFINE foo(a, ,b)\nmumble',
'Empty arg name in macro definition: '),
(u'PDDM-DEFINE foo(a,,b)\nmumble',
'Empty arg name in macro definition: '),
# 10. Duplicate name
(u'PDDM-DEFINE foo(a,b,a,c)\nmumble',
'Arg name "a" used more than once in macro definition: '),
# 11. Invalid arg name
(u'PDDM-DEFINE foo(a b,c)\nmumble',
'Invalid arg name "a b" in macro definition: '),
(u'PDDM-DEFINE foo(a.b,c)\nmumble',
'Invalid arg name "a.b" in macro definition: '),
(u'PDDM-DEFINE foo(a-b,c)\nmumble',
'Invalid arg name "a-b" in macro definition: '),
(u'PDDM-DEFINE foo(a,b,c.)\nmumble',
'Invalid arg name "c." in macro definition: '),
# 15. Extra stuff after the name
(u'PDDM-DEFINE foo(a,c) foo\nmumble',
'Failed to parse macro definition: '),
(u'PDDM-DEFINE foo(a,c) foo)\nmumble',
'Failed to parse macro definition: '),
]
for idx, (input_str, expected_prefix) in enumerate(test_list, 1):
f = io.StringIO(input_str)
try:
result = pddm.MacroCollection(f)
self.fail('Should throw exception, entry %d' % idx)
except pddm.PDDMError as e:
self.assertTrue(e.message.startswith(expected_prefix),
'Entry %d failed: %r' % (idx, e))
class TestExpandingMacros(unittest.TestCase):
def testExpandBasics(self):
f = io.StringIO(u"""
PDDM-DEFINE noArgs( )
body1
body2
PDDM-DEFINE-END
PDDM-DEFINE oneArg(a)
body3 a
PDDM-DEFINE-END
PDDM-DEFINE twoArgs(b,c)
body4 b c
body5
PDDM-DEFINE-END
""")
mc = pddm.MacroCollection(f)
test_list = [
(u'noArgs()',
'body1\nbody2\n'),
(u'oneArg(wee)',
'body3 wee\n'),
(u'twoArgs(having some, fun)',
'body4 having some fun\nbody5'),
# One arg, pass empty.
(u'oneArg()',
'body3 \n'),
# Two args, gets empty in each slot.
(u'twoArgs(, empty)',
'body4 empty\nbody5'),
(u'twoArgs(empty, )',
'body4 empty \nbody5'),
(u'twoArgs(, )',
'body4 \nbody5'),
]
for idx, (input_str, expected) in enumerate(test_list, 1):
result = mc.Expand(input_str)
self.assertEqual(result, expected,
'Entry %d --\n Result: %r\n Expected: %r' %
(idx, result, expected))
def testExpandArgOptions(self):
f = io.StringIO(u"""
PDDM-DEFINE bar(a)
a-a$S-a$l-a$L-a$u-a$U
PDDM-DEFINE-END
""")
mc = pddm.MacroCollection(f)
self.assertEqual(mc.Expand('bar(xYz)'), 'xYz- -xYz-xyz-XYz-XYZ')
self.assertEqual(mc.Expand('bar(MnoP)'), 'MnoP- -mnoP-mnop-MnoP-MNOP')
# Test empty
self.assertEqual(mc.Expand('bar()'), '-----')
def testExpandSimpleMacroErrors(self):
f = io.StringIO(u"""
PDDM-DEFINE foo(a, b)
<a-z>
PDDM-DEFINE baz(a)
a - a$z
""")
mc = pddm.MacroCollection(f)
test_list = [
# 1. Unknown macro
(u'bar()',
'No macro named "bar".'),
(u'bar(a)',
'No macro named "bar".'),
# 3. Arg mismatch
(u'foo()',
'Expected 2 args, got: "foo()".'),
(u'foo(a b)',
'Expected 2 args, got: "foo(a b)".'),
(u'foo(a,b,c)',
'Expected 2 args, got: "foo(a,b,c)".'),
# 6. Unknown option in expansion
(u'baz(mumble)',
'Unknown arg option "a$z" while expanding "baz(mumble)".'),
]
for idx, (input_str, expected_err) in enumerate(test_list, 1):
try:
result = mc.Expand(input_str)
self.fail('Should throw exception, entry %d' % idx)
except pddm.PDDMError as e:
self.assertEqual(e.message, expected_err,
'Entry %d failed: %r' % (idx, e))
def testExpandReferences(self):
f = io.StringIO(u"""
PDDM-DEFINE StartIt()
foo(abc, def)
foo(ghi, jkl)
PDDM-DEFINE foo(a, b)
bar(a, int)
bar(b, NSString *)
PDDM-DEFINE bar(n, t)
- (t)n;
- (void)set##n$u##:(t)value;
""")
mc = pddm.MacroCollection(f)
expected = """- (int)abc;
- (void)setAbc:(int)value;
- (NSString *)def;
- (void)setDef:(NSString *)value;
- (int)ghi;
- (void)setGhi:(int)value;
- (NSString *)jkl;
- (void)setJkl:(NSString *)value;
"""
self.assertEqual(mc.Expand('StartIt()'), expected)
def testCatchRecursion(self):
f = io.StringIO(u"""
PDDM-DEFINE foo(a, b)
bar(1, a)
bar(2, b)
PDDM-DEFINE bar(x, y)
foo(x, y)
""")
mc = pddm.MacroCollection(f)
try:
result = mc.Expand('foo(A,B)')
      self.fail('Should throw exception')
except pddm.PDDMError as e:
self.assertEqual(e.message,
'Found macro recusion, invoking "foo(1, A)":\n...while expanding "bar(1, A)".\n...while expanding "foo(A,B)".')
class TestParsingSource(unittest.TestCase):
def testBasicParse(self):
test_list = [
# 1. no directives
(u'a\nb\nc',
(3,) ),
# 2. One define
(u'a\n//%PDDM-DEFINE foo()\n//%body\nc',
(1, 2, 1) ),
# 3. Two defines
(u'a\n//%PDDM-DEFINE foo()\n//%body\n//%PDDM-DEFINE bar()\n//%body2\nc',
(1, 4, 1) ),
# 4. Two defines with ends
(u'a\n//%PDDM-DEFINE foo()\n//%body\n//%PDDM-DEFINE-END\n'
u'//%PDDM-DEFINE bar()\n//%body2\n//%PDDM-DEFINE-END\nc',
(1, 6, 1) ),
# 5. One expand, one define (that runs to end of file)
(u'a\n//%PDDM-EXPAND foo()\nbody\n//%PDDM-EXPAND-END\n'
u'//%PDDM-DEFINE bar()\n//%body2\n',
(1, 1, 2) ),
# 6. One define ended with an expand.
(u'a\nb\n//%PDDM-DEFINE bar()\n//%body2\n'
u'//%PDDM-EXPAND bar()\nbody2\n//%PDDM-EXPAND-END\n',
(2, 2, 1) ),
# 7. Two expands (one end), one define.
(u'a\n//%PDDM-EXPAND foo(1)\nbody\n//%PDDM-EXPAND foo(2)\nbody2\n//%PDDM-EXPAND-END\n'
u'//%PDDM-DEFINE foo()\n//%body2\n',
(1, 2, 2) ),
]
for idx, (input_str, line_counts) in enumerate(test_list, 1):
f = io.StringIO(input_str)
sf = pddm.SourceFile(f)
sf._ParseFile()
self.assertEqual(len(sf._sections), len(line_counts),
'Entry %d -- %d != %d' %
(idx, len(sf._sections), len(line_counts)))
for idx2, (sec, expected) in enumerate(zip(sf._sections, line_counts), 1):
self.assertEqual(sec.num_lines_captured, expected,
'Entry %d, section %d -- %d != %d' %
(idx, idx2, sec.num_lines_captured, expected))
def testErrors(self):
test_list = [
# 1. Directive within expansion
(u'//%PDDM-EXPAND a()\n//%PDDM-BOGUS',
'Ran into directive ("//%PDDM-BOGUS", line 2) while in "//%PDDM-EXPAND a()".'),
(u'//%PDDM-EXPAND a()\n//%PDDM-DEFINE a()\n//%body\n',
'Ran into directive ("//%PDDM-DEFINE", line 2) while in "//%PDDM-EXPAND a()".'),
# 3. Expansion ran off end of file
(u'//%PDDM-EXPAND a()\na\nb\n',
'Hit the end of the file while in "//%PDDM-EXPAND a()".'),
# 4. Directive within define
(u'//%PDDM-DEFINE a()\n//%body\n//%PDDM-BOGUS',
'Ran into directive ("//%PDDM-BOGUS", line 3) while in "//%PDDM-DEFINE a()".'),
(u'//%PDDM-DEFINE a()\n//%body\n//%PDDM-EXPAND-END a()',
'Ran into directive ("//%PDDM-EXPAND-END", line 3) while in "//%PDDM-DEFINE a()".'),
# 6. Directives that shouldn't start sections
(u'a\n//%PDDM-DEFINE-END a()\n//a\n',
'Unexpected line 2: "//%PDDM-DEFINE-END a()".'),
(u'a\n//%PDDM-EXPAND-END a()\n//a\n',
'Unexpected line 2: "//%PDDM-EXPAND-END a()".'),
(u'//%PDDM-BOGUS\n//a\n',
'Unexpected line 1: "//%PDDM-BOGUS".'),
]
for idx, (input_str, expected_err) in enumerate(test_list, 1):
f = io.StringIO(input_str)
try:
pddm.SourceFile(f)._ParseFile()
self.fail('Should throw exception, entry %d' % idx)
except pddm.PDDMError as e:
self.assertEqual(e.message, expected_err,
'Entry %d failed: %r' % (idx, e))
class TestProcessingSource(unittest.TestCase):
def testBasics(self):
input_str = u"""
//%PDDM-IMPORT-DEFINES ImportFile
foo
//%PDDM-EXPAND mumble(abc)
//%PDDM-EXPAND-END
bar
//%PDDM-EXPAND mumble(def)
//%PDDM-EXPAND mumble(ghi)
//%PDDM-EXPAND-END
baz
//%PDDM-DEFINE mumble(a_)
//%a_: getName(a_)
"""
input_str2 = u"""
//%PDDM-DEFINE getName(x_)
//%do##x_$u##(int x_);
"""
expected = u"""
//%PDDM-IMPORT-DEFINES ImportFile
foo
//%PDDM-EXPAND mumble(abc)
// This block of code is generated, do not edit it directly.
abc: doAbc(int abc);
//%PDDM-EXPAND-END mumble(abc)
bar
//%PDDM-EXPAND mumble(def)
// This block of code is generated, do not edit it directly.
def: doDef(int def);
//%PDDM-EXPAND mumble(ghi)
// This block of code is generated, do not edit it directly.
ghi: doGhi(int ghi);
//%PDDM-EXPAND-END (2 expansions)
baz
//%PDDM-DEFINE mumble(a_)
//%a_: getName(a_)
"""
expected_stripped = u"""
//%PDDM-IMPORT-DEFINES ImportFile
foo
//%PDDM-EXPAND mumble(abc)
//%PDDM-EXPAND-END mumble(abc)
bar
//%PDDM-EXPAND mumble(def)
//%PDDM-EXPAND mumble(ghi)
//%PDDM-EXPAND-END (2 expansions)
baz
//%PDDM-DEFINE mumble(a_)
//%a_: getName(a_)
"""
def _Resolver(name):
self.assertEqual(name, 'ImportFile')
return io.StringIO(input_str2)
f = io.StringIO(input_str)
sf = pddm.SourceFile(f, _Resolver)
sf.ProcessContent()
self.assertEqual(sf.processed_content, expected)
# Feed it through and nothing should change.
f2 = io.StringIO(sf.processed_content)
sf2 = pddm.SourceFile(f2, _Resolver)
sf2.ProcessContent()
self.assertEqual(sf2.processed_content, expected)
self.assertEqual(sf2.processed_content, sf.processed_content)
# Test stripping (with the original input and expanded version).
f2 = io.StringIO(input_str)
sf2 = pddm.SourceFile(f2)
sf2.ProcessContent(strip_expansion=True)
self.assertEqual(sf2.processed_content, expected_stripped)
f2 = io.StringIO(sf.processed_content)
sf2 = pddm.SourceFile(f2, _Resolver)
sf2.ProcessContent(strip_expansion=True)
self.assertEqual(sf2.processed_content, expected_stripped)
def testProcessFileWithMacroParseError(self):
input_str = u"""
foo
//%PDDM-DEFINE mumble(a_)
//%body
//%PDDM-DEFINE mumble(x_)
//%body2
"""
f = io.StringIO(input_str)
sf = pddm.SourceFile(f)
try:
sf.ProcessContent()
      self.fail('Should throw exception')
except pddm.PDDMError as e:
self.assertEqual(e.message,
'Attempt to redefine macro: "PDDM-DEFINE mumble(x_)"\n'
'...while parsing section that started:\n'
' Line 3: //%PDDM-DEFINE mumble(a_)')
def testProcessFileWithExpandError(self):
input_str = u"""
foo
//%PDDM-DEFINE mumble(a_)
//%body
//%PDDM-EXPAND foobar(x_)
//%PDDM-EXPAND-END
"""
f = io.StringIO(input_str)
sf = pddm.SourceFile(f)
try:
sf.ProcessContent()
      self.fail('Should throw exception')
except pddm.PDDMError as e:
self.assertEqual(e.message,
'No macro named "foobar".\n'
'...while expanding "foobar(x_)" from the section that'
' started:\n Line 5: //%PDDM-EXPAND foobar(x_)')
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 3,504,362,308,927,901,700 | 31.592233 | 134 | 0.614001 | false |
tianzhihen/python-mode | pymode/libs/pylama/lint/pylama_pyflakes/pyflakes/messages.py | 42 | 3808 | """
Provide the class Message and its subclasses.
"""
class Message(object):
message = ''
message_args = ()
def __init__(self, filename, loc):
self.filename = filename
self.lineno = loc.lineno
self.col = getattr(loc, 'col_offset', 0)
def __str__(self):
return '%s:%s: %s' % (self.filename, self.lineno,
self.message % self.message_args)
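# Illustrative behaviour of the classes below: a subclass supplies `message`
# and `message_args`, and str() renders "<filename>:<lineno>: <text>", e.g.
#   str(UnusedImport('foo.py', loc, 'os')) == "foo.py:1: 'os' imported but unused"
# (assuming loc.lineno == 1).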
class UnusedImport(Message):
message = '%r imported but unused'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class RedefinedWhileUnused(Message):
message = 'redefinition of unused %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class RedefinedInListComp(Message):
message = 'list comprehension redefines %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class ImportShadowedByLoopVar(Message):
message = 'import %r from line %r shadowed by loop variable'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class ImportStarUsed(Message):
message = "'from %s import *' used; unable to detect undefined names"
def __init__(self, filename, loc, modname):
Message.__init__(self, filename, loc)
self.message_args = (modname,)
class UndefinedName(Message):
message = 'undefined name %r'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class DoctestSyntaxError(Message):
message = 'syntax error in doctest'
def __init__(self, filename, loc, position=None):
Message.__init__(self, filename, loc)
if position:
(self.lineno, self.col) = position
self.message_args = ()
class UndefinedExport(Message):
message = 'undefined name %r in __all__'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class UndefinedLocal(Message):
message = ('local variable %r (defined in enclosing scope on line %r) '
'referenced before assignment')
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class DuplicateArgument(Message):
message = 'duplicate argument %r in function definition'
def __init__(self, filename, loc, name):
Message.__init__(self, filename, loc)
self.message_args = (name,)
class Redefined(Message):
message = 'redefinition of %r from line %r'
def __init__(self, filename, loc, name, orig_loc):
Message.__init__(self, filename, loc)
self.message_args = (name, orig_loc.lineno)
class LateFutureImport(Message):
message = 'future import(s) %r after other statements'
def __init__(self, filename, loc, names):
Message.__init__(self, filename, loc)
self.message_args = (names,)
class UnusedVariable(Message):
"""
    Indicates that a variable has been explicitly assigned to but not actually
used.
"""
message = 'local variable %r is assigned to but never used'
def __init__(self, filename, loc, names):
Message.__init__(self, filename, loc)
self.message_args = (names,)
class ReturnWithArgsInsideGenerator(Message):
"""
Indicates a return statement with arguments inside a generator.
"""
message = '\'return\' with argument inside generator'
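if __name__ == '__main__':
    # Illustrative demo only (not part of pyflakes): show how a message
    # renders through Message.__str__; _Loc is a hypothetical stand-in for
    # an AST node carrying a line number.
    class _Loc(object):
        lineno = 42
        col_offset = 4
    print(UnusedImport('example.py', _Loc(), 'os'))
    # -> example.py:42: 'os' imported but unused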
| lgpl-3.0 | -2,205,412,686,024,001,500 | 27.207407 | 77 | 0.623687 | false |
sienatime/python_koans | python2/libs/colorama/win32.py | 86 | 2730 |
# from winbase.h
STDOUT = -11
STDERR = -12
try:
from ctypes import windll
except ImportError:
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import (
byref, Structure, c_char, c_short, c_uint32, c_ushort
)
handles = {
STDOUT: windll.kernel32.GetStdHandle(STDOUT),
STDERR: windll.kernel32.GetStdHandle(STDERR),
}
SHORT = c_short
WORD = c_ushort
DWORD = c_uint32
TCHAR = c_char
class COORD(Structure):
"""struct in wincon.h"""
_fields_ = [
('X', SHORT),
('Y', SHORT),
]
class SMALL_RECT(Structure):
"""struct in wincon.h."""
_fields_ = [
("Left", SHORT),
("Top", SHORT),
("Right", SHORT),
("Bottom", SHORT),
]
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", WORD),
("srWindow", SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
def GetConsoleScreenBufferInfo(stream_id):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = windll.kernel32.GetConsoleScreenBufferInfo(
handle, byref(csbi))
# This fails when imported via setup.py when installing using 'pip'
# presumably the fix is that running setup.py should not trigger all
# this activity.
# assert success
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
success = windll.kernel32.SetConsoleTextAttribute(handle, attrs)
assert success
def SetConsoleCursorPosition(stream_id, position):
handle = handles[stream_id]
position = COORD(*position)
success = windll.kernel32.SetConsoleCursorPosition(handle, position)
assert success
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = TCHAR(char)
length = DWORD(length)
start = COORD(*start)
num_written = DWORD(0)
# AttributeError: function 'FillConsoleOutputCharacter' not found
# could it just be that my types are wrong?
success = windll.kernel32.FillConsoleOutputCharacter(
handle, char, length, start, byref(num_written))
assert success
return num_written.value
if __name__ == '__main__':
x = GetConsoleScreenBufferInfo(STDOUT)
print(x.dwSize)
print(x.dwCursorPosition)
print(x.wAttributes)
print(x.srWindow)
print(x.dwMaximumWindowSize)
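    # Hedged addition (not part of the original demo): wincon.h colour bits
    # combine with bitwise OR. The constants below are standard Windows
    # values, restated here as an assumption rather than imported.
    FOREGROUND_GREEN, FOREGROUND_RED = 0x0002, 0x0004
    SetConsoleTextAttribute(STDOUT, FOREGROUND_RED | FOREGROUND_GREEN)
    print('this text should appear yellow on a Windows console')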
| mit | 4,412,570,073,133,768,700 | 27.736842 | 76 | 0.595604 | false |
2015fallproject/2015fallcase1 | static/Brython3.2.0-20150701-214155/Lib/heapq.py | 628 | 18065 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
                                   0
                  1                                 2
          3               4                5               6
      7       8       9       10      11      12      13      14
    15 16   17 18   19 20   21 22   23 24   25 26   27 28   29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
an usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs; this merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to achieve that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, tee, chain
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
else:
returnitem = lastelt
return returnitem
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappushpop_max(heap, item):
"""Maxheap version of a heappush followed by a heappop."""
if heap and item < heap[0]:
item, heap[0] = heap[0], item
_siftup_max(heap, 0)
return item
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, reverse=True)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
heapify(result)
_heappushpop = heappushpop
for elem in it:
_heappushpop(result, elem)
result.sort(reverse=True)
return result
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
_heapify_max(result)
_heappushpop = _heappushpop_max
for elem in it:
_heappushpop(result, elem)
result.sort()
return result
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
# If available, use C implementation
#_heapq does not exist in brython, so lets just comment it out.
#try:
# from _heapq import *
#except ImportError:
# pass
def merge(*iterables):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
'''
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
_len = len
h = []
h_append = h.append
for itnum, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), itnum, next])
except _StopIteration:
pass
heapify(h)
while _len(h) > 1:
try:
while True:
v, itnum, next = s = h[0]
yield v
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
v, itnum, next = h[0]
yield v
yield from next.__self__
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [min(chain(head, it))]
return [min(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is none, use simpler decoration
if key is None:
it = zip(iterable, count()) # decorate
result = _nsmallest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(), in2) # decorate
result = _nsmallest(n, it)
return [r[2] for r in result] # undecorate
_nlargest = nlargest
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [max(chain(head, it))]
return [max(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is none, use simpler decoration
if key is None:
it = zip(iterable, count(0,-1)) # decorate
result = _nlargest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(0,-1), in2) # decorate
result = _nlargest(n, it)
return [r[2] for r in result] # undecorate
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print(sort)
import doctest
doctest.testmod()
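    # Extra illustrative checks (the expected outputs in the trailing
    # comments are assumptions): exercise merge() and the key= variants
    # defined above.
    print(list(merge([0, 2, 4], [1, 3, 5])))          # [0, 1, 2, 3, 4, 5]
    print(nlargest(3, ['aa', 'b', 'cccc'], key=len))  # ['cccc', 'aa', 'b']
    print(nsmallest(2, [5, -3, 2], key=abs))          # [2, -3]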
| agpl-3.0 | -6,920,392,999,057,494,000 | 36.870021 | 81 | 0.646701 | false |
RT-Thread/rt-thread | bsp/qemu-vexpress-a9/rtconfig.py | 11 | 2352 | import os
import uuid
def get_mac_address():
    # Derive three pseudo-unique bytes from the host MAC address and render
    # them as '#define AUTOMACn 0x..' lines for n = 3, 4, 5. The join string
    # supplies the '#define AUTOMAC' prefix between entries; `header` below
    # deliberately ends with that prefix for the first entry.
    mac = uuid.UUID(int=uuid.getnode()).hex[-12:]
    return "#define AUTOMAC".join([str(e//2 + 1) + ' 0x' + mac[e:e+2] + '\n' for e in range(5, 11, 2)])
header = '''
#ifndef __MAC_AUTO_GENERATE_H__
#define __MAC_AUTO_GENERATE_H__
/* Automatically generated file; DO NOT EDIT. */
/* mac configure file for RT-Thread qemu */
#define AUTOMAC0 0x52
#define AUTOMAC1 0x54
#define AUTOMAC2 0x00
#define AUTOMAC'''
end = '''
#endif
'''
automac_h_fn = os.path.join(os.path.dirname(__file__), 'drivers', 'automac.h')
with open(automac_h_fn, 'w') as f:
f.write(header + get_mac_address() + end)
# toolchains options
ARCH='arm'
CPU='cortex-a'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
# only support GNU GCC compiler.
PLATFORM = 'gcc'
EXEC_PATH = '/usr/bin'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
STRIP = PREFIX + 'strip'
DEVICE = ' -march=armv7-a -marm -msoft-float'
CFLAGS = DEVICE + ' -Wall'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -D__ASSEMBLY__ -I.'
LINK_SCRIPT = 'link.lds'
LFLAGS = DEVICE + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,system_vectors'+\
' -T %s' % LINK_SCRIPT
CPATH = ''
LPATH = ''
# generate debug info in all cases
AFLAGS += ' -gdwarf-2'
CFLAGS += ' -g -gdwarf-2'
if BUILD == 'debug':
CFLAGS += ' -O0'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS + ' -Woverloaded-virtual -fno-exceptions -fno-rtti'
M_CFLAGS = CFLAGS + ' -mlong-calls -fPIC '
M_CXXFLAGS = CXXFLAGS + ' -mlong-calls -fPIC'
M_LFLAGS = DEVICE + CXXFLAGS + ' -Wl,--gc-sections,-z,max-page-size=0x4' +\
' -shared -fPIC -nostartfiles -nostdlib -static-libgcc'
M_POST_ACTION = STRIP + ' -R .hash $TARGET\n' + SIZE + ' $TARGET \n'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' +\
SIZE + ' $TARGET \n'
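# Illustrative usage (an assumption about the usual RT-Thread workflow, not
# something this file enforces):
#     RTT_CC=gcc RTT_EXEC_PATH=/opt/gcc-arm-none-eabi/bin scons -j4
# The os.getenv() calls above pick up both variables when SCons runs.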
| apache-2.0 | 145,130,191,266,513,860 | 26.034483 | 106 | 0.574405 | false |
2013Commons/hue | desktop/core/ext-py/tablib-develop/tablib/packages/openpyxl/shared/exc.py | 118 | 2259 | # file openpyxl/shared/exc.py
# Copyright (c) 2010 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: Eric Gazoni
"""Definitions for openpyxl shared exception classes."""
class CellCoordinatesException(Exception):
"""Error for converting between numeric and A1-style cell references."""
class ColumnStringIndexException(Exception):
"""Error for bad column names in A1-style cell references."""
class DataTypeException(Exception):
"""Error for any data type inconsistencies."""
class NamedRangeException(Exception):
"""Error for badly formatted named ranges."""
class SheetTitleException(Exception):
"""Error for bad sheet names."""
class InsufficientCoordinatesException(Exception):
"""Error for partially specified cell coordinates."""
class OpenModeError(Exception):
"""Error for fileobj opened in non-binary mode."""
class InvalidFileException(Exception):
"""Error for trying to open a non-ooxml file."""
class ReadOnlyWorkbookException(Exception):
"""Error for trying to modify a read-only workbook"""
class MissingNumberFormat(Exception):
"""Error when a referenced number format is not in the stylesheet"""
| apache-2.0 | 2,244,202,596,040,726,300 | 37.288136 | 79 | 0.762727 | false |
loic/django | tests/middleware/test_security.py | 4 | 7727 | from django.http import HttpResponse
from django.test import RequestFactory, SimpleTestCase
from django.test.utils import override_settings
class SecurityMiddlewareTest(SimpleTestCase):
@property
def middleware(self):
from django.middleware.security import SecurityMiddleware
return SecurityMiddleware()
@property
def secure_request_kwargs(self):
return {"wsgi.url_scheme": "https"}
def response(self, *args, **kwargs):
headers = kwargs.pop("headers", {})
response = HttpResponse(*args, **kwargs)
for k, v in headers.items():
response[k] = v
return response
def process_response(self, *args, **kwargs):
request_kwargs = {}
if kwargs.pop("secure", False):
request_kwargs.update(self.secure_request_kwargs)
request = (kwargs.pop("request", None) or
self.request.get("/some/url", **request_kwargs))
ret = self.middleware.process_request(request)
if ret:
return ret
return self.middleware.process_response(
request, self.response(*args, **kwargs))
request = RequestFactory()
def process_request(self, method, *args, **kwargs):
if kwargs.pop("secure", False):
kwargs.update(self.secure_request_kwargs)
req = getattr(self.request, method.lower())(*args, **kwargs)
return self.middleware.process_request(req)
@override_settings(SECURE_HSTS_SECONDS=3600)
def test_sts_on(self):
"""
With HSTS_SECONDS=3600, the middleware adds
"strict-transport-security: max-age=3600" to the response.
"""
self.assertEqual(
self.process_response(secure=True)["strict-transport-security"],
"max-age=3600")
@override_settings(SECURE_HSTS_SECONDS=3600)
def test_sts_already_present(self):
"""
The middleware will not override a "strict-transport-security" header
already present in the response.
"""
response = self.process_response(
secure=True,
headers={"strict-transport-security": "max-age=7200"})
self.assertEqual(response["strict-transport-security"], "max-age=7200")
    @override_settings(SECURE_HSTS_SECONDS=3600)
def test_sts_only_if_secure(self):
"""
The "strict-transport-security" header is not added to responses going
over an insecure connection.
"""
self.assertNotIn("strict-transport-security", self.process_response(secure=False))
    @override_settings(SECURE_HSTS_SECONDS=0)
def test_sts_off(self):
"""
With HSTS_SECONDS of 0, the middleware does not add a
"strict-transport-security" header to the response.
"""
self.assertNotIn("strict-transport-security", self.process_response(secure=True))
@override_settings(
SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=True)
def test_sts_include_subdomains(self):
"""
With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS
True, the middleware adds a "strict-transport-security" header with the
"includeSubDomains" tag to the response.
"""
response = self.process_response(secure=True)
self.assertEqual(response["strict-transport-security"], "max-age=600; includeSubDomains")
@override_settings(
SECURE_HSTS_SECONDS=600, SECURE_HSTS_INCLUDE_SUBDOMAINS=False)
def test_sts_no_include_subdomains(self):
"""
With HSTS_SECONDS non-zero and HSTS_INCLUDE_SUBDOMAINS
False, the middleware adds a "strict-transport-security" header without
the "includeSubDomains" tag to the response.
"""
response = self.process_response(secure=True)
self.assertEqual(response["strict-transport-security"], "max-age=600")
@override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
def test_content_type_on(self):
"""
With CONTENT_TYPE_NOSNIFF set to True, the middleware adds
"x-content-type-options: nosniff" header to the response.
"""
self.assertEqual(self.process_response()["x-content-type-options"], "nosniff")
    @override_settings(SECURE_CONTENT_TYPE_NOSNIFF=True)
def test_content_type_already_present(self):
"""
The middleware will not override an "x-content-type-options" header
already present in the response.
"""
response = self.process_response(secure=True, headers={"x-content-type-options": "foo"})
self.assertEqual(response["x-content-type-options"], "foo")
@override_settings(SECURE_CONTENT_TYPE_NOSNIFF=False)
def test_content_type_off(self):
"""
With CONTENT_TYPE_NOSNIFF False, the middleware does not add an
"x-content-type-options" header to the response.
"""
self.assertNotIn("x-content-type-options", self.process_response())
@override_settings(SECURE_BROWSER_XSS_FILTER=True)
def test_xss_filter_on(self):
"""
With BROWSER_XSS_FILTER set to True, the middleware adds
"s-xss-protection: 1; mode=block" header to the response.
"""
self.assertEqual(
self.process_response()["x-xss-protection"],
"1; mode=block")
@override_settings(SECURE_BROWSER_XSS_FILTER=True)
def test_xss_filter_already_present(self):
"""
The middleware will not override an "x-xss-protection" header
already present in the response.
"""
response = self.process_response(secure=True, headers={"x-xss-protection": "foo"})
self.assertEqual(response["x-xss-protection"], "foo")
    @override_settings(SECURE_BROWSER_XSS_FILTER=False)
def test_xss_filter_off(self):
"""
With BROWSER_XSS_FILTER set to False, the middleware does not add an
"x-xss-protection" header to the response.
"""
self.assertNotIn("x-xss-protection", self.process_response())
@override_settings(SECURE_SSL_REDIRECT=True)
def test_ssl_redirect_on(self):
"""
With SSL_REDIRECT True, the middleware redirects any non-secure
requests to the https:// version of the same URL.
"""
ret = self.process_request("get", "/some/url?query=string")
self.assertEqual(ret.status_code, 301)
self.assertEqual(
ret["Location"], "https://testserver/some/url?query=string")
@override_settings(SECURE_SSL_REDIRECT=True)
def test_no_redirect_ssl(self):
"""
The middleware does not redirect secure requests.
"""
ret = self.process_request("get", "/some/url", secure=True)
self.assertIsNone(ret)
@override_settings(
SECURE_SSL_REDIRECT=True, SECURE_REDIRECT_EXEMPT=["^insecure/"])
def test_redirect_exempt(self):
"""
The middleware does not redirect requests with URL path matching an
exempt pattern.
"""
ret = self.process_request("get", "/insecure/page")
self.assertIsNone(ret)
@override_settings(
SECURE_SSL_REDIRECT=True, SECURE_SSL_HOST="secure.example.com")
def test_redirect_ssl_host(self):
"""
The middleware redirects to SSL_HOST if given.
"""
ret = self.process_request("get", "/some/url")
self.assertEqual(ret.status_code, 301)
self.assertEqual(ret["Location"], "https://secure.example.com/some/url")
@override_settings(SECURE_SSL_REDIRECT=False)
def test_ssl_redirect_off(self):
"""
With SSL_REDIRECT False, the middleware does no redirect.
"""
ret = self.process_request("get", "/some/url")
self.assertIsNone(ret)
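# Illustrative production-style settings (not part of this test module; the
# values are assumptions, not Django defaults) that exercise the behaviors
# tested above:
#
#     SECURE_HSTS_SECONDS = 31536000
#     SECURE_HSTS_INCLUDE_SUBDOMAINS = True
#     SECURE_CONTENT_TYPE_NOSNIFF = True
#     SECURE_BROWSER_XSS_FILTER = True
#     SECURE_SSL_REDIRECT = True
#     SECURE_REDIRECT_EXEMPT = [r'^healthz/']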
| bsd-3-clause | 8,942,098,614,826,478,000 | 37.829146 | 97 | 0.63867 | false |
collmot/ardupilot | Tools/LogAnalyzer/tests/TestDupeLogData.py | 21 | 2700 | from __future__ import print_function
from LogAnalyzer import Test,TestResult
import DataflashLog
class TestDupeLogData(Test):
'''test for duplicated data in log, which has been happening on PX4/Pixhawk'''
def __init__(self):
Test.__init__(self)
self.name = "Dupe Log Data"
def __matchSample(self, sample, sampleStartIndex, logdata):
'''return the line number where a match is found, otherwise return False'''
# ignore if all data in sample is the same value
nSame = 0
for s in sample:
if s[1] == sample[0][1]:
nSame += 1
if nSame == 20:
return False
        # scan the logged ATT pitch values for another 20-sample window that matches
data = logdata.channels["ATT"]["Pitch"].listData
for i in range(sampleStartIndex, len(data)):
#print("Checking against index %d" % i)
if i == sampleStartIndex:
continue # skip matching against ourselves
j = 0
while j<20 and (i+j)<len(data) and data[i+j][1] == sample[j][1]:
#print("### Match found, j=%d, data=%f, sample=%f, log data matched to sample at line %d" % (j,data[i+j][1],sample[j][1],data[i+j][0]))
j += 1
if j == 20: # all samples match
return data[i][0]
return False
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
# this could be made more flexible by not hard-coding to use ATT data, could make it dynamic based on whatever is available as long as it is highly variable
if "ATT" not in logdata.channels:
self.result.status = TestResult.StatusType.UNKNOWN
self.result.statusMessage = "No ATT log data"
return
# pick 10 sample points within the range of ATT data we have
sampleStartIndices = []
attStartIndex = 0
attEndIndex = len(logdata.channels["ATT"]["Pitch"].listData)-1
step = int(attEndIndex / 11)
for i in range(step,attEndIndex-step,step):
sampleStartIndices.append(i)
#print("Dupe data sample point index %d at line %d" % (i, logdata.channels["ATT"]["Pitch"].listData[i][0]))
# get 20 datapoints of pitch from each sample location and check for a match elsewhere
sampleIndex = 0
for i in range(sampleStartIndices[0], len(logdata.channels["ATT"]["Pitch"].listData)):
if i == sampleStartIndices[sampleIndex]:
#print("Checking sample %d" % i)
sample = logdata.channels["ATT"]["Pitch"].listData[i:i+20]
matchedLine = self.__matchSample(sample, i, logdata)
if matchedLine:
#print("Data from line %d found duplicated at line %d" % (sample[0][0],matchedLine))
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Duplicate data chunks found in log (%d and %d)" % (sample[0][0],matchedLine)
return
sampleIndex += 1
if sampleIndex >= len(sampleStartIndices):
break
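if __name__ == '__main__':
    # Standalone sketch (not part of the LogAnalyzer suite): the same
    # sliding-window duplicate check on a plain list of (line, value) pairs,
    # with a window of 4 instead of 20 so the toy data stays short.
    data = list(enumerate([1, 2, 3, 4, 9, 1, 2, 3, 4]))
    window = data[0:4]
    for i in range(1, len(data) - len(window) + 1):
        if all(data[i + j][1] == window[j][1] for j in range(len(window))):
            print("window starting at line %d duplicates line %d" % (data[i][0], window[0][0]))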
| gpl-3.0 | 2,794,930,126,047,329,300 | 33.177215 | 158 | 0.681111 | false |
2015fallproject/2015fallcase2 | static/Brython3.2.0-20150701-214155/Lib/decimal.py | 623 | 230510 | # Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module should be kept in sync with the latest updates of the
# IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is an implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
http://en.wikipedia.org/wiki/IEEE_854-1987
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected
Decimal('0.00')).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678')
Decimal('1.2345E+12345680')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print(dig / Decimal(3))
0.333333333
>>> getcontext().prec = 18
>>> print(dig / Decimal(3))
0.333333333333333333
>>> print(dig.sqrt())
1
>>> print(Decimal(3).sqrt())
1.73205080756887729
>>> print(Decimal(3) ** 123)
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print(inf)
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print(neginf)
-Infinity
>>> print(neginf + inf)
NaN
>>> print(neginf * inf)
-Infinity
>>> print(dig / 0)
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print(dig / 0)
Traceback (most recent call last):
...
...
...
decimal.DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> print(c.divide(Decimal(0), Decimal(0)))
Traceback (most recent call last):
...
...
...
decimal.InvalidOperation: 0 / 0
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print(c.divide(Decimal(0), Decimal(0)))
NaN
>>> print(c.flags[InvalidOperation])
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
'FloatOperation',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',
# Functions for manipulating contexts
'setcontext', 'getcontext', 'localcontext',
# Limits for the C version for compatibility
'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY',
# C version: compile time choice that enables the thread local context
'HAVE_THREADS'
]
__version__ = '1.70' # Highest version of the spec this complies with
# See http://speleotrove.com/decimal/
import copy as _copy
import math as _math
import numbers as _numbers
import sys
try:
from collections import namedtuple as _namedtuple
DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
DecimalTuple = lambda *args: args
# Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'
# Compatibility with the C version
HAVE_THREADS = True
if sys.maxsize == 2**63-1:
MAX_PREC = 999999999999999999
MAX_EMAX = 999999999999999999
MIN_EMIN = -999999999999999999
else:
MAX_PREC = 425000000
MAX_EMAX = 425000000
MIN_EMIN = -425000000
MIN_ETINY = MIN_EMIN - (MAX_PREC-1)
# Errors
class DecimalException(ArithmeticError):
"""Base exception class.
Used exceptions derive from this.
If an exception derives from another exception besides this (such as
Underflow (Inexact, Rounded, Subnormal) that indicates that it is only
called if the others are present. This isn't actually used for
anything, though.
handle -- Called when context._raise_error is called and the
trap_enabler is not set. First argument is self, second is the
context. More arguments can be given, those being after
the explanation in _raise_error (For example,
context._raise_error(NewError, '(-x)!', self._sign) would
call NewError().handle(context, self._sign).)
To define a new exception, it should be sufficient to have it derive
from DecimalException.
"""
def handle(self, context, *args):
pass
class Clamped(DecimalException):
"""Exponent of a 0 changed to fit bounds.
This occurs and signals clamped if the exponent of a result has been
altered in order to fit the constraints of a specific concrete
representation. This may occur when the exponent of a zero result would
be outside the bounds of a representation, or when a large normal
number would have an encoded exponent that cannot be represented. In
this latter case, the exponent is reduced to fit and the corresponding
number of zero digits are appended to the coefficient ("fold-down").
"""
#brython fixme
pass
class InvalidOperation(DecimalException):
"""An invalid operation was performed.
Various bad things cause this:
Something creates a signaling NaN
-INF + INF
0 * (+-)INF
(+-)INF / (+-)INF
x % 0
(+-)INF % x
x._rescale( non-integer )
sqrt(-x) , x > 0
0 ** 0
x ** (non-integer)
x ** (+-)INF
An operand is invalid
The result of the operation after these is a quiet positive NaN,
except when the cause is a signaling NaN, in which case the result is
also a quiet NaN, but with the original sign, and an optional
diagnostic information.
"""
def handle(self, context, *args):
if args:
ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
return ans._fix_nan(context)
return _NaN
class ConversionSyntax(InvalidOperation):
"""Trying to convert badly formed string.
This occurs and signals invalid-operation if an string is being
converted to a number and it does not conform to the numeric string
syntax. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
"""Division by 0.
This occurs and signals division-by-zero if division of a finite number
by zero was attempted (during a divide-integer or divide operation, or a
power operation with negative right-hand operand), and the dividend was
not zero.
The result of the operation is [sign,inf], where sign is the exclusive
or of the signs of the operands for divide, or is 1 for an odd power of
-0, for power.
"""
def handle(self, context, sign, *args):
return _SignedInfinity[sign]
class DivisionImpossible(InvalidOperation):
"""Cannot perform the division adequately.
This occurs and signals invalid-operation if the integer result of a
divide-integer or remainder operation had too many digits (would be
longer than precision). The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
"""Undefined result of division.
This occurs and signals invalid-operation if division by zero was
attempted (during a divide-integer, divide, or remainder operation), and
the dividend is also zero. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Inexact(DecimalException):
"""Had to round, losing information.
This occurs and signals inexact whenever the result of an operation is
not exact (that is, it needed to be rounded and any discarded digits
were non-zero), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The inexact signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) was inexact.
"""
#brython fix me
pass
class InvalidContext(InvalidOperation):
"""Invalid context. Unknown rounding, for example.
This occurs and signals invalid-operation if an invalid context was
detected during an operation. This can occur if contexts are not checked
on creation and either the precision exceeds the capability of the
underlying concrete representation or an unknown or unsupported rounding
was specified. These aspects of the context need only be checked when
the values are required to be used. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Rounded(DecimalException):
"""Number got rounded (not necessarily changed during rounding).
This occurs and signals rounded whenever the result of an operation is
rounded (that is, some zero or non-zero digits were discarded from the
coefficient), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The rounded signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) caused a loss of precision.
"""
#brython fix me
pass
class Subnormal(DecimalException):
"""Exponent < Emin before rounding.
This occurs and signals subnormal whenever the result of a conversion or
operation is subnormal (that is, its adjusted exponent is less than
Emin, before any rounding). The result in all cases is unchanged.
The subnormal signal may be tested (or trapped) to determine if a given
or operation (or sequence of operations) yielded a subnormal result.
"""
#brython fix me
pass
class Overflow(Inexact, Rounded):
"""Numerical overflow.
This occurs and signals overflow if the adjusted exponent of a result
(from a conversion or from an operation that is not an attempt to divide
by zero), after rounding, would be greater than the largest value that
can be handled by the implementation (the value Emax).
The result depends on the rounding mode:
For round-half-up and round-half-even (and for round-half-down and
round-up, if implemented), the result of the operation is [sign,inf],
where sign is the sign of the intermediate result. For round-down, the
result is the largest finite number that can be represented in the
current precision, with the sign of the intermediate result. For
round-ceiling, the result is the same as for round-down if the sign of
the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
the result is the same as for round-down if the sign of the intermediate
result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
will also be raised.
"""
def handle(self, context, sign, *args):
if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
ROUND_HALF_DOWN, ROUND_UP):
return _SignedInfinity[sign]
if sign == 0:
if context.rounding == ROUND_CEILING:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
if sign == 1:
if context.rounding == ROUND_FLOOR:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
class Underflow(Inexact, Rounded, Subnormal):
"""Numerical underflow with result rounded to 0.
This occurs and signals underflow if a result is inexact and the
adjusted exponent of the result would be smaller (more negative) than
the smallest value that can be handled by the implementation (the value
Emin). That is, the result is both inexact and subnormal.
The result after an underflow will be a subnormal number rounded, if
necessary, so that its exponent is not less than Etiny. This may result
in 0 with the sign of the intermediate result and an exponent of Etiny.
In all cases, Inexact, Rounded, and Subnormal will also be raised.
"""
#brython fix me
pass
class FloatOperation(DecimalException, TypeError):
"""Enable stricter semantics for mixing floats and Decimals.
If the signal is not trapped (default), mixing floats and Decimals is
permitted in the Decimal() constructor, context.create_decimal() and
all comparison operators. Both conversion and comparisons are exact.
Any occurrence of a mixed operation is silently recorded by setting
FloatOperation in the context flags. Explicit conversions with
Decimal.from_float() or context.create_decimal_from_float() do not
set the flag.
Otherwise (the signal is trapped), only equality comparisons and explicit
conversions are silent. All other mixed operations raise FloatOperation.
"""
#brython fix me
pass
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
Underflow, InvalidOperation, Subnormal, FloatOperation]
# Map conditions (per the spec) to signals
_condition_map = {ConversionSyntax:InvalidOperation,
DivisionImpossible:InvalidOperation,
DivisionUndefined:InvalidOperation,
InvalidContext:InvalidOperation}
# Valid rounding modes
_rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING,
ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP)
##### Context Functions ##################################################
# The getcontext() and setcontext() function manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.current_thread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
try:
import threading
except ImportError:
# Python was compiled without threads; create a mock object instead
class MockThreading(object):
def local(self, sys=sys):
return sys.modules[__name__]
threading = MockThreading()
del MockThreading
try:
threading.local
except AttributeError:
# To fix reloading, force it to create a new context
# Old contexts have different exceptions in their dicts, making problems.
if hasattr(threading.current_thread(), '__decimal_context__'):
del threading.current_thread().__decimal_context__
def setcontext(context):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
threading.current_thread().__decimal_context__ = context
def getcontext():
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return threading.current_thread().__decimal_context__
except AttributeError:
context = Context()
threading.current_thread().__decimal_context__ = context
return context
else:
local = threading.local()
if hasattr(local, '__decimal_context__'):
del local.__decimal_context__
def getcontext(_local=local):
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return _local.__decimal_context__
except AttributeError:
context = Context()
_local.__decimal_context__ = context
return context
def setcontext(context, _local=local):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
_local.__decimal_context__ = context
del threading, local # Don't contaminate the namespace
def localcontext(ctx=None):
"""Return a context manager for a copy of the supplied context
Uses a copy of the current context if no context is specified
The returned context manager creates a local decimal context
in a with statement:
def sin(x):
with localcontext() as ctx:
ctx.prec += 2
# Rest of sin calculation algorithm
# uses a precision 2 greater than normal
return +s # Convert result to normal precision
def sin(x):
with localcontext(ExtendedContext):
# Rest of sin calculation algorithm
# uses the Extended Context from the
# General Decimal Arithmetic Specification
return +s # Convert result to normal context
>>> setcontext(DefaultContext)
>>> print(getcontext().prec)
28
>>> with localcontext():
... ctx = getcontext()
... ctx.prec += 2
... print(ctx.prec)
...
30
>>> with localcontext(ExtendedContext):
... print(getcontext().prec)
...
9
>>> print(getcontext().prec)
28
"""
if ctx is None: ctx = getcontext()
return _ContextManager(ctx)
##### Decimal class #######################################################
# Do not subclass Decimal from numbers.Real and do not register it as such
# (because Decimals are not interoperable with floats). See the notes in
# numbers.py for more detail.
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
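    # For example (illustrative only), Decimal('-3.14') is stored with
    # _sign=1, _int='314', _exp=-2, _is_special=False, i.e.
    #     (-1)**1 * 314 * 10**-2 == -3.14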
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal('3.14')
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal('3.14')
>>> Decimal(314) # int
Decimal('314')
>>> Decimal(Decimal(314)) # another decimal instance
Decimal('314')
>>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
Decimal('3.14')
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, str):
value=value.strip().lower()
if value.startswith("-"):
self._sign = 1
value=value[1:]
else:
self._sign = 0
if value in ('', 'nan'):
self._is_special = True
self._int = ''
#if m.group('signal'): #figure out what a signaling NaN is later
# self._exp = 'N'
#else:
# self._exp = 'n'
self._exp='n'
return self
if value in ('inf', 'infinity'):
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
            import _jsre as re
            # permit a signed exponent; the previous pattern rejected
            # negative exponents such as '1e-5'
            _m = re.match(r"^\d*\.?\d*(e[+-]?\d+)?$", value)
if not _m:
self._is_special = True
self._int = ''
self._exp='n'
return self
if '.' in value:
intpart, fracpart=value.split('.')
if 'e' in fracpart:
fracpart, exp=fracpart.split('e')
exp=int(exp)
else:
exp=0
#self._int = str(int(intpart+fracpart))
self._int = intpart+fracpart
self._exp = exp - len(fracpart)
self._is_special = False
return self
else:
#is this a pure int?
self._is_special = False
if 'e' in value:
self._int, _exp=value.split('e')
self._exp=int(_exp)
#print(self._int, self._exp)
else:
self._int = value
self._exp = 0
return self
#m = _parser(value.strip())
#if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
#if m.group('sign') == "-":
# self._sign = 1
#else:
# self._sign = 0
#intpart = m.group('int')
#if intpart is not None:
# # finite number
# fracpart = m.group('frac') or ''
# exp = int(m.group('exp') or '0')
# self._int = str(int(intpart+fracpart))
# self._exp = exp - len(fracpart)
# self._is_special = False
#else:
# diag = m.group('diag')
# if diag is not None:
# # NaN
# self._int = str(int(diag or '0')).lstrip('0')
# if m.group('signal'):
# self._exp = 'N'
# else:
# self._exp = 'n'
# else:
# # infinity
# self._int = '0'
# self._exp = 'F'
# self._is_special = True
#return self
# From an integer
if isinstance(value, int):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], int) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, int) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], int):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
if context is None:
context = getcontext()
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are "
"enabled")
value = Decimal.from_float(value)
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
raise TypeError("Cannot convert %r to Decimal" % value)
# @classmethod, but @decorator is not valid Python 2.3 syntax, so
# don't use it (see notes on Py2.3 compatibility at top of file)
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
if isinstance(f, int): # handle integer inputs
return cls(f)
if not isinstance(f, float):
raise TypeError("argument must be int or float.")
if _math.isinf(f) or _math.isnan(f):
return cls(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
k = d.bit_length() - 1
result = _dec_from_triple(sign, str(n*5**k), -k)
if cls is Decimal:
return result
else:
return cls(result)
from_float = classmethod(from_float)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0
def __bool__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
def _cmp(self, other):
"""Compare the two non-NaN decimal instances self and other.
Returns -1 if self < other, 0 if self == other and 1
if self > other. This routine is for internal use only."""
if self._is_special or other._is_special:
self_inf = self._isinfinity()
other_inf = other._isinfinity()
if self_inf == other_inf:
return 0
elif self_inf < other_inf:
return -1
else:
return 1
# check for zeros; Decimal('0') == Decimal('-0')
if not self:
if not other:
return 0
else:
return -((-1)**other._sign)
if not other:
return (-1)**self._sign
# If different signs, neg one is less
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted:
self_padded = self._int + '0'*(self._exp - other._exp)
other_padded = other._int + '0'*(other._exp - self._exp)
if self_padded == other_padded:
return 0
elif self_padded < other_padded:
return -(-1)**self._sign
else:
return (-1)**self._sign
elif self_adjusted > other_adjusted:
return (-1)**self._sign
else: # self_adjusted < other_adjusted
return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
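    # A quick sketch of the rules above (illustrative comment, not part of
    # the original source; assumes InvalidOperation is trapped, the default):
    #   Decimal('NaN') == Decimal('NaN')  -->  False
    #   Decimal('NaN') != Decimal('1')    -->  True
    #   Decimal('NaN') <  Decimal('1')    -->  raises InvalidOperation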
def __eq__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return False
return self._cmp(other) == 0
def __ne__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return True
return self._cmp(other) != 0
def __lt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) < 0
def __le__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) <= 0
def __gt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) > 0
def __ge__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) >= 0
def compare(self, other, context=None):
"""Compares one to another.
-1 => a < b
0 => a = b
1 => a > b
NaN => one is NaN
Like __cmp__, but returns Decimal instances.
"""
other = _convert_other(other, raiseit=True)
# Compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self._cmp(other))
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
# In order to make sure that the hash of a Decimal instance
# agrees with the hash of a numerically equal integer, float
# or Fraction, we follow the rules for numeric hashes outlined
# in the documentation. (See library docs, 'Built-in Types').
if self._is_special:
if self.is_snan():
raise TypeError('Cannot hash a signaling NaN value.')
elif self.is_nan():
return _PyHASH_NAN
else:
if self._sign:
return -_PyHASH_INF
else:
return _PyHASH_INF
if self._exp >= 0:
exp_hash = pow(10, self._exp, _PyHASH_MODULUS)
else:
exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS)
hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS
ans = hash_ if self >= 0 else -hash_
return -2 if ans == -1 else ans
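    # Illustrative sketch (not part of the original source): the numeric
    # hash rules above make equal values hash equal across numeric types.
    #   hash(Decimal('1.5')) == hash(1.5)  -->  True
    #   hash(Decimal(42))    == hash(42)   -->  True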
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp)
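    # For example (illustrative, not part of the original source):
    #   Decimal('-3.14').as_tuple()
    #   -->  DecimalTuple(sign=1, digits=(3, 1, 4), exponent=-2)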
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return "Decimal('%s')" % str(self)
def __str__(self, eng=False, context=None):
"""Return string representation of the number in scientific notation.
Captures all of the information in the underlying representation.
"""
sign = ['', '-'][self._sign]
if self._is_special:
if self._exp == 'F':
return sign + 'Infinity'
elif self._exp == 'n':
return sign + 'NaN' + self._int
else: # self._exp == 'N'
return sign + 'sNaN' + self._int
# number of digits of self._int to left of decimal point
leftdigits = self._exp + len(self._int)
# dotplace is number of digits of self._int to the left of the
# decimal point in the mantissa of the output string (that is,
# after adjusting the exponent)
if self._exp <= 0 and leftdigits > -6:
# no exponent required
dotplace = leftdigits
elif not eng:
# usual scientific notation: 1 digit on left of the point
dotplace = 1
elif self._int == '0':
# engineering notation, zero
dotplace = (leftdigits + 1) % 3 - 1
else:
# engineering notation, nonzero
dotplace = (leftdigits - 1) % 3 + 1
if dotplace <= 0:
intpart = '0'
fracpart = '.' + '0'*(-dotplace) + self._int
elif dotplace >= len(self._int):
intpart = self._int+'0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace]
fracpart = '.' + self._int[dotplace:]
if leftdigits == dotplace:
exp = ''
else:
if context is None:
context = getcontext()
exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
return sign + intpart + fracpart + exp
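    # Sketch of the threshold above (illustrative, not part of the original
    # source): an exponent is shown once the adjusted exponent drops below
    # -6, or the stored exponent is positive.
    #   str(Decimal('0.000001'))  -->  '0.000001'
    #   str(Decimal('1E-7'))      -->  '1E-7'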
def to_eng_string(self, context=None):
"""Convert to engineering-type string.
Engineering notation has an exponent which is a multiple of 3, so there
are up to 3 digits left of the decimal place.
Same rules for when in exponential and when as a value as in __str__.
"""
return self.__str__(eng=True, context=context)
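    # Illustrative examples (not part of the original source):
    #   Decimal('123E+1').to_eng_string()  -->  '1.23E+3'
    #   Decimal('1E-7').to_eng_string()    -->  '100E-9'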
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
        Rounds the result if the context requires it.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# -Decimal('0') is Decimal('0'), not Decimal('-0'), except
# in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = self.copy_negate()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
Rounds the number (if more then precision digits)
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# + (-0) = 0, except in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = Decimal(self)
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
def __add__(self, other, context=None):
"""Returns self + other.
        -INF + INF (or the reverse) causes an InvalidOperation error.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
# If both INF, same sign => same as both, opposite => error.
if self._sign != other._sign and other._isinfinity():
return context._raise_error(InvalidOperation, '-INF + INF')
return Decimal(self)
if other._isinfinity():
return Decimal(other) # Can't both be infinity here
exp = min(self._exp, other._exp)
negativezero = 0
if context.rounding == ROUND_FLOOR and self._sign != other._sign:
# If the answer is 0, the sign should be negative, in this case.
negativezero = 1
if not self and not other:
sign = min(self._sign, other._sign)
if negativezero:
sign = 1
ans = _dec_from_triple(sign, '0', exp)
ans = ans._fix(context)
return ans
if not self:
exp = max(exp, other._exp - context.prec-1)
ans = other._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
if not other:
exp = max(exp, self._exp - context.prec-1)
ans = self._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2 = _normalize(op1, op2, context.prec)
result = _WorkRep()
if op1.sign != op2.sign:
# Equal and opposite
if op1.int == op2.int:
ans = _dec_from_triple(negativezero, '0', exp)
ans = ans._fix(context)
return ans
if op1.int < op2.int:
op1, op2 = op2, op1
# OK, now abs(op1) > abs(op2)
if op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = op2.sign, op1.sign
else:
result.sign = 0
# So we know the sign, and op1 > 0.
elif op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = (0, 0)
else:
result.sign = 0
# Now, op1 > abs(op2) > 0
if op2.sign == 0:
result.int = op1.int + op2.int
else:
result.int = op1.int - op2.int
result.exp = op1.exp
ans = Decimal(result)
ans = ans._fix(context)
return ans
__radd__ = __add__
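    # Sketch of the negative-zero rule above (illustrative, not part of the
    # original source):
    #   Decimal('1') + Decimal('-1')  -->  Decimal('0')
    # but with getcontext().rounding = ROUND_FLOOR:
    #   Decimal('1') + Decimal('-1')  -->  Decimal('-0')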
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
def __mul__(self, other, context=None):
"""Return self * other.
        (+-) INF * 0 (or its reverse) raises InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return _SignedInfinity[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return _SignedInfinity[resultsign]
resultexp = self._exp + other._exp
# Special case for multiplying by zero
if not self or not other:
ans = _dec_from_triple(resultsign, '0', resultexp)
# Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == '1':
ans = _dec_from_triple(resultsign, other._int, resultexp)
ans = ans._fix(context)
return ans
if other._int == '1':
ans = _dec_from_triple(resultsign, self._int, resultexp)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
ans = ans._fix(context)
return ans
__rmul__ = __mul__
def __truediv__(self, other, context=None):
"""Return self / other."""
other = _convert_other(other)
if other is NotImplemented:
return NotImplemented
if context is None:
context = getcontext()
sign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity() and other._isinfinity():
return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
if self._isinfinity():
return _SignedInfinity[sign]
if other._isinfinity():
context._raise_error(Clamped, 'Division by infinity')
return _dec_from_triple(sign, '0', context.Etiny())
# Special cases for zeroes
if not other:
if not self:
return context._raise_error(DivisionUndefined, '0 / 0')
return context._raise_error(DivisionByZero, 'x / 0', sign)
if not self:
exp = self._exp - other._exp
coeff = 0
else:
# OK, so neither = 0, INF or NaN
shift = len(other._int) - len(self._int) + context.prec + 1
exp = self._exp - other._exp - shift
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if shift >= 0:
coeff, remainder = divmod(op1.int * 10**shift, op2.int)
else:
coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
if remainder:
# result is not exact; adjust to ensure correct rounding
if coeff % 5 == 0:
coeff += 1
else:
# result is exact; get as close to ideal exponent as possible
ideal_exp = self._exp - other._exp
while exp < ideal_exp and coeff % 10 == 0:
coeff //= 10
exp += 1
ans = _dec_from_triple(sign, str(coeff), exp)
return ans._fix(context)
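    # Illustrative behavior (not part of the original source), at the
    # default 28-digit precision:
    #   Decimal(1) / Decimal(3)  -->  Decimal('0.3333333333333333333333333333')
    #   Decimal(5) / Decimal(2)  -->  Decimal('2.5')   # exact, ideal exponent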
def _divide(self, other, context):
"""Return (self // other, self % other), to context.prec precision.
Assumes that neither self nor other is a NaN, that self is not
infinite and that other is nonzero.
"""
sign = self._sign ^ other._sign
if other._isinfinity():
ideal_exp = self._exp
else:
ideal_exp = min(self._exp, other._exp)
expdiff = self.adjusted() - other.adjusted()
if not self or other._isinfinity() or expdiff <= -2:
return (_dec_from_triple(sign, '0', 0),
self._rescale(ideal_exp, context.rounding))
if expdiff <= context.prec:
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
if q < 10**context.prec:
return (_dec_from_triple(sign, str(q), 0),
_dec_from_triple(self._sign, str(r), ideal_exp))
# Here the quotient is too large to be representable
ans = context._raise_error(DivisionImpossible,
'quotient too large in //, % or divmod')
return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
def __divmod__(self, other, context=None):
"""
Return (self // other, self % other)
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return (ans, ans)
sign = self._sign ^ other._sign
if self._isinfinity():
if other._isinfinity():
ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
return ans, ans
else:
return (_SignedInfinity[sign],
context._raise_error(InvalidOperation, 'INF % x'))
if not other:
if not self:
ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
return ans, ans
else:
return (context._raise_error(DivisionByZero, 'x // 0', sign),
context._raise_error(InvalidOperation, 'x % 0'))
quotient, remainder = self._divide(other, context)
remainder = remainder._fix(context)
return quotient, remainder
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
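    # Note (illustrative, not part of the original source): unlike Python's
    # integer %, the result takes the sign of the dividend:
    #   Decimal(7)  % Decimal(3)  -->  Decimal('1')
    #   Decimal(-7) % Decimal(3)  -->  Decimal('-1')   # whereas -7 % 3 == 2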
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
def remainder_near(self, other, context=None):
"""
        Returns the remainder nearest to 0, so that abs(remainder) <= abs(other)/2.
"""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
# self == +/-infinity -> InvalidOperation
if self._isinfinity():
return context._raise_error(InvalidOperation,
'remainder_near(infinity, x)')
# other == 0 -> either InvalidOperation or DivisionUndefined
if not other:
if self:
return context._raise_error(InvalidOperation,
'remainder_near(x, 0)')
else:
return context._raise_error(DivisionUndefined,
'remainder_near(0, 0)')
# other = +/-infinity -> remainder = self
if other._isinfinity():
ans = Decimal(self)
return ans._fix(context)
# self = 0 -> remainder = self, with ideal exponent
ideal_exponent = min(self._exp, other._exp)
if not self:
ans = _dec_from_triple(self._sign, '0', ideal_exponent)
return ans._fix(context)
# catch most cases of large or small quotient
expdiff = self.adjusted() - other.adjusted()
if expdiff >= context.prec + 1:
# expdiff >= prec+1 => abs(self/other) > 10**prec
return context._raise_error(DivisionImpossible)
if expdiff <= -2:
# expdiff <= -2 => abs(self/other) < 0.1
ans = self._rescale(ideal_exponent, context.rounding)
return ans._fix(context)
# adjust both arguments to have the same exponent, then divide
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
# remainder is r*10**ideal_exponent; other is +/-op2.int *
# 10**ideal_exponent. Apply correction to ensure that
# abs(remainder) <= abs(other)/2
if 2*r + (q&1) > op2.int:
r -= op2.int
q += 1
if q >= 10**context.prec:
return context._raise_error(DivisionImpossible)
# result has same sign as self unless r is negative
sign = self._sign
if r < 0:
sign = 1-sign
r = -r
ans = _dec_from_triple(sign, str(r), ideal_exponent)
return ans._fix(context)
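    # Illustrative examples (not part of the original source):
    #   Decimal(18).remainder_near(Decimal(10))  -->  Decimal('-2')
    #   Decimal(7).remainder_near(Decimal(3))    -->  Decimal('1')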
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return _SignedInfinity[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
if self._isnan():
if self.is_snan():
raise ValueError("Cannot convert signaling NaN to float")
s = "-nan" if self._sign else "nan"
else:
s = str(self)
return float(s)
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
raise ValueError("Cannot convert NaN to integer")
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to integer")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
__trunc__ = __int__
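    # Illustrative (not part of the original source): conversion truncates
    # toward zero, like math.trunc:
    #   int(Decimal('-7.9'))  -->  -7
    #   int(Decimal('1E3'))   -->  1000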
def real(self):
return self
real = property(real)
def imag(self):
return Decimal(0)
imag = property(imag)
def conjugate(self):
return self
def __complex__(self):
return complex(float(self))
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if clamp=0,
# precision-1 if clamp=1.
max_payload_len = context.prec - context.clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
def _fix(self, context):
"""Round if it is necessary to keep self within prec precision.
Rounds and fixes the exponent. Does not raise on a sNaN.
Arguments:
self - Decimal instance
context - context used.
"""
if self._is_special:
if self._isnan():
# decapitate payload if necessary
return self._fix_nan(context)
else:
# self is +/-Infinity; return unaltered
return Decimal(self)
# if self is zero then exponent should be between Etiny and
# Emax if clamp==0, and between Etiny and Etop if clamp==1.
Etiny = context.Etiny()
Etop = context.Etop()
if not self:
exp_max = [context.Emax, Etop][context.clamp]
new_exp = min(max(self._exp, Etiny), exp_max)
if new_exp != self._exp:
context._raise_error(Clamped)
return _dec_from_triple(self._sign, '0', new_exp)
else:
return Decimal(self)
# exp_min is the smallest allowable exponent of the result,
# equal to max(self.adjusted()-context.prec+1, Etiny)
exp_min = len(self._int) + self._exp - context.prec
if exp_min > Etop:
# overflow: exp_min > Etop iff self.adjusted() > Emax
ans = context._raise_error(Overflow, 'above Emax', self._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
self_is_subnormal = exp_min < Etiny
if self_is_subnormal:
exp_min = Etiny
# round if self has too many digits
if self._exp < exp_min:
digits = len(self._int) + self._exp - exp_min
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp_min-1)
digits = 0
rounding_method = self._pick_rounding_function[context.rounding]
changed = rounding_method(self, digits)
coeff = self._int[:digits] or '0'
if changed > 0:
coeff = str(int(coeff)+1)
if len(coeff) > context.prec:
coeff = coeff[:-1]
exp_min += 1
# check whether the rounding pushed the exponent out of range
if exp_min > Etop:
ans = context._raise_error(Overflow, 'above Emax', self._sign)
else:
ans = _dec_from_triple(self._sign, coeff, exp_min)
# raise the appropriate signals, taking care to respect
# the precedence described in the specification
if changed and self_is_subnormal:
context._raise_error(Underflow)
if self_is_subnormal:
context._raise_error(Subnormal)
if changed:
context._raise_error(Inexact)
context._raise_error(Rounded)
if not ans:
# raise Clamped on underflow to 0
context._raise_error(Clamped)
return ans
if self_is_subnormal:
context._raise_error(Subnormal)
# fold down if clamp == 1 and self has too few digits
if context.clamp == 1 and self._exp > Etop:
context._raise_error(Clamped)
self_padded = self._int + '0'*(self._exp - Etop)
return _dec_from_triple(self._sign, self_padded, Etop)
# here self was representable to begin with; return unchanged
return Decimal(self)
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_up(self, prec):
"""Rounds away from 0."""
return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
_pick_rounding_function = dict(
ROUND_DOWN = _round_down,
ROUND_UP = _round_up,
ROUND_HALF_UP = _round_half_up,
ROUND_HALF_DOWN = _round_half_down,
ROUND_HALF_EVEN = _round_half_even,
ROUND_CEILING = _round_ceiling,
ROUND_FLOOR = _round_floor,
ROUND_05UP = _round_05up,
)
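    # A quick sketch of how the modes differ (illustrative, not part of the
    # original source), exercised through quantize:
    #   Decimal('2.5').quantize(Decimal('1'), rounding=ROUND_HALF_EVEN)
    #                                             -->  Decimal('2')
    #   Decimal('2.5').quantize(Decimal('1'), rounding=ROUND_HALF_UP)
    #                                             -->  Decimal('3')
    #   Decimal('-2.5').quantize(Decimal('1'), rounding=ROUND_CEILING)
    #                                             -->  Decimal('-2')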
def __round__(self, n=None):
"""Round self to the nearest integer, or to a given precision.
If only one argument is supplied, round a finite Decimal
instance self to the nearest integer. If self is infinite or
a NaN then a Python exception is raised. If self is finite
and lies exactly halfway between two integers then it is
rounded to the integer with even last digit.
>>> round(Decimal('123.456'))
123
>>> round(Decimal('-456.789'))
-457
>>> round(Decimal('-3.0'))
-3
>>> round(Decimal('2.5'))
2
>>> round(Decimal('3.5'))
4
>>> round(Decimal('Inf'))
Traceback (most recent call last):
...
OverflowError: cannot round an infinity
>>> round(Decimal('NaN'))
Traceback (most recent call last):
...
ValueError: cannot round a NaN
If a second argument n is supplied, self is rounded to n
decimal places using the rounding mode for the current
context.
For an integer n, round(self, -n) is exactly equivalent to
self.quantize(Decimal('1En')).
>>> round(Decimal('123.456'), 0)
Decimal('123')
>>> round(Decimal('123.456'), 2)
Decimal('123.46')
>>> round(Decimal('123.456'), -2)
Decimal('1E+2')
>>> round(Decimal('-Infinity'), 37)
Decimal('NaN')
>>> round(Decimal('sNaN123'), 0)
Decimal('NaN123')
"""
if n is not None:
# two-argument form: use the equivalent quantize call
if not isinstance(n, int):
raise TypeError('Second argument to round should be integral')
exp = _dec_from_triple(0, '1', -n)
return self.quantize(exp)
# one-argument form
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_HALF_EVEN))
def __floor__(self):
"""Return the floor of self, as an integer.
For a finite Decimal instance self, return the greatest
integer n such that n <= self. If self is infinite or a NaN
then a Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_FLOOR))
def __ceil__(self):
"""Return the ceiling of self, as an integer.
For a finite Decimal instance self, return the least integer n
such that n >= self. If self is infinite or a NaN then a
Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_CEILING))
def fma(self, other, third, context=None):
"""Fused multiply-add.
Returns self*other+third with no rounding of the intermediate
product self*other.
self and other are multiplied together, with no rounding of
the result. The third operand is then added to the result,
and a single final rounding is performed.
"""
other = _convert_other(other, raiseit=True)
third = _convert_other(third, raiseit=True)
# compute product; raise InvalidOperation if either operand is
# a signaling NaN or if the product is zero times infinity.
if self._is_special or other._is_special:
if context is None:
context = getcontext()
if self._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', self)
if other._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', other)
if self._exp == 'n':
product = self
elif other._exp == 'n':
product = other
elif self._exp == 'F':
if not other:
return context._raise_error(InvalidOperation,
'INF * 0 in fma')
product = _SignedInfinity[self._sign ^ other._sign]
elif other._exp == 'F':
if not self:
return context._raise_error(InvalidOperation,
'0 * INF in fma')
product = _SignedInfinity[self._sign ^ other._sign]
else:
product = _dec_from_triple(self._sign ^ other._sign,
str(int(self._int) * int(other._int)),
self._exp + other._exp)
return product.__add__(third, context)
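    # Illustrative (not part of the original source): the single final
    # rounding is what distinguishes fma from a separate multiply and add.
    #   Decimal(2).fma(3, 5)  -->  Decimal('11')
    #   Decimal(888565290).fma(1557.0987, -86087.7578)
    #                         -->  Decimal('1.38435736E+12')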
def _power_modulo(self, other, modulo, context=None):
"""Three argument version of __pow__"""
other = _convert_other(other)
if other is NotImplemented:
return other
modulo = _convert_other(modulo)
if modulo is NotImplemented:
return modulo
if context is None:
context = getcontext()
# deal with NaNs: if there are any sNaNs then first one wins,
# (i.e. behaviour for NaNs is identical to that of fma)
self_is_nan = self._isnan()
other_is_nan = other._isnan()
modulo_is_nan = modulo._isnan()
if self_is_nan or other_is_nan or modulo_is_nan:
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if modulo_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
modulo)
if self_is_nan:
return self._fix_nan(context)
if other_is_nan:
return other._fix_nan(context)
return modulo._fix_nan(context)
# check inputs: we apply same restrictions as Python's pow()
if not (self._isinteger() and
other._isinteger() and
modulo._isinteger()):
return context._raise_error(InvalidOperation,
'pow() 3rd argument not allowed '
'unless all arguments are integers')
if other < 0:
return context._raise_error(InvalidOperation,
'pow() 2nd argument cannot be '
'negative when 3rd argument specified')
if not modulo:
return context._raise_error(InvalidOperation,
'pow() 3rd argument cannot be 0')
# additional restriction for decimal: the modulus must be less
# than 10**prec in absolute value
if modulo.adjusted() >= context.prec:
return context._raise_error(InvalidOperation,
'insufficient precision: pow() 3rd '
'argument must not have more than '
'precision digits')
# define 0**0 == NaN, for consistency with two-argument pow
# (even though it hurts!)
if not other and not self:
return context._raise_error(InvalidOperation,
'at least one of pow() 1st argument '
                                        'and 2nd argument must be nonzero; '
'0**0 is not defined')
# compute sign of result
if other._iseven():
sign = 0
else:
sign = self._sign
# convert modulo to a Python integer, and self and other to
# Decimal integers (i.e. force their exponents to be >= 0)
modulo = abs(int(modulo))
base = _WorkRep(self.to_integral_value())
exponent = _WorkRep(other.to_integral_value())
# compute result using integer pow()
base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
for i in range(exponent.exp):
base = pow(base, 10, modulo)
base = pow(base, exponent.int, modulo)
return _dec_from_triple(sign, str(base), 0)
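    # Illustrative (not part of the original source): this method backs the
    # three-argument form of pow() for integral Decimals.
    #   pow(Decimal(3), Decimal(7), Decimal(5))  -->  Decimal('2')  # 2187 % 5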
def _power_exact(self, other, p):
"""Attempt to compute self**other exactly.
Given Decimals self and other and an integer p, attempt to
compute an exact result for the power self**other, with p
digits of precision. Return None if self**other is not
exactly representable in p digits.
Assumes that elimination of special cases has already been
performed: self and other must both be nonspecial; self must
be positive and not numerically equal to 1; other must be
nonzero. For efficiency, other._exp should not be too large,
so that 10**abs(other._exp) is a feasible calculation."""
# In the comments below, we write x for the value of self and y for the
# value of other. Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
# and yc positive integers not divisible by 10.
# The main purpose of this method is to identify the *failure*
# of x**y to be exactly representable with as little effort as
# possible. So we look for cheap and easy tests that
# eliminate the possibility of x**y being exact. Only if all
# these tests are passed do we go on to actually compute x**y.
# Here's the main idea. Express y as a rational number m/n, with m and
# n relatively prime and n>0. Then for x**y to be exactly
# representable (at *any* precision), xc must be the nth power of a
# positive integer and xe must be divisible by n. If y is negative
# then additionally xc must be a power of either 2 or 5, hence a power
# of 2**n or 5**n.
#
# There's a limit to how small |y| can be: if y=m/n as above
# then:
#
# (1) if xc != 1 then for the result to be representable we
# need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So
# if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
# 2**(1/|y|), hence xc**|y| < 2 and the result is not
# representable.
#
# (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if
# |y| < 1/|xe| then the result is not representable.
#
# Note that since x is not equal to 1, at least one of (1) and
# (2) must apply. Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
# 10**-ye iff len(str(|yc|*nbits(xc)) <= -ye.
#
# There's also a limit to how large y can be, at least if it's
# positive: the normalized result will have coefficient xc**y,
# so if it's representable then xc**y < 10**p, and y <
# p/log10(xc). Hence if y*log10(xc) >= p then the result is
# not exactly representable.
# if len(str(abs(yc*xe)) <= -ye then abs(yc*xe) < 10**-ye,
# so |y| < 1/xe and the result is not representable.
# Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
# < 1/nbits(xc).
x = _WorkRep(self)
xc, xe = x.int, x.exp
while xc % 10 == 0:
xc //= 10
xe += 1
y = _WorkRep(other)
yc, ye = y.int, y.exp
while yc % 10 == 0:
yc //= 10
ye += 1
# case where xc == 1: result is 10**(xe*y), with xe*y
# required to be an integer
if xc == 1:
xe *= yc
# result is now 10**(xe * 10**ye); xe * 10**ye must be integral
while xe % 10 == 0:
xe //= 10
ye += 1
if ye < 0:
return None
exponent = xe * 10**ye
if y.sign == 1:
exponent = -exponent
# if other is a nonnegative integer, use ideal exponent
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(exponent-ideal_exponent, p-1)
else:
zeros = 0
return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)
# case where y is negative: xc must be either a power
# of 2 or a power of 5.
if y.sign == 1:
last_digit = xc % 10
if last_digit in (2,4,6,8):
# quick test for power of 2
if xc & -xc != xc:
return None
# now xc is a power of 2; e is its exponent
e = _nbits(xc)-1
# We now have:
#
# x = 2**e * 10**xe, e > 0, and y < 0.
#
# The exact result is:
#
# x**y = 5**(-e*y) * 10**(e*y + xe*y)
#
# provided that both e*y and xe*y are integers. Note that if
# 5**(-e*y) >= 10**p, then the result can't be expressed
# exactly with p digits of precision.
#
# Using the above, we can guard against large values of ye.
# 93/65 is an upper bound for log(10)/log(5), so if
#
# ye >= len(str(93*p//65))
#
# then
#
# -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
#
# so 5**(-e*y) >= 10**p, and the coefficient of the result
# can't be expressed in p digits.
# emax >= largest e such that 5**e < 10**p.
emax = p*93//65
if ye >= len(str(emax)):
return None
# Find -e*y and -xe*y; both must be integers
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 5**e
elif last_digit == 5:
# e >= log_5(xc) if xc is a power of 5; we have
# equality all the way up to xc=5**2658
e = _nbits(xc)*28//65
xc, remainder = divmod(5**e, xc)
if remainder:
return None
while xc % 5 == 0:
xc //= 5
e -= 1
# Guard against large values of ye, using the same logic as in
# the 'xc is a power of 2' branch. 10/3 is an upper bound for
# log(10)/log(2).
emax = p*10//3
if ye >= len(str(emax)):
return None
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 2**e
else:
return None
if xc >= 10**p:
return None
xe = -e-xe
return _dec_from_triple(0, str(xc), xe)
# now y is positive; find m and n such that y = m/n
if ye >= 0:
m, n = yc*10**ye, 1
else:
if xe != 0 and len(str(abs(yc*xe))) <= -ye:
return None
xc_bits = _nbits(xc)
if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
return None
m, n = yc, 10**(-ye)
while m % 2 == n % 2 == 0:
m //= 2
n //= 2
while m % 5 == n % 5 == 0:
m //= 5
n //= 5
# compute nth root of xc*10**xe
if n > 1:
# if 1 < xc < 2**n then xc isn't an nth power
if xc != 1 and xc_bits <= n:
return None
xe, rem = divmod(xe, n)
if rem != 0:
return None
# compute nth root of xc using Newton's method
a = 1 << -(-_nbits(xc)//n) # initial estimate
while True:
q, r = divmod(xc, a**(n-1))
if a <= q:
break
else:
a = (a*(n-1) + q)//n
if not (a == q and r == 0):
return None
xc = a
# now xc*10**xe is the nth root of the original xc*10**xe
# compute mth power of xc*10**xe
# if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
# 10**p and the result is not representable.
if xc > 1 and m > p*100//_log10_lb(xc):
return None
xc = xc**m
xe *= m
if xc > 10**p:
return None
# by this point the result *is* exactly representable
# adjust the exponent to get as close as possible to the ideal
# exponent, if necessary
str_xc = str(xc)
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(xe-ideal_exponent, p-len(str_xc))
else:
zeros = 0
return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
def __pow__(self, other, modulo=None, context=None):
"""Return self ** other [ % modulo].
With two arguments, compute self**other.
With three arguments, compute (self**other) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- other must be nonnegative
- either self or other (or both) must be nonzero
- modulo must be nonzero and must have at most p digits,
where p is the context precision.
If any of these restrictions is violated the InvalidOperation
flag is raised.
The result of pow(self, other, modulo) is identical to the
result that would be obtained by computing (self**other) %
modulo with unbounded precision, but is computed more
efficiently. It is always exact.
"""
if modulo is not None:
return self._power_modulo(other, modulo, context)
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
# either argument is a NaN => result is NaN
ans = self._check_nans(other, context)
if ans:
return ans
# 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
if not other:
if not self:
return context._raise_error(InvalidOperation, '0 ** 0')
else:
return _One
# result has sign 1 iff self._sign is 1 and other is an odd integer
result_sign = 0
if self._sign == 1:
if other._isinteger():
if not other._iseven():
result_sign = 1
else:
# -ve**noninteger = NaN
# (-0)**noninteger = 0**noninteger
if self:
return context._raise_error(InvalidOperation,
'x ** y with x negative and y not an integer')
# negate self, without doing any unwanted rounding
self = self.copy_negate()
# 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
if not self:
if other._sign == 0:
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
if self._isinfinity():
if other._sign == 0:
return _SignedInfinity[result_sign]
else:
return _dec_from_triple(result_sign, '0', 0)
# 1**other = 1, but the choice of exponent and the flags
# depend on the exponent of self, and on whether other is a
# positive integer, a negative integer, or neither
if self == _One:
if other._isinteger():
# exp = max(self._exp*max(int(other), 0),
# 1-context.prec) but evaluating int(other) directly
# is dangerous until we know other is small (other
# could be 1e999999999)
if other._sign == 1:
multiplier = 0
elif other > context.prec:
multiplier = context.prec
else:
multiplier = int(other)
exp = self._exp * multiplier
if exp < 1-context.prec:
exp = 1-context.prec
context._raise_error(Rounded)
else:
context._raise_error(Inexact)
context._raise_error(Rounded)
exp = 1-context.prec
return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)
# compute adjusted exponent of self
self_adj = self.adjusted()
# self ** infinity is infinity if self > 1, 0 if self < 1
# self ** -infinity is infinity if self < 1, 0 if self > 1
if other._isinfinity():
if (other._sign == 0) == (self_adj < 0):
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# from here on, the result always goes through the call
# to _fix at the end of this function.
ans = None
exact = False
# crude test to catch cases of extreme overflow/underflow. If
# log10(self)*other >= 10**bound and bound >= len(str(Emax))
# then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
# self**other >= 10**(Emax+1), so overflow occurs. The test
# for underflow is similar.
bound = self._log10_exp_bound() + other.adjusted()
if (self_adj >= 0) == (other._sign == 0):
# self > 1 and other +ve, or self < 1 and other -ve
# possibility of overflow
if bound >= len(str(context.Emax)):
ans = _dec_from_triple(result_sign, '1', context.Emax+1)
else:
# self > 1 and other -ve, or self < 1 and other +ve
# possibility of underflow to 0
Etiny = context.Etiny()
if bound >= len(str(-Etiny)):
ans = _dec_from_triple(result_sign, '1', Etiny-1)
# try for an exact result with precision +1
if ans is None:
ans = self._power_exact(other, context.prec + 1)
if ans is not None:
if result_sign == 1:
ans = _dec_from_triple(1, ans._int, ans._exp)
exact = True
# usual case: inexact result, x**y computed directly as exp(y*log(x))
if ans is None:
p = context.prec
x = _WorkRep(self)
xc, xe = x.int, x.exp
y = _WorkRep(other)
yc, ye = y.int, y.exp
if y.sign == 1:
yc = -yc
# compute correctly rounded result: start with precision +3,
# then increase precision until result is unambiguously roundable
extra = 3
while True:
coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(result_sign, str(coeff), exp)
# unlike exp, ln and log10, the power function respects the
# rounding mode; no need to switch to ROUND_HALF_EVEN here
# There's a difficulty here when 'other' is not an integer and
# the result is exact. In this case, the specification
# requires that the Inexact flag be raised (in spite of
# exactness), but since the result is exact _fix won't do this
# for us. (Correspondingly, the Underflow signal should also
# be raised for subnormal results.) We can't directly raise
# these signals either before or after calling _fix, since
# that would violate the precedence for signals. So we wrap
# the ._fix call in a temporary context, and reraise
# afterwards.
if exact and not other._isinteger():
# pad with zeros up to length context.prec+1 if necessary; this
# ensures that the Rounded signal will be raised.
if len(ans._int) <= context.prec:
expdiff = context.prec + 1 - len(ans._int)
ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
ans._exp-expdiff)
# create a copy of the current context, with cleared flags/traps
newcontext = context.copy()
newcontext.clear_flags()
for exception in _signals:
newcontext.traps[exception] = 0
# round in the new context
ans = ans._fix(newcontext)
# raise Inexact, and if necessary, Underflow
newcontext._raise_error(Inexact)
if newcontext.flags[Subnormal]:
newcontext._raise_error(Underflow)
# propagate signals to the original context; _fix could
# have raised any of Overflow, Underflow, Subnormal,
# Inexact, Rounded, Clamped. Overflow needs the correct
# arguments. Note that the order of the exceptions is
# important here.
if newcontext.flags[Overflow]:
context._raise_error(Overflow, 'above Emax', ans._sign)
for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
if newcontext.flags[exception]:
context._raise_error(exception)
else:
ans = ans._fix(context)
return ans
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
def normalize(self, context=None):
"""Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return _dec_from_triple(dup._sign, '0', 0)
exp_max = [context.Emax, context.Etop()][context.clamp]
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == '0' and exp < exp_max:
exp += 1
end -= 1
return _dec_from_triple(dup._sign, dup._int[:end], exp)
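    # Illustrative examples (not part of the original source):
    #   Decimal('120.00').normalize()  -->  Decimal('1.2E+2')
    #   Decimal('0.00').normalize()    -->  Decimal('0')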
def quantize(self, exp, rounding=None, context=None, watchexp=True):
"""Quantize self so its exponent is the same as that of exp.
Similar to self._rescale(exp._exp) but with error checking.
"""
exp = _convert_other(exp, raiseit=True)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special or exp._is_special:
ans = self._check_nans(exp, context)
if ans:
return ans
if exp._isinfinity() or self._isinfinity():
if exp._isinfinity() and self._isinfinity():
return Decimal(self) # if both are inf, it is OK
return context._raise_error(InvalidOperation,
'quantize with one INF')
# if we're not watching exponents, do a simple rescale
if not watchexp:
ans = self._rescale(exp._exp, rounding)
# raise Inexact and Rounded where appropriate
if ans._exp > self._exp:
context._raise_error(Rounded)
if ans != self:
context._raise_error(Inexact)
return ans
# exp._exp should be between Etiny and Emax
if not (context.Etiny() <= exp._exp <= context.Emax):
return context._raise_error(InvalidOperation,
'target exponent out of bounds in quantize')
if not self:
ans = _dec_from_triple(self._sign, '0', exp._exp)
return ans._fix(context)
self_adjusted = self.adjusted()
if self_adjusted > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if self_adjusted - exp._exp + 1 > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
ans = self._rescale(exp._exp, rounding)
if ans.adjusted() > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if len(ans._int) > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
# raise appropriate flags
if ans and ans.adjusted() < context.Emin:
context._raise_error(Subnormal)
if ans._exp > self._exp:
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
# call to fix takes care of any necessary folddown, and
# signals Clamped if necessary
ans = ans._fix(context)
return ans
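    # Illustrative (not part of the original source):
    #   Decimal('1.41421356').quantize(Decimal('1.000'))  -->  Decimal('1.414')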
def same_quantum(self, other, context=None):
"""Return True if self and other have the same exponent; otherwise
return False.
If either operand is a special value, the following rules are used:
* return True if both operands are infinities
* return True if both operands are NaNs
* otherwise, return False.
"""
other = _convert_other(other, raiseit=True)
if self._is_special or other._is_special:
return (self.is_nan() and other.is_nan() or
self.is_infinite() and other.is_infinite())
return self._exp == other._exp
def _rescale(self, exp, rounding):
"""Rescale self so that the exponent is exp, either by padding with zeros
or by truncating digits, using the given rounding mode.
Specials are returned without change. This operation is
quiet: it raises no flags, and uses no information from the
context.
exp = exp to scale to (an integer)
rounding = rounding mode
"""
if self._is_special:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', exp)
if self._exp >= exp:
# pad answer with zeros if necessary
return _dec_from_triple(self._sign,
self._int + '0'*(self._exp - exp), exp)
# too many digits; round and lose data. If self.adjusted() <
# exp-1, replace self by 10**(exp-1) before rounding
digits = len(self._int) + self._exp - exp
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp-1)
digits = 0
this_function = self._pick_rounding_function[rounding]
changed = this_function(self, digits)
coeff = self._int[:digits] or '0'
if changed == 1:
coeff = str(int(coeff)+1)
return _dec_from_triple(self._sign, coeff, exp)
def _round(self, places, rounding):
"""Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
Infinities, NaNs and zeros are returned unaltered.
This operation is quiet: it raises no flags, and uses no
information from the context.
"""
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it can happen that the rescale alters the adjusted exponent;
# for example when rounding 99.97 to 3 significant figures.
# When this happens we end up with an extra 0 at the end of
# the number; a second rescale fixes this.
if ans.adjusted() != self.adjusted():
ans = ans._rescale(ans.adjusted()+1-places, rounding)
return ans
def to_integral_exact(self, rounding=None, context=None):
"""Rounds to a nearby integer.
If no rounding mode is specified, take the rounding mode from
the context. This method raises the Rounded and Inexact flags
when appropriate.
See also: to_integral_value, which does exactly the same as
this method except that it doesn't raise Inexact or Rounded.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', 0)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
ans = self._rescale(0, rounding)
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
def to_integral_value(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
else:
return self._rescale(0, rounding)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
def sqrt(self, context=None):
"""Return the square root of self."""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() and self._sign == 0:
return Decimal(self)
if not self:
# exponent = self._exp // 2. sqrt(-0) = -0
ans = _dec_from_triple(self._sign, '0', self._exp // 2)
return ans._fix(context)
if self._sign == 1:
return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
# At this point self represents a positive number. Let p be
# the desired precision and express self in the form c*100**e
# with c a positive real number and e an integer, c and e
# being chosen so that 100**(p-1) <= c < 100**p. Then the
# (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
# <= sqrt(c) < 10**p, so the closest representable Decimal at
# precision p is n*10**e where n = round_half_even(sqrt(c)),
# the closest integer to sqrt(c) with the even integer chosen
# in the case of a tie.
#
# To ensure correct rounding in all cases, we use the
# following trick: we compute the square root to an extra
# place (precision p+1 instead of precision p), rounding down.
# Then, if the result is inexact and its last digit is 0 or 5,
# we increase the last digit to 1 or 6 respectively; if it's
# exact we leave the last digit alone. Now the final round to
# p places (or fewer in the case of underflow) will round
# correctly and raise the appropriate flags.
# use an extra digit of precision
prec = context.prec+1
# write argument in the form c*100**e where e = self._exp//2
# is the 'ideal' exponent, to be used if the square root is
# exactly representable. l is the number of 'digits' of c in
# base 100, so that 100**(l-1) <= c < 100**l.
op = _WorkRep(self)
e = op.exp >> 1
if op.exp & 1:
c = op.int * 10
l = (len(self._int) >> 1) + 1
else:
c = op.int
l = len(self._int)+1 >> 1
# rescale so that c has exactly prec base 100 'digits'
shift = prec-l
if shift >= 0:
c *= 100**shift
exact = True
else:
c, remainder = divmod(c, 100**-shift)
exact = not remainder
e -= shift
# find n = floor(sqrt(c)) using Newton's method
n = 10**prec
while True:
q = c//n
if n <= q:
break
else:
n = n + q >> 1
exact = exact and n*n == c
if exact:
# result is exact; rescale to use ideal exponent e
if shift >= 0:
# assert n % 10**shift == 0
n //= 10**shift
else:
n *= 10**-shift
e += shift
else:
# result is not exact; fix last digit as described above
if n % 5 == 0:
n += 1
ans = _dec_from_triple(0, str(n), e)
# round, and fit to current context
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
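    # Illustrative (not part of the original source), at the default
    # 28-digit precision:
    #   Decimal(2).sqrt()     -->  Decimal('1.414213562373095048801688724')
    #   Decimal('-0').sqrt()  -->  Decimal('-0')   # sqrt(-0) == -0, as above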
def max(self, other, context=None):
"""Returns the larger value.
        Like max(self, other) except that if one operand is a quiet NaN
        and the other is a number, the number is returned (and sNaNs
        signal). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
# If both operands are finite and equal in numerical value
# then an ordering is applied:
#
# If the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# If the signs are the same then the exponent is used to select
# the result. This is exactly the ordering used in compare_total.
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min(self, other, context=None):
"""Returns the smaller value.
        Like min(self, other) except that if one operand is a quiet NaN
        and the other is a number, the number is returned (and sNaNs
        signal). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
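    # Illustrative (not part of the original source): a quiet NaN loses to
    # a number in both max and min.
    #   Decimal('NaN').max(Decimal(2))  -->  Decimal('2')
    #   Decimal('NaN').min(Decimal(2))  -->  Decimal('2')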
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If NaN or Infinity, self._exp is string
except TypeError:
return 0
def canonical(self):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
"""
return self
def compare_signal(self, other, context=None):
"""Compares self to the other operand numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
"""
other = _convert_other(other, raiseit = True)
ans = self._compare_check_nans(other, context)
if ans:
return ans
return self.compare(other, context=context)
def compare_total(self, other, context=None):
"""Compares self to other using the abstract representations.
        This is not like the standard compare, which uses the operands'
        numerical values. Note that a total ordering is defined for all possible abstract
representations.
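        Illustrative examples (the context argument is not consulted):
        >>> Decimal('12.30').compare_total(Decimal('12.3'))
        Decimal('-1')
        >>> Decimal('12.30').compare_total(Decimal('12.30'))
        Decimal('0')
        >>> Decimal('12.3').compare_total(Decimal('NaN'))
        Decimal('-1')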
"""
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
return _NegativeOne
if self > other:
return _One
if self._exp < other._exp:
if sign:
return _One
else:
return _NegativeOne
if self._exp > other._exp:
if sign:
return _NegativeOne
else:
return _One
return _Zero
def compare_total_mag(self, other, context=None):
"""Compares self to other using abstract repr., ignoring sign.
        Like compare_total, but with the operands' signs ignored and assumed to be 0.
"""
other = _convert_other(other, raiseit=True)
s = self.copy_abs()
o = other.copy_abs()
return s.compare_total(o)
def copy_abs(self):
"""Returns a copy with the sign set to 0. """
return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
"""Returns a copy with the sign inverted."""
if self._sign:
return _dec_from_triple(0, self._int, self._exp, self._is_special)
else:
return _dec_from_triple(1, self._int, self._exp, self._is_special)
def copy_sign(self, other, context=None):
"""Returns self with the sign of other."""
other = _convert_other(other, raiseit=True)
return _dec_from_triple(other._sign, self._int,
self._exp, self._is_special)
def exp(self, context=None):
"""Returns e ** self."""
if context is None:
context = getcontext()
# exp(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# exp(-Infinity) = 0
if self._isinfinity() == -1:
return _Zero
# exp(0) = 1
if not self:
return _One
# exp(Infinity) = Infinity
if self._isinfinity() == 1:
return Decimal(self)
# the result is now guaranteed to be inexact (the true
# mathematical result is transcendental). There's no need to
# raise Rounded and Inexact here---they'll always be raised as
# a result of the call to _fix.
p = context.prec
adj = self.adjusted()
# we only need to do any computation for quite a small range
# of adjusted exponents---for example, -29 <= adj <= 10 for
# the default context. For smaller exponent the result is
# indistinguishable from 1 at the given precision, while for
# larger exponent the result either overflows or underflows.
if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
# overflow
ans = _dec_from_triple(0, '1', context.Emax+1)
elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
# underflow to 0
ans = _dec_from_triple(0, '1', context.Etiny()-1)
elif self._sign == 0 and adj < -p:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
elif self._sign == 1 and adj < -p-1:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '9'*(p+1), -p-1)
# general case
else:
op = _WorkRep(self)
c, e = op.int, op.exp
if op.sign == 1:
c = -c
# compute correctly rounded result: increase precision by
# 3 digits at a time until we get an unambiguously
# roundable result
extra = 3
while True:
coeff, exp = _dexp(c, e, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(0, str(coeff), exp)
# at this stage, ans should round correctly with *any*
# rounding mode, not just with ROUND_HALF_EVEN
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
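    # For instance, Decimal(1).exp(Context(prec=9)) gives
    # Decimal('2.71828183'): the loop above keeps adding digits, three at a
    # time, until the intermediate result cannot sit too close to a rounding
    # boundary, so the final ROUND_HALF_EVEN step is correct at any
    # requested precision.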
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted()
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
def _ln_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.ln().
        In other words, compute r such that abs(self.ln()) >= 10**r. Assumes
that self is finite and positive and that self != 1.
"""
# for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
adj = self._exp + len(self._int) - 1
if adj >= 1:
# argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
return len(str(adj*23//10)) - 1
if adj <= -2:
# argument <= 0.1
return len(str((-1-adj)*23//10)) - 1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(c)
return len(num) - len(den) - (num < den)
# adj == -1, 0.1 <= self < 1
return e + len(str(10**-e - c)) - 1
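    # Worked example: for Decimal('1.1'), adj == 0, c == 11, e == -1, so
    # num = '1', den = '11' and the bound is 1 - 2 - ('1' < '11') == -2;
    # indeed ln(1.1) ~= 0.0953 >= 10**-2.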
def ln(self, context=None):
"""Returns the natural (base e) logarithm of self."""
if context is None:
context = getcontext()
# ln(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# ln(0.0) == -Infinity
if not self:
return _NegativeInfinity
# ln(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# ln(1.0) == 0.0
if self == _One:
return _Zero
# ln(negative) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'ln of a negative value')
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision by 3
# until we get an unambiguously roundable result
places = p - self._ln_exp_bound() + 2 # at least p+3 places
while True:
coeff = _dlog(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
def _log10_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.log10().
        In other words, find r such that abs(self.log10()) >= 10**r.
Assumes that self is finite and positive and that self != 1.
"""
# For x >= 10 or x < 0.1 we only need a bound on the integer
# part of log10(self), and this comes directly from the
# exponent of x. For 0.1 <= x <= 10 we use the inequalities
# 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
# (1-1/x)/2.31 > 0. If x < 1 then |log10(x)| > (1-x)/2.31 > 0
adj = self._exp + len(self._int) - 1
if adj >= 1:
# self >= 10
return len(str(adj))-1
if adj <= -2:
# self < 0.1
return len(str(-1-adj))-1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(231*c)
return len(num) - len(den) - (num < den) + 2
# adj == -1, 0.1 <= self < 1
num = str(10**-e-c)
return len(num) + e - (num < "231") - 1
def log10(self, context=None):
"""Returns the base 10 logarithm of self."""
if context is None:
context = getcontext()
# log10(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# log10(0.0) == -Infinity
if not self:
return _NegativeInfinity
# log10(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# log10(negative or -Infinity) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'log10 of a negative value')
# log10(10**n) = n
if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
# answer may need rounding
ans = Decimal(self._exp + len(self._int) - 1)
else:
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision
# until result is unambiguously roundable
places = p-self._log10_exp_bound()+2
while True:
coeff = _dlog10(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
def logb(self, context=None):
""" Returns the exponent of the magnitude of self's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of self (as though it were truncated
to a single digit while maintaining the value of that digit and
without limiting the resulting exponent).
"""
# logb(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
# logb(+/-Inf) = +Inf
if self._isinfinity():
return _Infinity
# logb(0) = -Inf, DivisionByZero
if not self:
return context._raise_error(DivisionByZero, 'logb(0)', 1)
# otherwise, simply return the adjusted exponent of self, as a
# Decimal. Note that no attempt is made to fit the result
# into the current context.
ans = Decimal(self.adjusted())
return ans._fix(context)
def _islogical(self):
"""Return True if self is a logical operand.
        To be logical, a number must be finite with a sign of 0, an
        exponent of 0, and a coefficient whose digits are all either
        0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
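    # For example, Decimal('1010') is logical, while Decimal('2') (bad
    # digit), Decimal('-1') (nonzero sign) and Decimal('1E+1') (nonzero
    # exponent) are not.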
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
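    # For example, with context.prec == 9, an operand coefficient '101' is
    # padded to '000000101', while one longer than nine digits is cut back
    # to its nine rightmost digits.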
def logical_and(self, other, context=None):
"""Applies an 'and' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_invert(self, context=None):
"""Invert all its digits."""
if context is None:
context = getcontext()
return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
context)
def logical_or(self, other, context=None):
"""Applies an 'or' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
"""Applies an 'xor' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def max_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is a number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
            # If one operand is a quiet NaN and the other is a number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def next_minus(self, context=None):
"""Returns the largest representable number smaller than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == -1:
return _NegativeInfinity
if self._isinfinity() == 1:
return _dec_from_triple(0, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_FLOOR)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_plus(self, context=None):
"""Returns the smallest representable number larger than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == 1:
return _Infinity
if self._isinfinity() == -1:
return _dec_from_triple(1, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_CEILING)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_toward(self, other, context=None):
"""Returns the number closest to self, in the direction towards other.
The result is the closest representable number to self
(excluding self) that is in the direction towards other,
unless both have the same value. If the two operands are
numerically equal, then the result is a copy of self with the
sign set to be the same as the sign of other.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
comparison = self._cmp(other)
if comparison == 0:
return self.copy_sign(other)
if comparison == -1:
ans = self.next_plus(context)
else: # comparison == 1
ans = self.next_minus(context)
# decide which flags to raise using value of ans
if ans._isinfinity():
context._raise_error(Overflow,
'Infinite result from next_toward',
ans._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
elif ans.adjusted() < context.Emin:
context._raise_error(Underflow)
context._raise_error(Subnormal)
context._raise_error(Inexact)
context._raise_error(Rounded)
# if precision == 1 then we don't raise Clamped for a
# result 0E-Etiny.
if not ans:
context._raise_error(Clamped)
return ans
def number_class(self, context=None):
"""Returns an indication of the class of self.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
"""
if self.is_snan():
return "sNaN"
if self.is_qnan():
return "NaN"
inf = self._isinfinity()
if inf == 1:
return "+Infinity"
if inf == -1:
return "-Infinity"
if self.is_zero():
if self._sign:
return "-Zero"
else:
return "+Zero"
if context is None:
context = getcontext()
if self.is_subnormal(context=context):
if self._sign:
return "-Subnormal"
else:
return "+Subnormal"
# just a normal, regular, boring number, :)
if self._sign:
return "-Normal"
else:
return "+Normal"
def radix(self):
"""Just returns 10, as this is Decimal, :)"""
return Decimal(10)
def rotate(self, other, context=None):
"""Returns a rotated copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's rotate!
rotated = rotdig[torot:] + rotdig[:torot]
return _dec_from_triple(self._sign,
rotated.lstrip('0') or '0', self._exp)
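    # For example, with context.prec == 9, Decimal('34').rotate(8) first
    # pads to '000000034' and then rotates left by eight places, giving
    # Decimal('400000003').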
def scaleb(self, other, context=None):
"""Returns self operand after adding the second value to its exp."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
liminf = -2 * (context.Emax + context.prec)
limsup = 2 * (context.Emax + context.prec)
if not (liminf <= int(other) <= limsup):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
d = d._fix(context)
return d
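    # For example, Decimal('7.50').scaleb(-2) shifts the exponent from -2
    # to -4 and, after the final _fix, returns Decimal('0.0750') in the
    # default context.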
def shift(self, other, context=None):
"""Returns a shifted copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's shift!
if torot < 0:
shifted = rotdig[:torot]
else:
shifted = rotdig + '0'*torot
shifted = shifted[-context.prec:]
return _dec_from_triple(self._sign,
shifted.lstrip('0') or '0', self._exp)
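    # For example, with context.prec == 9, Decimal('34').shift(8) pads to
    # '000000034', appends eight zeros and keeps the nine rightmost digits,
    # returning Decimal('400000000'); a negative shift drops digits from
    # the right instead.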
# Support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) is Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) is Decimal:
return self # My components are also immutable
return self.__class__(str(self))
    # PEP 3101 support. The _localeconv keyword argument should be
    # considered private: it's provided for ease of testing only.
def __format__(self, specifier, context=None, _localeconv=None):
"""Format a Decimal instance according to the given specifier.
The specifier should be a standard format specifier, with the
form described in PEP 3101. Formatting types 'e', 'E', 'f',
'F', 'g', 'G', 'n' and '%' are supported. If the formatting
type is omitted it defaults to 'g' or 'G', depending on the
value of context.capitals.
"""
# Note: PEP 3101 says that if the type is not present then
# there should be at least one digit after the decimal point.
# We take the liberty of ignoring this requirement for
# Decimal---it's presumably there to make sure that
# format(float, '') behaves similarly to str(float).
if context is None:
context = getcontext()
spec = _parse_format_specifier(specifier, _localeconv=_localeconv)
# special values don't care about the type or precision
if self._is_special:
sign = _format_sign(self._sign, spec)
body = str(self.copy_abs())
return _format_align(sign, body, spec)
# a type of None defaults to 'g' or 'G', depending on context
if spec['type'] is None:
spec['type'] = ['g', 'G'][context.capitals]
# if type is '%', adjust exponent of self accordingly
if spec['type'] == '%':
self = _dec_from_triple(self._sign, self._int, self._exp+2)
# round if necessary, taking rounding mode from the context
rounding = context.rounding
precision = spec['precision']
if precision is not None:
if spec['type'] in 'eE':
self = self._round(precision+1, rounding)
elif spec['type'] in 'fF%':
self = self._rescale(-precision, rounding)
elif spec['type'] in 'gG' and len(self._int) > precision:
self = self._round(precision, rounding)
# special case: zeros with a positive exponent can't be
# represented in fixed point; rescale them to 0e0.
if not self and self._exp > 0 and spec['type'] in 'fF%':
self = self._rescale(0, rounding)
# figure out placement of the decimal point
leftdigits = self._exp + len(self._int)
if spec['type'] in 'eE':
if not self and precision is not None:
dotplace = 1 - precision
else:
dotplace = 1
elif spec['type'] in 'fF%':
dotplace = leftdigits
elif spec['type'] in 'gG':
if self._exp <= 0 and leftdigits > -6:
dotplace = leftdigits
else:
dotplace = 1
# find digits before and after decimal point, and get exponent
if dotplace < 0:
intpart = '0'
fracpart = '0'*(-dotplace) + self._int
elif dotplace > len(self._int):
intpart = self._int + '0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace] or '0'
fracpart = self._int[dotplace:]
exp = leftdigits-dotplace
# done with the decimal-specific stuff; hand over the rest
# of the formatting to the _format_number function
return _format_number(self._sign, intpart, fracpart, exp, spec)
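    # For example, format(Decimal('1234.5'), '.2f') returns '1234.50' and
    # format(Decimal('1234.5'), '.2e') returns '1.23e+3' (rounded half-even
    # via the context), while special values ignore type and precision:
    # format(Decimal('NaN'), '.2f') returns 'NaN'.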
def _dec_from_triple(sign, coefficient, exponent, special=False):
"""Create a decimal instance directly, without any validation,
normalization (e.g. removal of leading zeros) or argument
conversion.
This function is for *internal use only*.
"""
self = object.__new__(Decimal)
self._sign = sign
self._int = coefficient
self._exp = exponent
self._is_special = special
return self
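# For example, _dec_from_triple(1, '15', -1) builds Decimal('-1.5')
# directly, skipping the parsing and validation that Decimal('-1.5')
# would perform.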
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).
_numbers.Number.register(Decimal)
##### Context class #######################################################
class _ContextManager(object):
"""Context manager class to support localcontext().
Sets a copy of the supplied context in __enter__() and restores
the previous decimal context in __exit__()
"""
def __init__(self, new_context):
self.new_context = new_context.copy()
def __enter__(self):
self.saved_context = getcontext()
setcontext(self.new_context)
return self.new_context
def __exit__(self, t, v, tb):
setcontext(self.saved_context)
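# Illustrative use, via the module's localcontext() helper:
#
#     with localcontext(Context(prec=5)) as ctx:
#         ...                    # getcontext() returns ctx in this block
#     # the previous thread-local context is restored on exit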
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
    prec - precision (for use in rounding, division, square roots...)
rounding - rounding type (how you round)
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is set.
(Whether or not the trap_enabler is set)
Should be reset by user of Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None, traps=None,
_ignored_flags=None):
# Set defaults; for everything except flags and _ignored_flags,
# inherit from DefaultContext.
try:
dc = DefaultContext
except NameError:
pass
self.prec = prec if prec is not None else dc.prec
self.rounding = rounding if rounding is not None else dc.rounding
self.Emin = Emin if Emin is not None else dc.Emin
self.Emax = Emax if Emax is not None else dc.Emax
self.capitals = capitals if capitals is not None else dc.capitals
self.clamp = clamp if clamp is not None else dc.clamp
if _ignored_flags is None:
self._ignored_flags = []
else:
self._ignored_flags = _ignored_flags
if traps is None:
self.traps = dc.traps.copy()
elif not isinstance(traps, dict):
self.traps = dict((s, int(s in traps)) for s in _signals + traps)
else:
self.traps = traps
if flags is None:
self.flags = dict.fromkeys(_signals, 0)
elif not isinstance(flags, dict):
self.flags = dict((s, int(s in flags)) for s in _signals + flags)
else:
self.flags = flags
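        # For example, Context(traps=[DivisionByZero]) expands the list
        # form above into a full signal dict: every signal maps to 0
        # except DivisionByZero, which maps to 1.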
def _set_integer_check(self, name, value, vmin, vmax):
if not isinstance(value, int):
raise TypeError("%s must be an integer" % name)
if vmin == '-inf':
if value > vmax:
raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value))
elif vmax == 'inf':
if value < vmin:
raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value))
else:
if value < vmin or value > vmax:
raise ValueError("%s must be in [%d, %d]. got %s" % (name, vmin, vmax, value))
return object.__setattr__(self, name, value)
    def _set_signal_dict(self, name, d):
        if not isinstance(d, dict):
            raise TypeError("%s must be a signal dict" % name)
        for key in d:
            if key not in _signals:
                raise KeyError("%s is not a valid signal in the %s dict" % (key, name))
        for key in _signals:
            if key not in d:
                raise KeyError("signal %s is missing from the %s dict" % (key, name))
        return object.__setattr__(self, name, d)
def __setattr__(self, name, value):
if name == 'prec':
return self._set_integer_check(name, value, 1, 'inf')
elif name == 'Emin':
return self._set_integer_check(name, value, '-inf', 0)
elif name == 'Emax':
return self._set_integer_check(name, value, 0, 'inf')
elif name == 'capitals':
return self._set_integer_check(name, value, 0, 1)
elif name == 'clamp':
return self._set_integer_check(name, value, 0, 1)
elif name == 'rounding':
            if value not in _rounding_modes:
# raise TypeError even for strings to have consistency
# among various implementations.
raise TypeError("%s: invalid rounding mode" % value)
return object.__setattr__(self, name, value)
elif name == 'flags' or name == 'traps':
return self._set_signal_dict(name, value)
elif name == '_ignored_flags':
return object.__setattr__(self, name, value)
else:
raise AttributeError(
"'decimal.Context' object has no attribute '%s'" % name)
def __delattr__(self, name):
raise AttributeError("%s cannot be deleted" % name)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
flags = [sig for sig, v in self.flags.items() if v]
traps = [sig for sig, v in self.traps.items() if v]
return (self.__class__,
(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, flags, traps))
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, '
'clamp=%(clamp)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def clear_traps(self):
"""Reset all traps to zero"""
for flag in self.traps:
self.traps[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, self.flags, self.traps,
self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp,
self.flags.copy(), self.traps.copy(),
self._ignored_flags)
return nc
__copy__ = copy
    def _raise_error(self, condition, explanation=None, *args):
"""Handles an error
If the flag is in _ignored_flags, returns the default response.
Otherwise, it sets the flag, then, if the corresponding
        trap_enabler is set, it raises the exception. Otherwise, it returns
the default value after setting the flag.
"""
error = _condition_map.get(condition, condition)
if error in self._ignored_flags:
# Don't touch the flag
return error().handle(self, *args)
self.flags[error] = 1
if not self.traps[error]:
# The errors define how to handle themselves.
return condition().handle(self, *args)
# Errors should only be risked on copies of the context
# self._ignored_flags = []
raise error(explanation)
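    # For example, when traps[DivisionByZero] is 1, Decimal(1) / Decimal(0)
    # reaches this method and raises DivisionByZero; with the trap cleared
    # it sets the flag and returns the signal's default result,
    # Decimal('Infinity').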
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate-- This way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
        self.rounding = type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context.
This method implements the to-number operation of the
IBM Decimal specification."""
if isinstance(num, str) and num != num.strip():
return self._raise_error(ConversionSyntax,
"no trailing or leading whitespace is "
"permitted.")
d = Decimal(num, context=self)
if d._isnan() and len(d._int) > self.prec - self.clamp:
return self._raise_error(ConversionSyntax,
"diagnostic info too long in NaN")
return d._fix(self)
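    # For example, Context(prec=3).create_decimal('3.14159') rounds to
    # Decimal('3.14'), whereas the plain Decimal('3.14159') constructor
    # never rounds.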
def create_decimal_from_float(self, f):
"""Creates a new Decimal instance from a float but rounding using self
as the context.
>>> context = Context(prec=5, rounding=ROUND_DOWN)
>>> context.create_decimal_from_float(3.1415926535897932)
Decimal('3.1415')
>>> context = Context(prec=5, traps=[Inexact])
>>> context.create_decimal_from_float(3.1415926535897932)
Traceback (most recent call last):
...
decimal.Inexact: None
"""
d = Decimal.from_float(f) # An exact conversion
return d._fix(self) # Apply the context rounding
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal('19.00')
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal('1.02E+4')
>>> ExtendedContext.add(1, Decimal(2))
Decimal('3')
>>> ExtendedContext.add(Decimal(8), 5)
Decimal('13')
>>> ExtendedContext.add(5, 5)
Decimal('10')
"""
a = _convert_other(a, raiseit=True)
r = a.__add__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal('2.50')
"""
if not isinstance(a, Decimal):
raise TypeError("canonical requires a Decimal as an argument.")
return a.canonical()
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal('-1')
>>> ExtendedContext.compare(1, 2)
Decimal('-1')
>>> ExtendedContext.compare(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.compare_signal(-1, 2)
Decimal('-1')
>>> c.compare_signal(Decimal(-1), 2)
Decimal('-1')
>>> c.compare_signal(-1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
        This is not like the standard compare, which uses the operands'
        numerical values. Note that a total ordering is defined for all possible abstract
representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal('0')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal('1')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal('-1')
>>> ExtendedContext.compare_total(1, 2)
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare_total(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_total(b)
def compare_total_mag(self, a, b):
"""Compares two operands using their abstract representation ignoring sign.
        Like compare_total, but with the operands' signs ignored and assumed to be 0.
"""
a = _convert_other(a, raiseit=True)
return a.compare_total_mag(b)
def copy_abs(self, a):
"""Returns a copy of the operand with the sign set to 0.
>>> ExtendedContext.copy_abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.copy_abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_abs()
def copy_decimal(self, a):
"""Returns a copy of the decimal object.
>>> ExtendedContext.copy_decimal(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_decimal(Decimal('-1.00'))
Decimal('-1.00')
>>> ExtendedContext.copy_decimal(1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return Decimal(a)
def copy_negate(self, a):
"""Returns a copy of the operand with the sign inverted.
>>> ExtendedContext.copy_negate(Decimal('101.5'))
Decimal('-101.5')
>>> ExtendedContext.copy_negate(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.copy_negate(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_negate()
def copy_sign(self, a, b):
"""Copies the second operand's sign to the first one.
In detail, it returns a copy of the first operand with the sign
equal to the sign of the second operand.
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(1, -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(Decimal(1), -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(1, Decimal(-2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__truediv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal('0')
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal('3')
>>> ExtendedContext.divide_int(10, 3)
Decimal('3')
>>> ExtendedContext.divide_int(Decimal(10), 3)
Decimal('3')
>>> ExtendedContext.divide_int(10, Decimal(3))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__floordiv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divmod(self, a, b):
"""Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
"""
a = _convert_other(a, raiseit=True)
r = a.__divmod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def exp(self, a):
"""Returns e ** a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.exp(Decimal('-Infinity'))
Decimal('0')
>>> c.exp(Decimal('-1'))
Decimal('0.367879441')
>>> c.exp(Decimal('0'))
Decimal('1')
>>> c.exp(Decimal('1'))
Decimal('2.71828183')
>>> c.exp(Decimal('0.693147181'))
Decimal('2.00000000')
>>> c.exp(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.exp(10)
Decimal('22026.4658')
"""
        a = _convert_other(a, raiseit=True)
return a.exp(context=self)
def fma(self, a, b, c):
"""Returns a multiplied by b, plus c.
The first two operands are multiplied together, using multiply,
the third operand is then added to the result of that
multiplication, using add, all with only one final rounding.
>>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
Decimal('22')
>>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
Decimal('-8')
>>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
Decimal('1.38435736E+12')
>>> ExtendedContext.fma(1, 3, 4)
Decimal('7')
>>> ExtendedContext.fma(1, Decimal(3), 4)
Decimal('7')
>>> ExtendedContext.fma(1, 3, Decimal(4))
Decimal('7')
"""
a = _convert_other(a, raiseit=True)
return a.fma(b, c, context=self)
def is_canonical(self, a):
"""Return True if the operand is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
>>> ExtendedContext.is_canonical(Decimal('2.50'))
True
"""
if not isinstance(a, Decimal):
raise TypeError("is_canonical requires a Decimal as an argument.")
return a.is_canonical()
def is_finite(self, a):
"""Return True if the operand is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
>>> ExtendedContext.is_finite(Decimal('2.50'))
True
>>> ExtendedContext.is_finite(Decimal('-0.3'))
True
>>> ExtendedContext.is_finite(Decimal('0'))
True
>>> ExtendedContext.is_finite(Decimal('Inf'))
False
>>> ExtendedContext.is_finite(Decimal('NaN'))
False
>>> ExtendedContext.is_finite(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_finite()
def is_infinite(self, a):
"""Return True if the operand is infinite; otherwise return False.
>>> ExtendedContext.is_infinite(Decimal('2.50'))
False
>>> ExtendedContext.is_infinite(Decimal('-Inf'))
True
>>> ExtendedContext.is_infinite(Decimal('NaN'))
False
>>> ExtendedContext.is_infinite(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_infinite()
def is_nan(self, a):
"""Return True if the operand is a qNaN or sNaN;
otherwise return False.
>>> ExtendedContext.is_nan(Decimal('2.50'))
False
>>> ExtendedContext.is_nan(Decimal('NaN'))
True
>>> ExtendedContext.is_nan(Decimal('-sNaN'))
True
>>> ExtendedContext.is_nan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_nan()
def is_normal(self, a):
"""Return True if the operand is a normal number;
otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_normal(Decimal('2.50'))
True
>>> c.is_normal(Decimal('0.1E-999'))
False
>>> c.is_normal(Decimal('0.00'))
False
>>> c.is_normal(Decimal('-Inf'))
False
>>> c.is_normal(Decimal('NaN'))
False
>>> c.is_normal(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_normal(context=self)
def is_qnan(self, a):
"""Return True if the operand is a quiet NaN; otherwise return False.
>>> ExtendedContext.is_qnan(Decimal('2.50'))
False
>>> ExtendedContext.is_qnan(Decimal('NaN'))
True
>>> ExtendedContext.is_qnan(Decimal('sNaN'))
False
>>> ExtendedContext.is_qnan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_qnan()
def is_signed(self, a):
"""Return True if the operand is negative; otherwise return False.
>>> ExtendedContext.is_signed(Decimal('2.50'))
False
>>> ExtendedContext.is_signed(Decimal('-12'))
True
>>> ExtendedContext.is_signed(Decimal('-0'))
True
>>> ExtendedContext.is_signed(8)
False
>>> ExtendedContext.is_signed(-8)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_signed()
def is_snan(self, a):
"""Return True if the operand is a signaling NaN;
otherwise return False.
>>> ExtendedContext.is_snan(Decimal('2.50'))
False
>>> ExtendedContext.is_snan(Decimal('NaN'))
False
>>> ExtendedContext.is_snan(Decimal('sNaN'))
True
>>> ExtendedContext.is_snan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_snan()
def is_subnormal(self, a):
"""Return True if the operand is subnormal; otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_subnormal(Decimal('2.50'))
False
>>> c.is_subnormal(Decimal('0.1E-999'))
True
>>> c.is_subnormal(Decimal('0.00'))
False
>>> c.is_subnormal(Decimal('-Inf'))
False
>>> c.is_subnormal(Decimal('NaN'))
False
>>> c.is_subnormal(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_subnormal(context=self)
def is_zero(self, a):
"""Return True if the operand is a zero; otherwise return False.
>>> ExtendedContext.is_zero(Decimal('0'))
True
>>> ExtendedContext.is_zero(Decimal('2.50'))
False
>>> ExtendedContext.is_zero(Decimal('-0E+2'))
True
>>> ExtendedContext.is_zero(1)
False
>>> ExtendedContext.is_zero(0)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_zero()
def ln(self, a):
"""Returns the natural (base e) logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.ln(Decimal('0'))
Decimal('-Infinity')
>>> c.ln(Decimal('1.000'))
Decimal('0')
>>> c.ln(Decimal('2.71828183'))
Decimal('1.00000000')
>>> c.ln(Decimal('10'))
Decimal('2.30258509')
>>> c.ln(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.ln(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.ln(context=self)
def log10(self, a):
"""Returns the base 10 logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.log10(Decimal('0'))
Decimal('-Infinity')
>>> c.log10(Decimal('0.001'))
Decimal('-3')
>>> c.log10(Decimal('1.000'))
Decimal('0')
>>> c.log10(Decimal('2'))
Decimal('0.301029996')
>>> c.log10(Decimal('10'))
Decimal('1')
>>> c.log10(Decimal('70'))
Decimal('1.84509804')
>>> c.log10(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.log10(0)
Decimal('-Infinity')
>>> c.log10(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.log10(context=self)
def logb(self, a):
""" Returns the exponent of the magnitude of the operand's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of the operand (as though the
operand were truncated to a single digit while maintaining the
value of that digit and without limiting the resulting exponent).
>>> ExtendedContext.logb(Decimal('250'))
Decimal('2')
>>> ExtendedContext.logb(Decimal('2.50'))
Decimal('0')
>>> ExtendedContext.logb(Decimal('0.03'))
Decimal('-2')
>>> ExtendedContext.logb(Decimal('0'))
Decimal('-Infinity')
>>> ExtendedContext.logb(1)
Decimal('0')
>>> ExtendedContext.logb(10)
Decimal('1')
>>> ExtendedContext.logb(100)
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.logb(context=self)
def logical_and(self, a, b):
"""Applies the logical operation 'and' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
Decimal('1000')
>>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
Decimal('10')
>>> ExtendedContext.logical_and(110, 1101)
Decimal('100')
>>> ExtendedContext.logical_and(Decimal(110), 1101)
Decimal('100')
>>> ExtendedContext.logical_and(110, Decimal(1101))
Decimal('100')
"""
a = _convert_other(a, raiseit=True)
return a.logical_and(b, context=self)
def logical_invert(self, a):
"""Invert all the digits in the operand.
The operand must be a logical number.
>>> ExtendedContext.logical_invert(Decimal('0'))
Decimal('111111111')
>>> ExtendedContext.logical_invert(Decimal('1'))
Decimal('111111110')
>>> ExtendedContext.logical_invert(Decimal('111111111'))
Decimal('0')
>>> ExtendedContext.logical_invert(Decimal('101010101'))
Decimal('10101010')
>>> ExtendedContext.logical_invert(1101)
Decimal('111110010')
"""
a = _convert_other(a, raiseit=True)
return a.logical_invert(context=self)
def logical_or(self, a, b):
"""Applies the logical operation 'or' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
Decimal('1110')
>>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
Decimal('1110')
>>> ExtendedContext.logical_or(110, 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(Decimal(110), 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(110, Decimal(1101))
Decimal('1111')
"""
a = _convert_other(a, raiseit=True)
return a.logical_or(b, context=self)
def logical_xor(self, a, b):
"""Applies the logical operation 'xor' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
Decimal('110')
>>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
Decimal('1101')
>>> ExtendedContext.logical_xor(110, 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(Decimal(110), 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(110, Decimal(1101))
Decimal('1011')
"""
a = _convert_other(a, raiseit=True)
return a.logical_xor(b, context=self)
def max(self, a, b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal('3')
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max(1, 2)
Decimal('2')
>>> ExtendedContext.max(Decimal(1), 2)
Decimal('2')
>>> ExtendedContext.max(1, Decimal(2))
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.max(b, context=self)
def max_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
Decimal('-10')
>>> ExtendedContext.max_mag(1, -2)
Decimal('-2')
>>> ExtendedContext.max_mag(Decimal(1), -2)
Decimal('-2')
>>> ExtendedContext.max_mag(1, Decimal(-2))
Decimal('-2')
"""
a = _convert_other(a, raiseit=True)
return a.max_mag(b, context=self)
def min(self, a, b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal('2')
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal('-10')
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal('1.0')
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.min(1, 2)
Decimal('1')
>>> ExtendedContext.min(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.min(1, Decimal(29))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min(b, context=self)
def min_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
Decimal('-2')
>>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
Decimal('-3')
>>> ExtendedContext.min_mag(1, -2)
Decimal('1')
>>> ExtendedContext.min_mag(Decimal(1), -2)
Decimal('1')
>>> ExtendedContext.min_mag(1, Decimal(-2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min_mag(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal('-1.3')
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal('1.3')
>>> ExtendedContext.minus(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together
('long multiplication'), resulting in a number which may be as long as
the sum of the lengths of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal('3.60')
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal('21')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal('0.72')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal('-0.0')
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal('4.28135971E+11')
>>> ExtendedContext.multiply(7, 7)
Decimal('49')
>>> ExtendedContext.multiply(Decimal(7), 7)
Decimal('49')
>>> ExtendedContext.multiply(7, Decimal(7))
Decimal('49')
"""
a = _convert_other(a, raiseit=True)
r = a.__mul__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def next_minus(self, a):
"""Returns the largest representable number smaller than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_minus(Decimal('1'))
Decimal('0.999999999')
>>> c.next_minus(Decimal('1E-1007'))
Decimal('0E-1007')
>>> ExtendedContext.next_minus(Decimal('-1.00000003'))
Decimal('-1.00000004')
>>> c.next_minus(Decimal('Infinity'))
Decimal('9.99999999E+999')
>>> c.next_minus(1)
Decimal('0.999999999')
"""
a = _convert_other(a, raiseit=True)
return a.next_minus(context=self)
def next_plus(self, a):
"""Returns the smallest representable number larger than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_plus(Decimal('1'))
Decimal('1.00000001')
>>> c.next_plus(Decimal('-1E-1007'))
Decimal('-0E-1007')
>>> ExtendedContext.next_plus(Decimal('-1.00000003'))
Decimal('-1.00000002')
>>> c.next_plus(Decimal('-Infinity'))
Decimal('-9.99999999E+999')
>>> c.next_plus(1)
Decimal('1.00000001')
"""
a = _convert_other(a, raiseit=True)
return a.next_plus(context=self)
def next_toward(self, a, b):
"""Returns the number closest to a, in direction towards b.
The result is the closest representable number from the first
operand (but not the first operand) that is in the direction
towards the second operand, unless the operands have the same
value.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.next_toward(Decimal('1'), Decimal('2'))
Decimal('1.00000001')
>>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
Decimal('-0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
Decimal('-1.00000002')
>>> c.next_toward(Decimal('1'), Decimal('0'))
Decimal('0.999999999')
>>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
Decimal('0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
Decimal('-1.00000004')
>>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
Decimal('-0.00')
>>> c.next_toward(0, 1)
Decimal('1E-1007')
>>> c.next_toward(Decimal(0), 1)
Decimal('1E-1007')
>>> c.next_toward(0, Decimal(1))
Decimal('1E-1007')
"""
a = _convert_other(a, raiseit=True)
return a.next_toward(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal('-2')
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal('1.2')
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal('-1.2E+2')
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal('1.2E+2')
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal('0')
>>> ExtendedContext.normalize(6)
Decimal('6')
"""
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
def number_class(self, a):
"""Returns an indication of the class of the operand.
The class is one of the following strings:
-sNaN
-NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.number_class(Decimal('Infinity'))
'+Infinity'
>>> c.number_class(Decimal('1E-10'))
'+Normal'
>>> c.number_class(Decimal('2.50'))
'+Normal'
>>> c.number_class(Decimal('0.1E-999'))
'+Subnormal'
>>> c.number_class(Decimal('0'))
'+Zero'
>>> c.number_class(Decimal('-0'))
'-Zero'
>>> c.number_class(Decimal('-0.1E-999'))
'-Subnormal'
>>> c.number_class(Decimal('-1E-10'))
'-Normal'
>>> c.number_class(Decimal('-2.50'))
'-Normal'
>>> c.number_class(Decimal('-Infinity'))
'-Infinity'
>>> c.number_class(Decimal('NaN'))
'NaN'
>>> c.number_class(Decimal('-NaN'))
'NaN'
>>> c.number_class(Decimal('sNaN'))
'sNaN'
>>> c.number_class(123)
'+Normal'
"""
a = _convert_other(a, raiseit=True)
return a.number_class(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal('1.3')
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal('-1.3')
>>> ExtendedContext.plus(-1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
With two arguments, compute a**b. If a is negative then b
must be integral. The result will be inexact unless b is
integral and the result is finite and can be expressed exactly
in 'precision' digits.
With three arguments, compute (a**b) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- b must be nonnegative
- at least one of a or b must be nonzero
- modulo must be nonzero and have at most 'precision' digits
The result of pow(a, b, modulo) is identical to the result
that would be obtained by computing (a**b) % modulo with
unbounded precision, but is computed more efficiently. It is
always exact.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.power(Decimal('2'), Decimal('3'))
Decimal('8')
>>> c.power(Decimal('-2'), Decimal('3'))
Decimal('-8')
>>> c.power(Decimal('2'), Decimal('-3'))
Decimal('0.125')
>>> c.power(Decimal('1.7'), Decimal('8'))
Decimal('69.7575744')
>>> c.power(Decimal('10'), Decimal('0.301029996'))
Decimal('2.00000000')
>>> c.power(Decimal('Infinity'), Decimal('-1'))
Decimal('0')
>>> c.power(Decimal('Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('Infinity'), Decimal('1'))
Decimal('Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('-1'))
Decimal('-0')
>>> c.power(Decimal('-Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('-Infinity'), Decimal('1'))
Decimal('-Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('2'))
Decimal('Infinity')
>>> c.power(Decimal('0'), Decimal('0'))
Decimal('NaN')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
Decimal('11')
>>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
Decimal('-11')
>>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
Decimal('1')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
Decimal('11')
>>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
Decimal('11729830')
>>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
Decimal('-0')
>>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
Decimal('1')
>>> ExtendedContext.power(7, 7)
Decimal('823543')
>>> ExtendedContext.power(Decimal(7), 7)
Decimal('823543')
>>> ExtendedContext.power(7, Decimal(7), 2)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__pow__(b, modulo, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded), having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
the exponent is being decreased), or is unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is
an error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal('2.170')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal('2.17')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal('2.2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal('2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal('0E+1')
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal('-Infinity')
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal('-0')
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal('-0E+5')
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal('217.0')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal('217')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal('2.2E+2')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal('2E+2')
>>> ExtendedContext.quantize(1, 2)
Decimal('1')
>>> ExtendedContext.quantize(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.quantize(1, Decimal(2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.quantize(b, context=self)
def radix(self):
"""Just returns 10, as this is Decimal, :)
>>> ExtendedContext.radix()
Decimal('10')
"""
return Decimal(10)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded
to precision digits if necessary. The sign of the result, if
non-zero, is the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal('2.1')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal('1.0')
>>> ExtendedContext.remainder(22, 6)
Decimal('4')
>>> ExtendedContext.remainder(Decimal(22), 6)
Decimal('4')
>>> ExtendedContext.remainder(22, Decimal(6))
Decimal('4')
"""
a = _convert_other(a, raiseit=True)
r = a.__mod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal('-0.9')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal('-2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal('-0.3')
>>> ExtendedContext.remainder_near(3, 11)
Decimal('3')
>>> ExtendedContext.remainder_near(Decimal(3), 11)
Decimal('3')
>>> ExtendedContext.remainder_near(3, Decimal(11))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
return a.remainder_near(b, context=self)
def rotate(self, a, b):
"""Returns a rotated copy of a, b times.
The coefficient of the result is a rotated copy of the digits in
the coefficient of the first operand. The number of places of
rotation is taken from the absolute value of the second operand,
with the rotation being to the left if the second operand is
positive or to the right otherwise.
>>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
Decimal('400000003')
>>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
Decimal('12')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
Decimal('891234567')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
Decimal('345678912')
>>> ExtendedContext.rotate(1333333, 1)
Decimal('13333330')
>>> ExtendedContext.rotate(Decimal(1333333), 1)
Decimal('13333330')
>>> ExtendedContext.rotate(1333333, Decimal(1))
Decimal('13333330')
"""
a = _convert_other(a, raiseit=True)
return a.rotate(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
>>> ExtendedContext.same_quantum(10000, -1)
True
>>> ExtendedContext.same_quantum(Decimal(10000), -1)
True
>>> ExtendedContext.same_quantum(10000, Decimal(-1))
True
"""
a = _convert_other(a, raiseit=True)
return a.same_quantum(b)
    def scaleb(self, a, b):
        """Returns the first operand after adding the second value to its exponent.
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
Decimal('0.0750')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
Decimal('7.50')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
Decimal('7.50E+3')
>>> ExtendedContext.scaleb(1, 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(Decimal(1), 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(1, Decimal(4))
Decimal('1E+4')
"""
a = _convert_other(a, raiseit=True)
return a.scaleb(b, context=self)
def shift(self, a, b):
"""Returns a shifted copy of a, b times.
The coefficient of the result is a shifted copy of the digits
in the coefficient of the first operand. The number of places
to shift is taken from the absolute value of the second operand,
with the shift being to the left if the second operand is
positive or to the right otherwise. Digits shifted into the
coefficient are zeros.
>>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
Decimal('400000000')
>>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
Decimal('0')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
Decimal('1234567')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
Decimal('345678900')
>>> ExtendedContext.shift(88888888, 2)
Decimal('888888800')
>>> ExtendedContext.shift(Decimal(88888888), 2)
Decimal('888888800')
>>> ExtendedContext.shift(88888888, Decimal(2))
Decimal('888888800')
"""
a = _convert_other(a, raiseit=True)
return a.shift(b, context=self)
def sqrt(self, a):
"""Square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal('0')
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal('-0')
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal('0.624499800')
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal('10')
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal('1')
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal('2.64575131')
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal('3.16227766')
>>> ExtendedContext.sqrt(2)
Decimal('1.41421356')
>>> ExtendedContext.prec
9
"""
a = _convert_other(a, raiseit=True)
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal('0.23')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal('0.00')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal('-0.77')
>>> ExtendedContext.subtract(8, 5)
Decimal('3')
>>> ExtendedContext.subtract(Decimal(8), 5)
Decimal('3')
>>> ExtendedContext.subtract(8, Decimal(5))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__sub__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def to_eng_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.__str__(context=self)
def to_integral_exact(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting; Inexact and Rounded flags
are allowed in this operation. The rounding mode is taken from the
context.
>>> ExtendedContext.to_integral_exact(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_exact(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_exact(context=self)
def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_value(context=self)
    # The method name changed; the old name is kept as an alias for compatibility.
to_integral = to_integral_value
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
self.int = int(value._int)
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
def _normalize(op1, op2, prec=0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####
_nbits = int.bit_length
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10**e
else:
# val_n = largest power of 10 dividing n.
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10**-e
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
b=0
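    # Newton/Heron iteration: a <- (a + ceil(n/a)) >> 1.  The terse
    # expression below uses -(-n//a) to compute ceil(n/a) in integer arithmetic.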
while a != b:
b, a = a, a--n//a>>1
return a
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
b, q = 1 << shift, x >> shift
return q + (2*(x & (b-1)) + (q&1) > b)
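# Sketch: _rshift_nearest(25, 1) == 12 and _rshift_nearest(27, 1) == 14;
# 12.5 ties to the even 12, while 13.5 ties to the even 14.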
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
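# Sketch: _div_nearest(5, 2) == 2 and _div_nearest(7, 2) == 4 (ties go to even).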
def _ilog(x, M, L = 8):
"""Integer approximation to M*log(x/M), with absolute error boundable
in terms only of x/M.
Given positive integers x and M, return an integer approximation to
M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference
between the approximation and the exact result is at most 22. For
L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In
both cases these are upper bounds on the error; it will usually be
much smaller."""
# The basic algorithm is the following: let log1p be the function
# log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use
# the reduction
#
# log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
#
# repeatedly until the argument to log1p is small (< 2**-L in
# absolute value). For small y we can use the Taylor series
# expansion
#
# log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
#
# truncating at T such that y**T is small enough. The whole
# computation is carried out in a form of fixed-point arithmetic,
# with a real number z being represented by an integer
# approximation to z*M. To avoid loss of precision, the y below
# is actually an integer approximation to 2**R*y*M, where R is the
# number of reductions performed so far.
y = x-M
# argument reduction; R = number of reductions performed
R = 0
while (R <= L and abs(y) << L-R >= M or
R > L and abs(y) >> R-L >= M):
y = _div_nearest((M*y) << 1,
M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
R += 1
# Taylor series with T terms
T = -int(-10*len(str(M))//(3*L))
yshift = _rshift_nearest(y, R)
w = _div_nearest(M, T)
for k in range(T-1, 0, -1):
w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
return _div_nearest(w*y, M)
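# Sketch: _ilog(2*10**6, 10**6) approximates 10**6 * log(2) ~= 693147,
# with absolute error at most 22 per the bound above.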
def _dlog10(c, e, p):
"""Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# increase precision by 2; compensate for this by dividing
# final result by 100
p += 2
# write c*10**e as d*10**f with either:
# f >= 0 and 1 <= d <= 10, or
# f <= 0 and 0.1 <= d <= 1.
# Thus for c*10**e close to 1, f = 0
l = len(str(c))
f = e+l - (e+l >= 1)
if p > 0:
M = 10**p
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k)
log_d = _ilog(c, M) # error < 5 + 22 = 27
log_10 = _log10_digits(p) # error < 1
log_d = _div_nearest(log_d*M, log_10)
log_tenpower = f*M # exact
else:
log_d = 0 # error < 2.31
log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
return _div_nearest(log_tenpower+log_d, 100)
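# Sketch: _dlog10(2, 0, 3) approximates 10**3 * log10(2) ~= 301, with
# absolute error at most 1.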
def _dlog(c, e, p):
"""Given integers c, e and p with c > 0, compute an integer
approximation to 10**p * log(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# Increase precision by 2. The precision increase is compensated
# for at the end with a division by 100.
p += 2
# rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
# or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e)
# as 10**p * log(d) + 10**p*f * log(10).
l = len(str(c))
f = e+l - (e+l >= 1)
# compute approximation to 10**p*log(d), with error < 27
if p > 0:
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k) # error of <= 0.5 in c
# _ilog magnifies existing error in c by a factor of at most 10
log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
else:
# p <= 0: just approximate the whole thing by 0; error < 2.31
log_d = 0
# compute approximation to f*10**p*log(10), with error < 11.
if f:
extra = len(str(abs(f)))-1
if p + extra >= 0:
# error in f * _log10_digits(p+extra) < |f| * 1 = |f|
# after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
else:
f_log_ten = 0
else:
f_log_ten = 0
# error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
return _div_nearest(f_log_ten + log_d, 100)
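# Sketch: _dlog(2, 0, 3) approximates 10**3 * log(2) ~= 693, with
# absolute error at most 1.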
class _Log10Memoize(object):
"""Class to compute, store, and allow retrieval of, digits of the
constant log(10) = 2.302585.... This constant is needed by
Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
def __init__(self):
self.digits = "23025850929940456840179914546843642076011014886"
def getdigits(self, p):
"""Given an integer p >= 0, return floor(10**p)*log(10).
For example, self.getdigits(3) returns 2302.
"""
# digits are stored as a string, for quick conversion to
# integer in the case that we've already computed enough
# digits; the stored digits should always be correct
# (truncated, not rounded to nearest).
if p < 0:
raise ValueError("p should be nonnegative")
if p >= len(self.digits):
# compute p+3, p+6, p+9, ... digits; continue until at
# least one of the extra digits is nonzero
extra = 3
while True:
# compute p+extra digits, correct to within 1ulp
M = 10**(p+extra+2)
digits = str(_div_nearest(_ilog(10*M, M), 100))
if digits[-extra:] != '0'*extra:
break
extra += 3
# keep all reliable digits so far; remove trailing zeros
# and next nonzero digit
self.digits = digits.rstrip('0')[:-1]
return int(self.digits[:p+1])
_log10_digits = _Log10Memoize().getdigits
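# Sketch: _log10_digits(0) == 2 and _log10_digits(3) == 2302, i.e.
# floor(10**p * log(10)) truncated digits of log(10) = 2.302585...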
def _iexp(x, M, L=8):
"""Given integers x and M, M > 0, such that x/M is small in absolute
value, compute an integer approximation to M*exp(x/M). For 0 <=
x/M <= 2.4, the absolute error in the result is bounded by 60 (and
is usually much smaller)."""
# Algorithm: to compute exp(z) for a real number z, first divide z
# by a suitable power R of 2 so that |z/2**R| < 2**-L. Then
# compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
# series
#
# expm1(x) = x + x**2/2! + x**3/3! + ...
#
# Now use the identity
#
# expm1(2x) = expm1(x)*(expm1(x)+2)
#
# R times to compute the sequence expm1(z/2**R),
# expm1(z/2**(R-1)), ... , exp(z/2), exp(z).
# Find R such that x/2**R/M <= 2**-L
R = _nbits((x<<L)//M)
# Taylor series. (2**L)**T > M
T = -int(-10*len(str(M))//(3*L))
y = _div_nearest(x, T)
Mshift = M<<R
for i in range(T-1, 0, -1):
y = _div_nearest(x*(Mshift + y), Mshift * i)
# Expansion
for k in range(R-1, -1, -1):
Mshift = M<<(k+2)
y = _div_nearest(y*(y+Mshift), Mshift)
return M+y
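# Sketch: _iexp(0, 10**6) == 10**6 exactly (exp(0) == 1), and
# _iexp(10**6, 10**6) approximates 10**6 * e ~= 2718282 to within 60.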
def _dexp(c, e, p):
"""Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp."""
# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
p += 2
# compute log(10) with extra precision = adjusted exponent of c*10**e
extra = max(0, e + len(str(c)) - 1)
q = p + extra
# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
# rounding down
shift = e+q
if shift >= 0:
cshift = c*10**shift
else:
cshift = c//10**-shift
quot, rem = divmod(cshift, _log10_digits(q))
# reduce remainder back to original precision
rem = _div_nearest(rem, 10**extra)
# error in result of _iexp < 120; error after division < 0.62
return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
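# Sketch: _dexp(1, 0, 3) returns roughly (272, -2), since 272 * 10**-2 = 2.72
# approximates exp(1) to the requested 3 digits.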
def _dpower(xc, xe, yc, ye, p):
"""Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that:
10**(p-1) <= c <= 10**p, and
(c-1)*10**e < x**y < (c+1)*10**e
in other words, c*10**e is an approximation to x**y with p digits
of precision, and with an error in c of at most 1. (This is
almost, but not quite, the same as the error being < 1ulp: when c
== 10**(p-1) we can only guarantee error < 10ulp.)
We assume that: x is positive and not equal to 1, and y is nonzero.
"""
# Find b such that 10**(b-1) <= |y| <= 10**b
b = len(str(abs(yc))) + ye
# log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
lxc = _dlog(xc, xe, p+b+1)
# compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
shift = ye-b
if shift >= 0:
pc = lxc*yc*10**shift
else:
pc = _div_nearest(lxc*yc, 10**-shift)
if pc == 0:
# we prefer a result that isn't exactly 1; this makes it
# easier to compute a correctly rounded result in __pow__
if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
coeff, exp = 10**(p-1)+1, 1-p
else:
coeff, exp = 10**p-1, -p
else:
coeff, exp = _dexp(pc, -(p+1), p+1)
coeff = _div_nearest(coeff, 10)
exp += 1
return coeff, exp
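# Sketch: _dpower(2, 0, 3, 0, 4) approximates 2**3 == 8, returning roughly
# (8000, -3): a 4-digit coefficient with an error in it of at most 1.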
def _log10_lb(c, correction={
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
    if c <= 0:
        raise ValueError("The argument to _log10_lb should be positive.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
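# Sketch: _log10_lb(3) == 100*1 - 53 == 47, a valid lower bound for
# 100*log10(3) ~= 47.7.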
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False, allow_float=False):
"""Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
If allow_float is true, allow conversion from float; this
is used in the comparison methods (__eq__ and friends).
"""
if isinstance(other, Decimal):
return other
if isinstance(other, int):
return Decimal(other)
if allow_float and isinstance(other, float):
return Decimal.from_float(other)
if raiseit:
raise TypeError("Unable to convert %s to Decimal" % other)
return NotImplemented
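# Sketch: _convert_other(5) gives Decimal(5), while _convert_other(1.5)
# returns NotImplemented unless allow_float is true (then Decimal.from_float
# is used) or raiseit is true (then TypeError is raised).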
def _convert_for_comparison(self, other, equality_op=False):
"""Given a Decimal instance self and a Python object other, return
a pair (s, o) of Decimal instances such that "s op o" is
equivalent to "self op other" for any of the 6 comparison
operators "op".
"""
if isinstance(other, Decimal):
return self, other
# Comparison with a Rational instance (also includes integers):
# self op n/d <=> self*d op n (for n and d integers, d positive).
# A NaN or infinity can be left unchanged without affecting the
# comparison result.
if isinstance(other, _numbers.Rational):
if not self._is_special:
self = _dec_from_triple(self._sign,
str(int(self._int) * other.denominator),
self._exp)
return self, Decimal(other.numerator)
# Comparisons with float and complex types. == and != comparisons
# with complex numbers should succeed, returning either True or False
# as appropriate. Other comparisons return NotImplemented.
if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0:
other = other.real
if isinstance(other, float):
context = getcontext()
if equality_op:
context.flags[FloatOperation] = 1
else:
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are enabled")
return self, Decimal.from_float(other)
return NotImplemented, NotImplemented
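# Sketch: comparing Decimal('1.5') with Fraction(3, 2) is rewritten as
# comparing Decimal('3.0') with Decimal(3); both sides are scaled by the
# denominator, so the comparison stays exact.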
##### Setup Specific Contexts ############################################
# The default context prototype used by Context().
# It is mutable, so that new contexts can have different default values.
DefaultContext = Context(
prec=17, rounding=ROUND_HALF_EVEN,
traps=[DivisionByZero, Overflow, InvalidOperation],
flags=[],
Emax=308,
Emin=-324,
capitals=1,
clamp=0
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
prec=9, rounding=ROUND_HALF_UP,
traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
flags=[],
)
ExtendedContext = Context(
prec=9, rounding=ROUND_HALF_EVEN,
traps=[],
flags=[],
)
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
#import re
#_parser = re.compile(r""" # A numeric string consists of:
# \s*
# (?P<sign>[-+])? # an optional sign, followed by either...
# (
# (?=\d|\.\d) # ...a number (with at least one digit)
# (?P<int>\d*) # having a (possibly empty) integer part
# (\.(?P<frac>\d*))? # followed by an optional fractional part
# (E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
# |
# Inf(inity)? # ...an infinity, or...
# |
# (?P<signal>s)? # ...an (optionally signaling)
# NaN # NaN
# (?P<diag>\d*) # with (possibly empty) diagnostic info.
# )
# \s*
# \Z
#""", re.VERBOSE | re.IGNORECASE).match
import _jsre as re
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][#][0][minimumwidth][,][.precision][type]
# NOTE: _parse_format_specifier below depends on this regex, so it is
# compiled here (assuming the _jsre module supports VERBOSE and DOTALL).
_parse_format_specifier_regex = re.compile(r"""\A
(?:
   (?P<fill>.)?
   (?P<align>[<>=^])
)?
(?P<sign>[-+ ])?
(?P<alt>\#)?
(?P<zeropad>0)?
(?P<minimumwidth>(?!0)\d+)?
(?P<thousands_sep>,)?
(?:\.(?P<precision>0|(?!0)\d+))?
(?P<type>[eEfFgGn%])?
\Z
""", re.VERBOSE|re.DOTALL)
del re
# The locale module is only needed for the 'n' format specifier. The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
import locale as _locale
except ImportError:
pass
def _parse_format_specifier(format_spec, _localeconv=None):
"""Parse and validate a format specifier.
Turns a standard numeric format specifier into a dict, with the
following entries:
fill: fill character to pad field to minimum width
align: alignment type, either '<', '>', '=' or '^'
sign: either '+', '-' or ' '
minimumwidth: nonnegative integer giving minimum width
zeropad: boolean, indicating whether to pad with zeros
thousands_sep: string to use as thousands separator, or ''
grouping: grouping for thousands separators, in format
used by localeconv
decimal_point: string to use for decimal point
precision: nonnegative integer giving precision, or None
type: one of the characters 'eEfFgG%', or None
"""
m = _parse_format_specifier_regex.match(format_spec)
if m is None:
raise ValueError("Invalid format specifier: " + format_spec)
# get the dictionary
format_dict = m.groupdict()
# zeropad; defaults for fill and alignment. If zero padding
# is requested, the fill and align fields should be absent.
fill = format_dict['fill']
align = format_dict['align']
format_dict['zeropad'] = (format_dict['zeropad'] is not None)
if format_dict['zeropad']:
if fill is not None:
raise ValueError("Fill character conflicts with '0'"
" in format specifier: " + format_spec)
if align is not None:
raise ValueError("Alignment conflicts with '0' in "
"format specifier: " + format_spec)
format_dict['fill'] = fill or ' '
# PEP 3101 originally specified that the default alignment should
# be left; it was later agreed that right-aligned makes more sense
# for numeric types. See http://bugs.python.org/issue6857.
format_dict['align'] = align or '>'
# default sign handling: '-' for negative, '' for positive
if format_dict['sign'] is None:
format_dict['sign'] = '-'
# minimumwidth defaults to 0; precision remains None if not given
format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
if format_dict['precision'] is not None:
format_dict['precision'] = int(format_dict['precision'])
# if format type is 'g' or 'G' then a precision of 0 makes little
# sense; convert it to 1. Same if format type is unspecified.
if format_dict['precision'] == 0:
if format_dict['type'] is None or format_dict['type'] in 'gGn':
format_dict['precision'] = 1
# determine thousands separator, grouping, and decimal separator, and
# add appropriate entries to format_dict
if format_dict['type'] == 'n':
# apart from separators, 'n' behaves just like 'g'
format_dict['type'] = 'g'
if _localeconv is None:
_localeconv = _locale.localeconv()
if format_dict['thousands_sep'] is not None:
raise ValueError("Explicit thousands separator conflicts with "
"'n' type in format specifier: " + format_spec)
format_dict['thousands_sep'] = _localeconv['thousands_sep']
format_dict['grouping'] = _localeconv['grouping']
format_dict['decimal_point'] = _localeconv['decimal_point']
else:
if format_dict['thousands_sep'] is None:
format_dict['thousands_sep'] = ''
format_dict['grouping'] = [3, 0]
format_dict['decimal_point'] = '.'
return format_dict
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
return result
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
    #
    #   (1) an empty list,
    #   (2) a nonempty list of positive integers + [0], or
    #   (3) a list of positive integers + [locale.CHAR_MAX].
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
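# Sketch: _group_lengths([3, 0]) yields 3, 3, 3, ... (the final nonzero group
# repeats), and _group_lengths([3, 2, 0]) yields 3, 2, 2, 2, ...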
def _insert_thousands_sep(digits, spec, min_width=1):
"""Insert thousands separators into a digit string.
spec is a dictionary whose keys should include 'thousands_sep' and
'grouping'; typically it's the result of parsing the format
specifier using _parse_format_specifier.
The min_width keyword argument gives the minimum length of the
result, which will be padded on the left with zeros if necessary.
If necessary, the zero padding adds an extra '0' on the left to
avoid a leading thousands separator. For example, inserting
commas every three digits in '123456', with min_width=8, gives
'0,123,456', even though that has length 9.
"""
sep = spec['thousands_sep']
grouping = spec['grouping']
groups = []
for l in _group_lengths(grouping):
if l <= 0:
raise ValueError("group length should be positive")
# max(..., 1) forces at least 1 digit to the left of a separator
l = min(max(len(digits), min_width, 1), l)
groups.append('0'*(l - len(digits)) + digits[-l:])
digits = digits[:-l]
min_width -= l
if not digits and min_width <= 0:
break
min_width -= len(sep)
else:
l = max(len(digits), min_width, 1)
groups.append('0'*(l - len(digits)) + digits[-l:])
return sep.join(reversed(groups))
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
"""Format a number, given the following data:
is_negative: true if the number is negative, else false
intpart: string of digits that must appear before the decimal point
fracpart: string of digits that must come after the point
exp: exponent, as an integer
spec: dictionary resulting from parsing the format specifier
This function uses the information in spec to:
insert separators (decimal separator and thousands separators)
format the sign
format the exponent
add trailing '%' for the '%' type
zero-pad if necessary
fill and align if necessary
"""
sign = _format_sign(is_negative, spec)
if fracpart or spec['alt']:
fracpart = spec['decimal_point'] + fracpart
if exp != 0 or spec['type'] in 'eE':
echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
fracpart += "{0}{1:+}".format(echar, exp)
if spec['type'] == '%':
fracpart += '%'
if spec['zeropad']:
min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
else:
min_width = 0
intpart = _insert_thousands_sep(intpart, spec, min_width)
return _format_align(sign, intpart+fracpart, spec)
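# Sketch: _format_number(False, '12', '34', 0, _parse_format_specifier('10.2f'))
# should give '     12.34' (right-aligned in a field of width 10).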
##### Useful Constants (internal use only) ################################
# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)
# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo _PyHASH_MODULUS
_PyHASH_MODULUS = sys.hash_info.modulus
# hash values to use for positive and negative infinities, and nans
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS
_PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
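# By Fermat's little theorem, pow(10, p-2, p) == 10**-1 (mod p) for the prime
# p = _PyHASH_MODULUS, so multiplying a hash by _PyHASH_10INV divides it by 10
# modulo p.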
del sys
try:
import _decimal
except ImportError:
pass
else:
s1 = set(dir())
s2 = set(dir(_decimal))
for name in s1 - s2:
del globals()[name]
del s1, s2, name
from _decimal import *
if __name__ == '__main__':
import doctest, decimal
doctest.testmod(decimal)
| agpl-3.0 | 513,497,992,010,984,800 | 34.567042 | 100 | 0.551356 | false |
xcbat/vnpy | examples/CtaBacktesting/runBacktesting.py | 5 | 1089 | # encoding: UTF-8
"""
展示如何执行策略回测。
"""
from __future__ import division
from vnpy.trader.app.ctaStrategy.ctaBacktesting import BacktestingEngine, MINUTE_DB_NAME
if __name__ == '__main__':
from vnpy.trader.app.ctaStrategy.strategy.strategyAtrRsi import AtrRsiStrategy
    # Create the backtesting engine
engine = BacktestingEngine()
    # Set the engine's backtesting mode to bar (K-line) data
engine.setBacktestingMode(engine.BAR_MODE)
    # Set the start date of the backtesting data
engine.setStartDate('20120101')
    # Set product-related parameters
    engine.setSlippage(0.2)     # one tick of slippage for index futures
    engine.setRate(0.3/10000)   # commission rate of 0.003%
    engine.setSize(300)         # contract multiplier for index futures
    engine.setPriceTick(0.2)    # minimum price tick for index futures
    # Set the historical database to use
engine.setDatabase(MINUTE_DB_NAME, 'IF0000')
    # Create the strategy object in the engine
d = {'atrLength': 11}
engine.initStrategy(AtrRsiStrategy, d)
    # Run the backtest
engine.runBacktesting()
    # Show the backtest results
engine.showBacktestingResult() | mit | 7,037,243,917,143,781,000 | 20.333333 | 88 | 0.649162 | false |
MiLk/ansible | lib/ansible/modules/network/avi/avi_networksecuritypolicy.py | 44 | 4118 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_networksecuritypolicy
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of NetworkSecurityPolicy Avi RESTful Object
description:
- This module is used to configure NetworkSecurityPolicy object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
cloud_config_cksum:
description:
- Checksum of cloud configuration for network sec policy.
- Internally set by cloud connector.
created_by:
description:
- Creator name.
description:
description:
- User defined description for the object.
name:
description:
- Name of the object.
rules:
description:
- List of networksecurityrule.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a network security policy to block clients represented by ip group known_attackers
avi_networksecuritypolicy:
controller: ''
username: ''
password: ''
name: vs-gurutest-ns
rules:
- action: NETWORK_SECURITY_POLICY_ACTION_TYPE_DENY
age: 0
enable: true
index: 1
log: false
match:
client_ip:
group_refs:
- Demo:known_attackers
match_criteria: IS_IN
name: Rule 1
tenant_ref: Demo
'''
RETURN = '''
obj:
description: NetworkSecurityPolicy (api/networksecuritypolicy) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
cloud_config_cksum=dict(type='str',),
created_by=dict(type='str',),
description=dict(type='str',),
name=dict(type='str',),
rules=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'networksecuritypolicy',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 | 6,513,402,620,016,340,000 | 29.058394 | 99 | 0.628946 | false |
blowekamp/SimpleITK | Examples/AdvancedImageReading/AdvancedImageReading.py | 4 | 3257 | #!/usr/bin/env python
# =========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =========================================================================
import sys
import SimpleITK as sitk
import numpy as np
if len(sys.argv) < 2:
print('Wrong number of arguments.', file=sys.stderr)
print('Usage: ' + __file__ + ' image_file_name', file=sys.stderr)
sys.exit(1)
# Read image information without reading the bulk data.
file_reader = sitk.ImageFileReader()
file_reader.SetFileName(sys.argv[1])
file_reader.ReadImageInformation()
print(f'image size: {file_reader.GetSize()}\nimage spacing: {file_reader.GetSpacing()}')
# Some files have a rich meta-data dictionary (e.g. DICOM)
for key in file_reader.GetMetaDataKeys():
print(key + ': ' + file_reader.GetMetaData(key))
print('-' * 20)
# When low on memory, we can incrementally work on sub-images. The following
# subtracts two images (ok, the same image) by reading them as multiple
# sub-images.
image1_file_name = sys.argv[1]
image2_file_name = sys.argv[1]
parts = 5 # Number of sub-regions we use
file_reader = sitk.ImageFileReader()
file_reader.SetFileName(image1_file_name)
file_reader.ReadImageInformation()
image_size = file_reader.GetSize()
result_img = sitk.Image(file_reader.GetSize(), file_reader.GetPixelID(),
file_reader.GetNumberOfComponents())
result_img.SetSpacing(file_reader.GetSpacing())
result_img.SetOrigin(file_reader.GetOrigin())
result_img.SetDirection(file_reader.GetDirection())
extract_size = list(file_reader.GetSize())
extract_size[-1] = extract_size[-1] // parts
current_index = [0] * file_reader.GetDimension()
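# Walk the last axis in `parts` slabs: each pass reads the matching extract
# from both inputs, subtracts them, and writes the result into result_img.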
for i in range(parts):
if i == (parts - 1):
extract_size[-1] = image_size[-1] - current_index[-1]
file_reader.SetFileName(image1_file_name)
file_reader.SetExtractIndex(current_index)
file_reader.SetExtractSize(extract_size)
sub_image1 = file_reader.Execute()
file_reader.SetFileName(image2_file_name)
file_reader.SetExtractIndex(current_index)
file_reader.SetExtractSize(extract_size)
sub_image2 = file_reader.Execute()
idx = [slice(None,None)]*file_reader.GetDimension()
idx[-1] = current_index[-1]
result_img[idx] = sub_image1 - sub_image2
current_index[-1] += extract_size[-1]
del sub_image1
del sub_image2
# Check that our iterative approach is equivalent to reading the whole images.
if np.any(sitk.GetArrayViewFromImage(result_img
- sitk.ReadImage(image1_file_name)
+ sitk.ReadImage(image2_file_name))):
print('Subtraction error.')
sys.exit(1)
sys.exit(0)
| apache-2.0 | -7,246,275,664,811,980,000 | 36.011364 | 88 | 0.670556 | false |
kmod/icbd | stdlib/python2.5/ctypes/test/__init__.py | 15 | 6870 | import glob, os, sys, unittest, getopt, time
use_resources = []
class ResourceDenied(Exception):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not be enabled. Resources are defined by test modules.
"""
def is_resource_enabled(resource):
"""Test whether a resource is enabled.
If the caller's module is __main__ then automatically return True."""
if sys._getframe().f_back.f_globals.get("__name__") == "__main__":
return True
result = use_resources is not None and \
(resource in use_resources or "*" in use_resources)
if not result:
_unavail[resource] = None
return result
_unavail = {}
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available.
If the caller's module is __main__ then automatically return True."""
# see if the caller's module is __main__ - if so, treat as if
# the resource was set
if sys._getframe().f_back.f_globals.get("__name__") == "__main__":
return
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
def find_package_modules(package, mask):
import fnmatch
if hasattr(package, "__loader__"):
path = package.__name__.replace(".", os.path.sep)
mask = os.path.join(path, mask)
for fnm in package.__loader__._files.iterkeys():
if fnmatch.fnmatchcase(fnm, mask):
yield os.path.splitext(fnm)[0].replace(os.path.sep, ".")
else:
path = package.__path__[0]
for fnm in os.listdir(path):
if fnmatch.fnmatchcase(fnm, mask):
yield "%s.%s" % (package.__name__, os.path.splitext(fnm)[0])
def get_tests(package, mask, verbosity):
"""Return a list of skipped test modules, and a list of test cases."""
tests = []
skipped = []
for modname in find_package_modules(package, mask):
try:
mod = __import__(modname, globals(), locals(), ['*'])
except ResourceDenied, detail:
skipped.append(modname)
if verbosity > 1:
print >> sys.stderr, "Skipped %s: %s" % (modname, detail)
continue
except Exception, detail:
print >> sys.stderr, "Warning: could not import %s: %s" % (modname, detail)
continue
for name in dir(mod):
if name.startswith("_"):
continue
o = getattr(mod, name)
if type(o) is type(unittest.TestCase) and issubclass(o, unittest.TestCase):
tests.append(o)
return skipped, tests
def usage():
print __doc__
return 1
def test_with_refcounts(runner, verbosity, testcase):
"""Run testcase several times, tracking reference counts."""
import gc
import ctypes
ptc = ctypes._pointer_type_cache.copy()
cfc = ctypes._c_functype_cache.copy()
wfc = ctypes._win_functype_cache.copy()
# when searching for refcount leaks, we have to manually reset any
# caches that ctypes has.
def cleanup():
ctypes._pointer_type_cache = ptc.copy()
ctypes._c_functype_cache = cfc.copy()
ctypes._win_functype_cache = wfc.copy()
gc.collect()
test = unittest.makeSuite(testcase)
for i in range(5):
rc = sys.gettotalrefcount()
runner.run(test)
cleanup()
COUNT = 5
refcounts = [None] * COUNT
for i in range(COUNT):
rc = sys.gettotalrefcount()
runner.run(test)
cleanup()
refcounts[i] = sys.gettotalrefcount() - rc
if filter(None, refcounts):
print "%s leaks:\n\t" % testcase, refcounts
elif verbosity:
print "%s: ok." % testcase
class TestRunner(unittest.TextTestRunner):
def run(self, test, skipped):
"Run the given test case or test suite."
# Same as unittest.TextTestRunner.run, except that it reports
# skipped tests.
result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = stopTime - startTime
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
        if _unavail:  # report resources that were unavailable, i.e. skipped
requested = _unavail.keys()
requested.sort()
self.stream.writeln("Ran %d test%s in %.3fs (%s module%s skipped)" %
(run, run != 1 and "s" or "", timeTaken,
len(skipped),
len(skipped) != 1 and "s" or ""))
self.stream.writeln("Unavailable resources: %s" % ", ".join(requested))
else:
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", timeTaken))
self.stream.writeln()
if not result.wasSuccessful():
self.stream.write("FAILED (")
failed, errored = map(len, (result.failures, result.errors))
if failed:
self.stream.write("failures=%d" % failed)
if errored:
if failed: self.stream.write(", ")
self.stream.write("errors=%d" % errored)
self.stream.writeln(")")
else:
self.stream.writeln("OK")
return result
def main(*packages):
try:
opts, args = getopt.getopt(sys.argv[1:], "rqvu:")
except getopt.error:
return usage()
verbosity = 1
search_leaks = False
for flag, value in opts:
if flag == "-q":
verbosity -= 1
elif flag == "-v":
verbosity += 1
elif flag == "-r":
try:
sys.gettotalrefcount
except AttributeError:
print >> sys.stderr, "-r flag requires Python debug build"
return -1
search_leaks = True
elif flag == "-u":
use_resources.extend(value.split(","))
mask = "test_*.py"
if args:
mask = args[0]
for package in packages:
run_tests(package, mask, verbosity, search_leaks)
def run_tests(package, mask, verbosity, search_leaks):
skipped, testcases = get_tests(package, mask, verbosity)
runner = TestRunner(verbosity=verbosity)
suites = [unittest.makeSuite(o) for o in testcases]
suite = unittest.TestSuite(suites)
result = runner.run(suite, skipped)
if search_leaks:
# hunt for refcount leaks
runner = BasicTestRunner()
for t in testcases:
test_with_refcounts(runner, verbosity, t)
return bool(result.errors)
class BasicTestRunner:
def run(self, test):
result = unittest.TestResult()
test(result)
return result
| mit | -1,031,678,062,843,490,400 | 33.009901 | 87 | 0.572489 | false |
beezee/GAE-Django-site | django/db/backends/postgresql/creation.py | 247 | 3753 | from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.util import truncate_name
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'serial',
'BooleanField': 'boolean',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'timestamp with time zone',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
}
def sql_table_creation_suffix(self):
assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
if self.connection.settings_dict['TEST_CHARSET']:
return "WITH ENCODING '%s'" % self.connection.settings_dict['TEST_CHARSET']
return ''
def sql_indexes_for_field(self, model, f, style):
if f.db_index and not f.unique:
qn = self.connection.ops.quote_name
db_table = model._meta.db_table
tablespace = f.db_tablespace or model._meta.db_tablespace
if tablespace:
sql = self.connection.ops.tablespace_sql(tablespace)
if sql:
tablespace_sql = ' ' + sql
else:
tablespace_sql = ''
else:
tablespace_sql = ''
def get_index_sql(index_name, opclass=''):
return (style.SQL_KEYWORD('CREATE INDEX') + ' ' +
                    style.SQL_TABLE(qn(truncate_name(index_name, self.connection.ops.max_name_length()))) + ' ' +
style.SQL_KEYWORD('ON') + ' ' +
style.SQL_TABLE(qn(db_table)) + ' ' +
"(%s%s)" % (style.SQL_FIELD(qn(f.column)), opclass) +
"%s;" % tablespace_sql)
output = [get_index_sql('%s_%s' % (db_table, f.column))]
# Fields with database column types of `varchar` and `text` need
# a second index that specifies their operator class, which is
# needed when performing correct LIKE queries outside the
# C locale. See #12234.
db_type = f.db_type(connection=self.connection)
if db_type.startswith('varchar'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' varchar_pattern_ops'))
elif db_type.startswith('text'):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' text_pattern_ops'))
else:
output = []
return output
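    # Illustrative sketch (hypothetical model/table names): for a CharField
    # "title" declared with db_index=True on table "app_book",
    # sql_indexes_for_field() emits the plain index plus the pattern-ops
    # index used for LIKE queries outside the C locale:
    #
    #   CREATE INDEX "app_book_title" ON "app_book" ("title");
    #   CREATE INDEX "app_book_title_like" ON "app_book" ("title" varchar_pattern_ops);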
| bsd-3-clause | -1,955,286,500,210,131,700 | 48.381579 | 146 | 0.548095 | false |
SubhasisDutta/subhasisdutta.com | src/controller/WorkAdminController.py | 2 | 10845 | import webapp2
import os
from google.appengine.ext.webapp import template
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import ndb
from google.appengine.api import images
from src.model.WorkModels import Work, PhotoModel, WorkResourceAttribute
class WorkAdminCreate(webapp2.RequestHandler):
#create project page render
def get(self):
user=users.get_current_user()
if user is None:
self.redirect(users.create_login_url(self.request.uri))
else:
self.response.headers["Content-Type"]="text/html"
admins=['subhasisdutta300887','subhasistubai','[email protected]']
if user.nickname() not in admins:
self.response.out.write(callAccessDeniedPage())
else:
template_values={
'pageTitle':"Create New Work Project",
}
path=os.path.join(os.path.dirname(__file__),'../../template/createWork.html')
page=template.render(path,template_values)
self.response.out.write(page)
#create project post
def post(self):
handle=self.request.get("handle")
category=self.request.get("category")
title=self.request.get("title")
description=self.request.get("description")
icon=self.request.get("iconImage")
largeImage=self.request.get("largeImage")
iconImg=db.Blob(icon)
largeImg=db.Blob(largeImage)
workItem=Work(id=handle)
workItem.category=category
workItem.title=title
workItem.order=int(self.request.get("order"))
workItem.description=description
workItem.iconImage=iconImg
workItem.bigImage=largeImg
workItem.put()
self.response.headers["Content-Type"]="text/html"
self.response.out.write("""
<html>
<head>
<title>New WORK Created</title>
</head>
<body>
<h3> New WORK Created</h3>
</body></html>
""")
class WorkAdminEdit(webapp2.RequestHandler):
#display the 3 form controller to get
def get(self):
user=users.get_current_user()
if user is None:
self.redirect(users.create_login_url(self.request.uri))
else:
self.response.headers["Content-Type"]="text/html"
admins=['subhasisdutta300887','subhasistubai','[email protected]']
if user.nickname() not in admins:
self.response.out.write(callAccessDeniedPage())
else:
work_name=self.request.get("name")
work_key=ndb.Key('Work',work_name)
work=work_key.get()
if work is None:
self.response.out.write(callNoSuchWorkPage())
else:
attrList=ndb.get_multi(work.attributes)
photoList=ndb.get_multi(work.photoGallery)
template_values={
'pageTitle':"Edit Work",
'work':work,
'attrList': attrList,
'photoList': photoList
}
path=os.path.join(os.path.dirname(__file__),'../../template/editWork.html')
page=template.render(path,template_values)
self.response.out.write(page)
def post(self):
user=users.get_current_user()
if user is None:
self.redirect(users.create_login_url(self.request.uri))
else:
self.response.headers["Content-Type"]="text/html"
admins=['subhasisdutta300887','subhasistubai','[email protected]']
if user.nickname() not in admins:
self.response.out.write(callAccessDeniedPage())
else:
work_name=self.request.get("name")
work_key=ndb.Key('Work',work_name)
work=work_key.get()
if work is None:
self.response.out.write(callNoSuchWorkPage())
else:
title=self.request.get("title")
description=self.request.get("description")
work.title=title
work.description=description
work.order=int(self.request.get("order"))
work.publish=bool(self.request.get("publish"))
work.put()
self.response.headers["Content-Type"]="text/html"
self.response.out.write("""
<html>
<head>
<title>Project main Updated</title>
</head>
<body>
<h3> Project main Updated</h3>
</body></html>
""")
class AddWorkAttribute(webapp2.RequestHandler):
    # NOTE: unlike the other admin handlers above, this POST endpoint performs
    # no login/admin check before writing to the datastore.
def post(self):
attribute=WorkResourceAttribute(name=self.request.get("name"),htmlDescription=self.request.get("htmlDescription"),order=int(self.request.get("order")))
attribute.put()
work_key=ndb.Key('Work',self.request.get("workHandle"))
work=work_key.get()
work.attributes.append(attribute.key)
work.put()
self.response.headers["Content-Type"]="text/html"
self.response.out.write("""
<html>
<head>
<title>New Attribute Added</title>
</head>
<body>
<h3> New Attribute Added</h3>
</body></html>
""")
class AddPhotoTOCollection(webapp2.RequestHandler):
def get(self):
user=users.get_current_user()
if user is None:
self.redirect(users.create_login_url(self.request.uri))
else:
self.response.headers["Content-Type"]="text/html"
admins=['subhasisdutta300887','subhasistubai','[email protected]']
if user.nickname() not in admins:
self.response.out.write(callAccessDeniedPage())
else:
template_values={
'pageTitle':"Create New Photo",
}
path=os.path.join(os.path.dirname(__file__),'../../template/createPhoto.html')
page=template.render(path,template_values)
self.response.out.write(page)
def post(self):
photoImage=self.request.get("image")
photoImg=db.Blob(photoImage)
thumbnail=images.resize(photoImage, 250, 170)
thumbnailImg=db.Blob(thumbnail)
photo=PhotoModel(title=self.request.get("title"),image=photoImg,type=self.request.get("type"),thumbnail=thumbnailImg,caption=self.request.get("caption"))
photo.put()
self.response.headers["Content-Type"]="text/html"
self.response.out.write("""
<html>
<head>
<title>Photo Added</title>
</head>
<body>
<h3> New Photo Added</h3>
</body></html>
""")
class MapPhotoToWork(webapp2.RequestHandler):
def get(self):
user=users.get_current_user()
if user is None:
self.redirect(users.create_login_url(self.request.uri))
else:
self.response.headers["Content-Type"]="text/html"
admins=['subhasisdutta300887','subhasistubai','[email protected]']
if user.nickname() not in admins:
self.response.out.write(callAccessDeniedPage())
else:
work_name=self.request.get("name")
work_key=ndb.Key('Work',work_name)
work=work_key.get()
if work is None:
self.response.out.write(callNoSuchWorkPage())
else:
attrList=ndb.get_multi(work.attributes)
photoList=ndb.get_multi(work.photoGallery)
photoCollection= PhotoModel.query()
template_values={
'pageTitle':"Map Photo To Work : ",
'work':work,
'attrList': attrList,
'photoList': photoList,
'photoCollection': photoCollection
}
path=os.path.join(os.path.dirname(__file__),'../../template/mapPhotoWork.html')
page=template.render(path,template_values)
self.response.out.write(page)
def post(self):
workPhoto_key=ndb.Key(urlsafe=self.request.get("photoKey"))
work_key=ndb.Key('Work',self.request.get("name"))
work=work_key.get()
work.photoGallery.append(workPhoto_key)
work.put()
self.response.headers["Content-Type"]="text/html"
self.response.out.write("""
<html>
<head>
<title>New Photo Added</title>
</head>
<body>
<h3> New Photo Added</h3>
</body></html>
""")
def callNoSuchWorkPage():
template_parameters={
'pageTitle':'No Such Work!!!!',
'title':"ERROR! Requested Work cannot be found",
'message':"ERROR!! The requested work was not found. Please check the name again."
}
error_template=os.path.join(os.path.dirname(__file__),'../../template/error.html')
page=template.render(error_template,template_parameters)
return page
def callAccessDeniedPage():
template_parameters={
'pageTitle':'Access Denied!!!!',
'title':"ERROR! You don't have access to this page",
'message':"ERROR!! You don't have access to this page."
}
error_template=os.path.join(os.path.dirname(__file__),'../../template/error.html')
page=template.render(error_template,template_parameters)
    return page
| mit | -768,249,547,126,028,700 | 43.633745 | 167 | 0.500415 | false |
jarussi/riotpy | riotpy/managers/match_history.py | 1 | 1786 | # coding: utf-8
from riotpy.resources.match import MatchResource
from base import Manager
class MatchHistoryManager(Manager):
"""
    We are splitting this one and MatchManager because Riot did it.
That way, our lib is closer to their documentation.
"""
def get_summoner_match_history(self, summoner_id, begin_index=None, end_index=None,
champion_ids=None, ranked_queues=None):
"""
Get a summoner match history
:param summoner_id: The ID of the summoner.
:param begin_index: The begin index to use for fetching games.
:param end_index: The end index to use for fetching games.
:param champion_ids: Comma-separated list of champion IDs to use
for fetching games.
        :param ranked_queues: Comma-separated list of ranked queue types
to use for fetching games. Non-ranked queue types will be ignored.
Ex: riotpy.constants.RankedQueues
:return: A resources.match.MatchResource list
"""
extra = {}
if begin_index:
try:
extra['beginIndex'] = int(begin_index)
            except (TypeError, ValueError):
pass
if end_index:
try:
extra['endIndex'] = int(end_index)
            except (TypeError, ValueError):
pass
if champion_ids:
extra['championIds'] = champion_ids
if ranked_queues:
extra['rankedQueues'] = ranked_queues
content = self._get('api/lol/{}/{}/matchhistory/{}'.format(
self.api.region,
self.version,
summoner_id), extra=extra
)
return self._dict_to_resource(content['matches'], resource_class=MatchResource)
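    # Illustrative usage sketch (assumes the client exposes this manager as
    # `api.match_history`; the summoner id is hypothetical):
    #
    #   matches = api.match_history.get_summoner_match_history(
    #       summoner_id=12345, begin_index=0, end_index=10)
    #   for match in matches:
    #       print(match)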
| bsd-3-clause | -5,487,607,660,670,831,000 | 34.019608 | 87 | 0.570549 | false |
proxysh/Safejumper-for-Desktop | buildlinux/env64/lib/python2.7/site-packages/Crypto/PublicKey/__init__.py | 124 | 1876 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Public-key encryption and signature algorithms.
Public-key encryption uses two different keys, one for encryption and
one for decryption. The encryption key can be made public, and the
decryption key is kept private. Many public-key algorithms can also
be used to sign messages, and some can *only* be used for signatures.
======================== =============================================
Module Description
======================== =============================================
Crypto.PublicKey.DSA Digital Signature Algorithm (Signature only)
Crypto.PublicKey.ElGamal (Signing and encryption)
Crypto.PublicKey.RSA (Signing, encryption, and blinding)
======================== =============================================
:undocumented: _DSA, _RSA, _fastmath, _slowmath, pubkey
"""
__all__ = ['RSA', 'DSA', 'ElGamal']
__revision__ = "$Id$"
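# Illustrative sketch (not part of the original module; the key size is an
# arbitrary choice): generating and exporting an RSA key with this package.
#
#   from Crypto.PublicKey import RSA
#   key = RSA.generate(2048)
#   public_key = key.publickey()
#   pem = key.exportKey()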
| gpl-2.0 | 1,403,783,864,913,097,200 | 44.756098 | 71 | 0.60661 | false |
10clouds/edx-platform | cms/djangoapps/course_creators/models.py | 62 | 4060 | """
Table for storing information about whether or not Studio users have course creation privileges.
"""
from django.db import models
from django.db.models.signals import post_init, post_save
from django.dispatch import receiver, Signal
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.translation import ugettext as _
# A signal that will be sent when users should be added or removed from the creator group
update_creator_state = Signal(providing_args=["caller", "user", "state"])
# A signal that will be sent when admin should be notified of a pending user request
send_admin_notification = Signal(providing_args=["user"])
# A signal that will be sent when user should be notified of change in course creator privileges
send_user_notification = Signal(providing_args=["user", "state"])
class CourseCreator(models.Model):
"""
Creates the database table model.
"""
UNREQUESTED = 'unrequested'
PENDING = 'pending'
GRANTED = 'granted'
DENIED = 'denied'
# Second value is the "human-readable" version.
STATES = (
(UNREQUESTED, _(u'unrequested')),
(PENDING, _(u'pending')),
(GRANTED, _(u'granted')),
(DENIED, _(u'denied')),
)
user = models.OneToOneField(User, help_text=_("Studio user"))
state_changed = models.DateTimeField('state last updated', auto_now_add=True,
help_text=_("The date when state was last updated"))
state = models.CharField(max_length=24, blank=False, choices=STATES, default=UNREQUESTED,
help_text=_("Current course creator state"))
note = models.CharField(max_length=512, blank=True, help_text=_("Optional notes about this user (for example, "
"why course creation access was denied)"))
def __unicode__(self):
return u"{0} | {1} [{2}]".format(self.user, self.state, self.state_changed)
@receiver(post_init, sender=CourseCreator)
def post_init_callback(sender, **kwargs):
"""
Extend to store previous state.
"""
instance = kwargs['instance']
instance.orig_state = instance.state
@receiver(post_save, sender=CourseCreator)
def post_save_callback(sender, **kwargs):
"""
Extend to update state_changed time and fire event to update course creator group, if appropriate.
"""
instance = kwargs['instance']
# We only wish to modify the state_changed time if the state has been modified. We don't wish to
# modify it for changes to the notes field.
if instance.state != instance.orig_state:
granted_state_change = instance.state == CourseCreator.GRANTED or instance.orig_state == CourseCreator.GRANTED
# If either old or new state is 'granted', we must manipulate the course creator
# group maintained by authz. That requires staff permissions (stored admin).
if granted_state_change:
assert hasattr(instance, 'admin'), 'Must have stored staff user to change course creator group'
update_creator_state.send(
sender=sender,
caller=instance.admin,
user=instance.user,
state=instance.state
)
# If user has been denied access, granted access, or previously granted access has been
# revoked, send a notification message to the user.
if instance.state == CourseCreator.DENIED or granted_state_change:
send_user_notification.send(
sender=sender,
user=instance.user,
state=instance.state
)
# If the user has gone into the 'pending' state, send a notification to interested admin.
if instance.state == CourseCreator.PENDING:
send_admin_notification.send(
sender=sender,
user=instance.user
)
instance.state_changed = timezone.now()
instance.orig_state = instance.state
instance.save()
| agpl-3.0 | 2,717,426,107,728,951,000 | 40.010101 | 118 | 0.646552 | false |
Gamebasis/3DGamebasisServer | GameData/blender-2.71-windows64/2.71/scripts/addons/game_engine_save_as_runtime.py | 5 | 8665 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "Save As Game Engine Runtime",
"author": "Mitchell Stokes (Moguri)",
"version": (0, 3, 1),
"blender": (2, 61, 0),
"location": "File > Export",
"description": "Bundle a .blend file with the Blenderplayer",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Game_Engine/Save_As_Runtime",
"category": "Game Engine",
}
import bpy
import os
import sys
import shutil
import tempfile
def CopyPythonLibs(dst, overwrite_lib, report=print):
import platform
    # use the platform module to find python's libpath
src = os.path.dirname(platform.__file__)
# dst points to lib/, but src points to current python's library path, eg:
# '/usr/lib/python3.2' vs '/usr/lib'
# append python's library dir name to destination, so only python's
# libraries would be copied
if os.name == 'posix':
dst = os.path.join(dst, os.path.basename(src))
if os.path.exists(src):
write = False
if os.path.exists(dst):
if overwrite_lib:
shutil.rmtree(dst)
write = True
else:
write = True
if write:
shutil.copytree(src, dst, ignore=lambda dir, contents: [i for i in contents if i == '__pycache__'])
else:
        report({'WARNING'}, "Python not found in %r, skipping python copy" % src)
def WriteAppleRuntime(player_path, output_path, copy_python, overwrite_lib):
# Enforce the extension
if not output_path.endswith('.app'):
output_path += '.app'
# Use the system's cp command to preserve some meta-data
os.system('cp -R "%s" "%s"' % (player_path, output_path))
bpy.ops.wm.save_as_mainfile(filepath=os.path.join(output_path, "Contents/Resources/game.blend"),
relative_remap=False,
compress=False,
copy=True,
)
# Python doesn't need to be copied for OS X since it's already inside blenderplayer.app
def WriteRuntime(player_path, output_path, copy_python, overwrite_lib, copy_dlls, report=print):
import struct
# Check the paths
if not os.path.isfile(player_path) and not(os.path.exists(player_path) and player_path.endswith('.app')):
report({'ERROR'}, "The player could not be found! Runtime not saved")
return
# Check if we're bundling a .app
if player_path.endswith('.app'):
WriteAppleRuntime(player_path, output_path, copy_python, overwrite_lib)
return
# Enforce "exe" extension on Windows
if player_path.endswith('.exe') and not output_path.endswith('.exe'):
output_path += '.exe'
# Get the player's binary and the offset for the blend
file = open(player_path, 'rb')
player_d = file.read()
offset = file.tell()
file.close()
# Create a tmp blend file (Blenderplayer doesn't like compressed blends)
tempdir = tempfile.mkdtemp()
blend_path = os.path.join(tempdir, bpy.path.clean_name(output_path))
bpy.ops.wm.save_as_mainfile(filepath=blend_path,
relative_remap=False,
compress=False,
copy=True,
)
# Get the blend data
blend_file = open(blend_path, 'rb')
blend_d = blend_file.read()
blend_file.close()
# Get rid of the tmp blend, we're done with it
os.remove(blend_path)
os.rmdir(tempdir)
# Create a new file for the bundled runtime
output = open(output_path, 'wb')
# Write the player and blend data to the new runtime
print("Writing runtime...", end=" ")
output.write(player_d)
output.write(blend_d)
# Store the offset (an int is 4 bytes, so we split it up into 4 bytes and save it)
output.write(struct.pack('B', (offset>>24)&0xFF))
output.write(struct.pack('B', (offset>>16)&0xFF))
output.write(struct.pack('B', (offset>>8)&0xFF))
output.write(struct.pack('B', (offset>>0)&0xFF))
# Stuff for the runtime
output.write(b'BRUNTIME')
output.close()
print("done")
# Make the runtime executable on Linux
if os.name == 'posix':
os.chmod(output_path, 0o755)
# Copy bundled Python
blender_dir = os.path.dirname(bpy.app.binary_path)
runtime_dir = os.path.dirname(output_path)
if copy_python:
print("Copying Python files...", end=" ")
py_folder = os.path.join(bpy.app.version_string.split()[0], "python", "lib")
dst = os.path.join(runtime_dir, py_folder)
CopyPythonLibs(dst, overwrite_lib, report)
print("done")
# And DLLs
if copy_dlls:
print("Copying DLLs...", end=" ")
for file in [i for i in os.listdir(blender_dir) if i.lower().endswith('.dll')]:
src = os.path.join(blender_dir, file)
dst = os.path.join(runtime_dir, file)
shutil.copy2(src, dst)
print("done")
from bpy.props import *
class SaveAsRuntime(bpy.types.Operator):
bl_idname = "wm.save_as_runtime"
bl_label = "Save As Game Engine Runtime"
bl_options = {'REGISTER'}
if sys.platform == 'darwin':
# XXX, this line looks suspicious, could be done better?
blender_bin_dir = '/' + os.path.join(*bpy.app.binary_path.split('/')[0:-4])
ext = '.app'
else:
blender_bin_path = bpy.app.binary_path
blender_bin_dir = os.path.dirname(blender_bin_path)
ext = os.path.splitext(blender_bin_path)[-1].lower()
default_player_path = os.path.join(blender_bin_dir, 'blenderplayer' + ext)
player_path = StringProperty(
name="Player Path",
description="The path to the player to use",
default=default_player_path,
subtype='FILE_PATH',
)
filepath = StringProperty(
subtype='FILE_PATH',
)
copy_python = BoolProperty(
name="Copy Python",
description="Copy bundle Python with the runtime",
default=True,
)
overwrite_lib = BoolProperty(
name="Overwrite 'lib' folder",
description="Overwrites the lib folder (if one exists) with the bundled Python lib folder",
default=False,
)
# Only Windows has dlls to copy
if ext == '.exe':
copy_dlls = BoolProperty(
name="Copy DLLs",
description="Copy all needed DLLs with the runtime",
default=True,
)
else:
copy_dlls = False
def execute(self, context):
import time
start_time = time.clock()
print("Saving runtime to %r" % self.filepath)
WriteRuntime(self.player_path,
self.filepath,
self.copy_python,
self.overwrite_lib,
self.copy_dlls,
self.report,
)
print("Finished in %.4fs" % (time.clock()-start_time))
return {'FINISHED'}
def invoke(self, context, event):
if not self.filepath:
ext = '.app' if sys.platform == 'darwin' else os.path.splitext(bpy.app.binary_path)[-1]
self.filepath = bpy.path.ensure_ext(bpy.data.filepath, ext)
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
def menu_func(self, context):
self.layout.operator(SaveAsRuntime.bl_idname)
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_export.append(menu_func)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_export.remove(menu_func)
if __name__ == "__main__":
register()
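# Illustrative sketch (an assumption; the real reader is the Blenderplayer
# binary itself): recovering the embedded .blend from a runtime written by
# WriteRuntime(). The trailer is 4 big-endian offset bytes plus b'BRUNTIME'.
#
#   import struct
#   with open("game_runtime", "rb") as f:  # hypothetical file name
#       f.seek(-12, 2)                     # 4 offset bytes + 8 marker bytes
#       offset = struct.unpack('>I', f.read(4))[0]
#       assert f.read(8) == b'BRUNTIME'
#       f.seek(offset)
#       blend_data = f.read()[:-12]        # strip the 12-byte trailer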
| gpl-3.0 | -7,549,425,269,370,830,000 | 32.585271 | 111 | 0.594114 | false |
wrightni/OSSP | segment.py | 1 | 6298 | # title: Watershed Transform
# author: Nick Wright
# adapted from: Justin Chen, Arnold Song
import numpy as np
import gc
import warnings
from skimage import filters, morphology, feature, img_as_ubyte
from scipy import ndimage
from ctypes import *
from lib import utils
# For Testing:
from skimage import segmentation
import matplotlib.image as mimg
def segment_image(input_data, image_type=False):
'''
Wrapper function that handles all of the processing to create watersheds
'''
#### Define segmentation parameters
    # High_threshold: Upper threshold for canny edge detection; pixels whose gradient exceeds this are marked as strong edges.
# Low_threshold: Lower threshold for canny edge detection. Determines which "weak" edges to keep.
# Values above this amount that are connected to a strong edge will be marked as an edge.
# Gauss_sigma: sigma value to use in the gaussian blur applied to the image prior to segmentation.
# Value chosen here should be based on the quality and resolution of the image
    # Feature_separation: minimum distance, in pixels, between the center point of multiple features. Use a lower value
    #       for lower resolution (.5m) images, and a higher value for higher resolution aerial images (~.1m).
# These values are dependent on the type of imagery being processed, and are
# mostly empirically derived.
# band_list contains the three bands to be used for segmentation
if image_type == 'pan':
high_threshold = 0.15 * 255 ## Needs to be checked
low_threshold = 0.05 * 255 ## Needs to be checked
gauss_sigma = 1
feature_separation = 1
band_list = [0, 0, 0]
elif image_type == 'wv02_ms':
high_threshold = 0.20 * 255 ## Needs to be checked
low_threshold = 0.05 * 255 ## Needs to be checked
gauss_sigma = 1.5
feature_separation = 3
band_list = [4, 2, 1]
else: #image_type == 'srgb'
high_threshold = 0.15 * 255
low_threshold = 0.05 * 255
gauss_sigma = 2
feature_separation = 5
band_list = [0, 1, 2]
segmented_data = watershed_transformation(input_data, band_list, low_threshold, high_threshold,
gauss_sigma,feature_separation)
# Method that provides the user an option to view the original image
# side by side with the segmented image.
# print(np.amax(segmented_data))
# image_data = np.array([input_data[band_list[0]],
# input_data[band_list[1]],
# input_data[band_list[2]]],
# dtype=np.uint8)
# ws_bound = segmentation.find_boundaries(segmented_data)
# ws_display = utils.create_composite(image_data)
#
# # save_name = '/Users/nicholas/Desktop/original_{}.png'
# # mimg.imsave(save_name.format(np.random.randint(0,100)), ws_display, format='png')
#
# ws_display[:, :, 0][ws_bound] = 240
# ws_display[:, :, 1][ws_bound] = 80
# ws_display[:, :, 2][ws_bound] = 80
#
# save_name = '/Users/nicholas/Desktop/seg_{}.png'
# mimg.imsave(save_name.format(np.random.randint(0, 100)), ws_display, format='png')
return segmented_data
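# Illustrative usage sketch (the input array below is synthetic; real callers
# pass image bands loaded earlier in the processing pipeline):
#
#   fake_bands = np.random.randint(0, 255, (3, 256, 256)).astype(np.uint8)
#   labels = segment_image(fake_bands, image_type='srgb')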
def watershed_transformation(image_data, band_list, low_threshold, high_threshold, gauss_sigma, feature_separation):
'''
Runs a watershed transform on the main dataset
1. Create a gradient image using the sobel algorithm
2. Adjust the gradient image based on given threshold and amplification.
3. Find the local minimum gradient values and place a marker
4. Construct watersheds on top of the gradient image starting at the
markers.
'''
# If this block has no data, return a placeholder watershed.
if np.amax(image_data[0]) <= 1:
# We just need the dimensions from one band
return np.zeros(np.shape(image_data[0]))
# Build a raster of detected edges to inform the creation of watershed seed points
edge_image = edge_detect(image_data, band_list, gauss_sigma, low_threshold, high_threshold)
# Build a raster of image gradient that will be the base for watershed expansion.
grad_image = build_gradient(image_data, band_list, gauss_sigma)
image_data = None
# Find local minimum values in the edge image by inverting
# edge_image and finding the local maximum values
inv_edge = np.empty_like(edge_image, dtype=np.uint8)
np.subtract(255, edge_image, out=inv_edge)
edge_image = None
# Distance to the nearest detected edge
distance_image = ndimage.distance_transform_edt(inv_edge)
inv_edge = None
# Local maximum distance
local_min = feature.peak_local_max(distance_image, min_distance=feature_separation,
exclude_border=False, indices=False, num_peaks_per_label=1)
distance_image = None
markers = ndimage.label(local_min)[0]
local_min = None
# Build a watershed from the markers on top of the edge image
im_watersheds = morphology.watershed(grad_image, markers)
grad_image = None
# Set all values outside of the image area (empty pixels, usually caused by
# orthorectification) to one value, at the end of the watershed list.
# im_watersheds[empty_pixels] = np.amax(im_watersheds)+1
gc.collect()
return im_watersheds
def edge_detect(image_data, band_list, gauss_sigma, low_threshold, high_threshold):
# Detect edges in the image with a canny edge detector
with warnings.catch_warnings():
warnings.simplefilter("ignore")
edge_image = img_as_ubyte(feature.canny(image_data[band_list[1]], sigma=gauss_sigma,
low_threshold=low_threshold, high_threshold=high_threshold))
return edge_image
def build_gradient(image_data, band_list, gauss_sigma):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
smooth_im_blue = ndimage.filters.gaussian_filter(image_data[band_list[2]], sigma=gauss_sigma)
grad_image = img_as_ubyte(filters.scharr(smooth_im_blue))
# Prevent the watersheds from 'leaking' along the sides of the image
grad_image[:, 0] = grad_image[:, 1]
grad_image[:, -1] = grad_image[:, -2]
grad_image[0, :] = grad_image[1, :]
grad_image[-1, :] = grad_image[-2, :]
    return grad_image
| mit | 6,013,892,367,951,991,000 | 40.715232 | 119 | 0.656558 | false |
shaistaansari/django | django/db/backends/base/schema.py | 339 | 43421 | import hashlib
import logging
from django.db.backends.utils import truncate_name
from django.db.transaction import atomic
from django.utils import six
from django.utils.encoding import force_bytes
logger = logging.getLogger('django.db.backends.schema')
def _related_non_m2m_objects(old_field, new_field):
# Filters out m2m objects from reverse relations.
# Returns (old_relation, new_relation) tuples.
return zip(
(obj for obj in old_field.model._meta.related_objects if not obj.field.many_to_many),
(obj for obj in new_field.model._meta.related_objects if not obj.field.many_to_many)
)
class BaseDatabaseSchemaEditor(object):
"""
This class (and its subclasses) are responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
It is intended to eventually completely replace DatabaseCreation.
    This class should be used by instantiating it for each set of schema
    changes (e.g. a migration file) and using it as a context manager; the
    relevant actions run inside the ``with`` block, and deferred SQL is
    executed on exit. This is necessary to allow things like circular foreign
    key references - FKs will only be created once the block exits.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_fk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) "
"REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
)
sql_create_inline_fk = None
sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
def __init__(self, connection, collect_sql=False):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
# State-managing methods
def __enter__(self):
self.deferred_sql = []
if self.connection.features.can_rollback_ddl:
self.atomic = atomic(self.connection.alias)
self.atomic.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
if self.connection.features.can_rollback_ddl:
self.atomic.__exit__(exc_type, exc_value, traceback)
# Core utility functions
def execute(self, sql, params=[]):
"""
Executes the given SQL statement, with optional parameters.
"""
# Log the command we're running, then run it
logger.debug("%s; (params %r)" % (sql, params))
if self.collect_sql:
ending = "" if sql.endswith(";") else ";"
if params is not None:
self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending)
else:
self.collected_sql.append(sql + ending)
else:
with self.connection.cursor() as cursor:
cursor.execute(sql, params)
def quote_name(self, name):
return self.connection.ops.quote_name(name)
@classmethod
def _digest(cls, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
h = hashlib.md5()
for arg in args:
h.update(force_bytes(arg))
return h.hexdigest()[:8]
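    # Illustrative example (hypothetical arguments): _digest() shortens
    # identifier parts deterministically, e.g.
    #
    #   BaseDatabaseSchemaEditor._digest("myapp_book", "title")
    #   # -> an 8-character hex string such as '1f0c7d8a' (md5-derived)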
# Field <-> database mapping functions
def column_sql(self, model, field, include_default=False):
"""
Takes a field and returns its column definition.
The field must already have had set_attributes_from_name called.
"""
# Get the column's type and use that as the basis of the SQL
db_params = field.db_parameters(connection=self.connection)
sql = db_params['type']
params = []
# Check for fields that aren't actually columns (e.g. M2M)
if sql is None:
return None, None
# Work out nullability
null = field.null
# If we were told to include a default value, do so
include_default = include_default and not self.skip_default(field)
if include_default:
default_value = self.effective_default(field)
if default_value is not None:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
sql += " DEFAULT %s" % self.prepare_default(default_value)
else:
sql += " DEFAULT %s"
params += [default_value]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (field.empty_strings_allowed and not field.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if null and not self.connection.features.implied_column_null:
sql += " NULL"
elif not null:
sql += " NOT NULL"
# Primary key/unique outputs
if field.primary_key:
sql += " PRIMARY KEY"
elif field.unique:
sql += " UNIQUE"
# Optionally add the tablespace if it's an implicitly indexed column
tablespace = field.db_tablespace or model._meta.db_tablespace
if tablespace and self.connection.features.supports_tablespaces and field.unique:
sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
# Return the sql
return sql, params
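    # Illustrative sketch (hypothetical field): on PostgreSQL, a
    # CharField(max_length=100, null=True) yields roughly
    # ('varchar(100) NULL', []); the explicit NULL keyword is skipped on
    # backends whose implied_column_null feature is True.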
def skip_default(self, field):
"""
Some backends don't accept default values for certain columns types
(i.e. MySQL longtext and longblob).
"""
return False
def prepare_default(self, value):
"""
Only used for backends which have requires_literal_defaults feature
"""
raise NotImplementedError(
'subclasses of BaseDatabaseSchemaEditor for backends which have '
'requires_literal_defaults must provide a prepare_default() method'
)
def effective_default(self, field):
"""
Returns a field's effective database default value
"""
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
if field.get_internal_type() == "BinaryField":
default = six.binary_type()
else:
default = six.text_type()
else:
default = None
# If it's a callable, call it
if six.callable(default):
default = default()
# Run it through the field's get_db_prep_save method so we can send it
# to the database.
default = field.get_db_prep_save(default, self.connection)
return default
def quote_value(self, value):
"""
Returns a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
so this is safe).
"""
raise NotImplementedError()
# Actions
def create_model(self, model):
"""
Takes a model and creates a table for it in the database.
Will also create any accompanying indexes or unique constraints.
"""
# Create column SQL, add FK deferreds if needed
column_sqls = []
params = []
for field in model._meta.local_fields:
# SQL
definition, extra_params = self.column_sql(model, field)
if definition is None:
continue
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Autoincrement SQL (for backends with inline variant)
col_type_suffix = field.db_type_suffix(connection=self.connection)
if col_type_suffix:
definition += " %s" % col_type_suffix
params.extend(extra_params)
# FK
if field.remote_field and field.db_constraint:
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column
if self.connection.features.supports_foreign_keys:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
elif self.sql_create_inline_fk:
definition += " " + self.sql_create_inline_fk % {
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
# Add the SQL to our big list
column_sqls.append("%s %s" % (
self.quote_name(field.column),
definition,
))
# Autoincrement SQL (for backends with post table definition variant)
if field.get_internal_type() == "AutoField":
autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
# Add any unique_togethers (always deferred, as some fields might be
# created afterwards, like geometry fields with some backends)
for fields in model._meta.unique_together:
columns = [model._meta.get_field(field).column for field in fields]
self.deferred_sql.append(self._create_unique_sql(model, columns))
# Make the table
sql = self.sql_create_table % {
"table": self.quote_name(model._meta.db_table),
"definition": ", ".join(column_sqls)
}
if model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
if tablespace_sql:
sql += ' ' + tablespace_sql
# Prevent using [] as params, in the case a literal '%' is used in the definition
self.execute(sql, params or None)
# Add any field index and index_together's (deferred as SQLite3 _remake_table needs it)
self.deferred_sql.extend(self._model_indexes_sql(model))
# Make M2M tables
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.create_model(field.remote_field.through)
def delete_model(self, model):
"""
Deletes a model from the database.
"""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# Delete the table
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_unique_together)
news = set(tuple(fields) for fields in new_unique_together)
# Deleted uniques
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'unique': True}, self.sql_delete_unique)
# Created uniques
for fields in news.difference(olds):
columns = [model._meta.get_field(field).column for field in fields]
self.execute(self._create_unique_sql(model, columns))
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_index_together)
news = set(tuple(fields) for fields in new_index_together)
# Deleted indexes
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index)
# Created indexes
for field_names in news.difference(olds):
fields = [model._meta.get_field(field) for field in field_names]
self.execute(self._create_index_sql(model, fields, suffix="_idx"))
def _delete_composed_index(self, model, fields, constraint_kwargs, sql):
columns = [model._meta.get_field(field).column for field in fields]
constraint_names = self._constraint_names(model, columns, **constraint_kwargs)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(self._delete_constraint_sql(sql, model, constraint_names[0]))
def alter_db_table(self, model, old_db_table, new_db_table):
"""
Renames the table a model points to.
"""
if old_db_table == new_db_table:
return
self.execute(self.sql_rename_table % {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
})
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""
Moves a model's table between tablespaces
"""
self.execute(self.sql_retablespace_table % {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
})
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
if definition is None:
return
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Build the SQL and run it
sql = self.sql_create_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"definition": definition,
}
self.execute(sql, params)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if not self.skip_default(field) and field.default is not None:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(field.column),
}
}
self.execute(sql)
# Add an index, if required
if field.db_index and not field.unique:
self.deferred_sql.append(self._create_index_sql(model, [field]))
# Add any FK constraints later
if field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.delete_model(field.remote_field.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
# Drop any FK constraints, MySQL requires explicit deletion
if field.remote_field:
fk_names = self._constraint_names(model, [field.column], foreign_key=True)
for fk_name in fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Delete the column
sql = self.sql_delete_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allows a field's type, uniqueness, nullability, default, column,
constraints etc. to be modified.
Requires a copy of the old field as well so we can only perform
changes that are required.
If strict is true, raises errors if the old column does not match old_field precisely.
"""
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params['type']
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params['type']
if ((old_type is None and old_field.remote_field is None) or
(new_type is None and new_field.remote_field is None)):
raise ValueError(
"Cannot alter field %s into %s - they do not properly define "
"db_type (are you using a badly-written custom field?)" %
(old_field, new_field),
)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
old_field.remote_field.through._meta.auto_created and
new_field.remote_field.through._meta.auto_created):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
not old_field.remote_field.through._meta.auto_created and
not new_field.remote_field.through._meta.auto_created):
# Both sides have through models; this is a no-op.
return
elif old_type is None or new_type is None:
raise ValueError(
"Cannot alter field %s into %s - they are not compatible types "
"(you cannot alter to or from M2M fields, or add or remove "
"through= on M2M fields)" % (old_field, new_field)
)
self._alter_field(model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict)
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Actually perform a "physical" (non-ManyToMany) field update."""
# Drop any FK constraints, we'll remake them later
fks_dropped = set()
if old_field.remote_field and old_field.db_constraint:
fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
if strict and len(fk_names) != 1:
raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
len(fk_names),
model._meta.db_table,
old_field.column,
))
for fk_name in fk_names:
fks_dropped.add((old_field.column,))
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Has unique been removed?
if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
# Find the unique constraint for this field
constraint_names = self._constraint_names(model, [old_field.column], unique=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_name))
# Drop incoming FK constraints if we're a primary key and things are going
# to change.
if old_field.primary_key and new_field.primary_key and old_type != new_type:
# '_meta.related_field' also contains M2M reverse fields, these
# will be filtered out
for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field):
rel_fk_names = self._constraint_names(
new_rel.related_model, [new_rel.field.column], foreign_key=True
)
for fk_name in rel_fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, new_rel.related_model, fk_name))
# Removed an index? (no strict check, as multiple indexes are possible)
if (old_field.db_index and not new_field.db_index and
not old_field.unique and not
(not new_field.unique and old_field.unique)):
# Find the index for this field
index_names = self._constraint_names(model, [old_field.column], index=True)
for index_name in index_names:
self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name))
# Change check constraints?
if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
constraint_names = self._constraint_names(model, [old_field.column], check=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_check, model, constraint_name))
# Have they renamed the column?
if old_field.column != new_field.column:
self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type))
# Next, start accumulating actions to do
actions = []
null_actions = []
post_actions = []
# Type change?
if old_type != new_type:
fragment, other_actions = self._alter_column_type_sql(
model._meta.db_table, old_field, new_field, new_type
)
actions.append(fragment)
post_actions.extend(other_actions)
# When changing a column NULL constraint to NOT NULL with a given
# default value, we need to perform 4 steps:
# 1. Add a default for new incoming writes
# 2. Update existing NULL rows with new default
# 3. Replace NULL constraint with NOT NULL
# 4. Drop the default again.
# Default change?
old_default = self.effective_default(old_field)
new_default = self.effective_default(new_field)
needs_database_default = (
old_default != new_default and
new_default is not None and
not self.skip_default(new_field)
)
if needs_database_default:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": self.prepare_default(new_default),
},
[],
))
else:
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
))
# Nullability change?
if old_field.null != new_field.null:
if (self.connection.features.interprets_empty_strings_as_nulls and
new_field.get_internal_type() in ("CharField", "TextField")):
# The field is nullable in the database anyway, leave it alone
pass
elif new_field.null:
null_actions.append((
self.sql_alter_column_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
else:
null_actions.append((
self.sql_alter_column_not_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
# Only if we have a default and there is a change from NULL to NOT NULL
four_way_default_alteration = (
new_field.has_default() and
(old_field.null and not new_field.null)
)
if actions or null_actions:
if not four_way_default_alteration:
# If we don't have to do a 4-way default alteration we can
# directly run a (NOT) NULL alteration
actions = actions + null_actions
# Combine actions together if we can (e.g. postgres)
if self.connection.features.supports_combined_alters and actions:
sql, params = tuple(zip(*actions))
actions = [(", ".join(sql), sum(params, []))]
# Apply those actions
for sql, params in actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if four_way_default_alteration:
# Update existing rows with default value
self.execute(
self.sql_update_with_default % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
)
# Since we didn't run a NOT NULL change before we need to do it
# now
for sql, params in null_actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if post_actions:
for sql, params in post_actions:
self.execute(sql, params)
# Added a unique?
if (not old_field.unique and new_field.unique) or (
old_field.primary_key and not new_field.primary_key and new_field.unique
):
self.execute(self._create_unique_sql(model, [new_field.column]))
# Added an index?
if (not old_field.db_index and new_field.db_index and
not new_field.unique and not
(not old_field.unique and new_field.unique)):
self.execute(self._create_index_sql(model, [new_field], suffix="_uniq"))
# Type alteration on primary key? Then we need to alter the column
# referring to us.
rels_to_update = []
if old_field.primary_key and new_field.primary_key and old_type != new_type:
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Changed to become primary key?
# Note that we don't detect unsetting of a PK, as we assume another field
# will always come along and replace it.
if not old_field.primary_key and new_field.primary_key:
# First, drop the old PK
constraint_names = self._constraint_names(model, primary_key=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of PK constraints for %s" % (
len(constraint_names),
model._meta.db_table,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_pk, model, constraint_name))
# Make the new one
self.execute(
self.sql_create_pk % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_pk")),
"columns": self.quote_name(new_field.column),
}
)
# Update all referencing columns
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Handle our type alters on the other end of rels from the PK stuff above
for old_rel, new_rel in rels_to_update:
rel_db_params = new_rel.field.db_parameters(connection=self.connection)
rel_type = rel_db_params['type']
fragment, other_actions = self._alter_column_type_sql(
new_rel.related_model._meta.db_table, old_rel.field, new_rel.field, rel_type
)
self.execute(
self.sql_alter_column % {
"table": self.quote_name(new_rel.related_model._meta.db_table),
"changes": fragment[0],
},
fragment[1],
)
for sql, params in other_actions:
self.execute(sql, params)
# Does it have a foreign key?
if (new_field.remote_field and
(fks_dropped or not old_field.remote_field or not old_field.db_constraint) and
new_field.db_constraint):
self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s"))
# Rebuild FKs that pointed to us if we previously had to drop them
if old_field.primary_key and new_field.primary_key and old_type != new_type:
for rel in new_field.model._meta.related_objects:
if not rel.many_to_many:
self.execute(self._create_fk_sql(rel.related_model, rel.field, "_fk"))
# Does it have check constraints we need to add?
if old_db_params['check'] != new_db_params['check'] and new_db_params['check']:
self.execute(
self.sql_create_check % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_check")),
"column": self.quote_name(new_field.column),
"check": new_db_params['check'],
}
)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if needs_database_default:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(new_field.column),
}
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Should return two things; an SQL fragment of (sql, params) to insert
into an ALTER TABLE statement, and a list of extra (sql, params) tuples
to run once the field is altered.
"""
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
),
[],
)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
# Rename the through table
if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table:
self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table)
# Repoint the FK to the other side
self.alter_field(
new_field.remote_field.through,
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
)
self.alter_field(
new_field.remote_field.through,
# for self-referential models we need to alter field from the other end too
old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()),
)
def _create_index_name(self, model, column_names, suffix=""):
"""
Generates a unique name for an index/unique constraint.
"""
# If there is just one column in the index, use a default algorithm from Django
if len(column_names) == 1 and not suffix:
return truncate_name(
'%s_%s' % (model._meta.db_table, self._digest(column_names[0])),
self.connection.ops.max_name_length()
)
# Else generate the name for the index using a different algorithm
table_name = model._meta.db_table.replace('"', '').replace('.', '_')
index_unique_name = '_%s' % self._digest(table_name, *column_names)
max_length = self.connection.ops.max_name_length() or 200
# If the index name is too long, truncate it
index_name = ('%s_%s%s%s' % (
table_name, column_names[0], index_unique_name, suffix,
)).replace('"', '').replace('.', '_')
if len(index_name) > max_length:
part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix))
index_name = '%s%s' % (table_name[:(max_length - len(part))], part)
# It shouldn't start with an underscore (Oracle hates this)
if index_name[0] == "_":
index_name = index_name[1:]
# If it's STILL too long, just hash it down
if len(index_name) > max_length:
index_name = hashlib.md5(force_bytes(index_name)).hexdigest()[:max_length]
# It can't start with a number on Oracle, so prepend D if we need to
if index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
def _create_index_sql(self, model, fields, suffix="", sql=None):
"""
Return the SQL statement to create the index for one or several fields.
`sql` can be specified if the syntax differs from the standard (GIS
indexes, ...).
"""
if len(fields) == 1 and fields[0].db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
elif model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
else:
tablespace_sql = ""
if tablespace_sql:
tablespace_sql = " " + tablespace_sql
columns = [field.column for field in fields]
sql_create_index = sql or self.sql_create_index
return sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix=suffix)),
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": tablespace_sql,
}
def _model_indexes_sql(self, model):
"""
Return all index SQL statements (field indexes, index_together) for the
specified model, as a list.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for field in model._meta.local_fields:
if field.db_index and not field.unique:
output.append(self._create_index_sql(model, [field], suffix=""))
for field_names in model._meta.index_together:
fields = [model._meta.get_field(field) for field in field_names]
output.append(self._create_index_sql(model, fields, suffix="_idx"))
return output
def _rename_field_sql(self, table, old_field, new_field, new_type):
return self.sql_rename_column % {
"table": self.quote_name(table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
}
def _create_fk_sql(self, model, field, suffix):
from_table = model._meta.db_table
from_column = field.column
to_table = field.target_field.model._meta.db_table
to_column = field.target_field.column
suffix = suffix % {
"to_table": to_table,
"to_column": to_column,
}
return self.sql_create_fk % {
"table": self.quote_name(from_table),
"name": self.quote_name(self._create_index_name(model, [from_column], suffix=suffix)),
"column": self.quote_name(from_column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
def _create_unique_sql(self, model, columns):
return self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix="_uniq")),
"columns": ", ".join(self.quote_name(column) for column in columns),
}
def _delete_constraint_sql(self, template, model, name):
return template % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(name),
}
def _constraint_names(self, model, column_names=None, unique=None,
primary_key=None, index=None, foreign_key=None,
check=None):
"""
Returns all constraint names matching the columns and conditions
"""
column_names = list(column_names) if column_names else None
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict['columns']:
if unique is not None and infodict['unique'] != unique:
continue
if primary_key is not None and infodict['primary_key'] != primary_key:
continue
if index is not None and infodict['index'] != index:
continue
if check is not None and infodict['check'] != check:
continue
if foreign_key is not None and not infodict['foreign_key']:
continue
result.append(name)
return result
| bsd-3-clause | 1,051,814,205,494,674,800 | 45.890929 | 113 | 0.578867 | false |
toshywoshy/ansible | lib/ansible/modules/cloud/digital_ocean/digital_ocean_certificate_info.py | 25 | 3570 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: digital_ocean_certificate_info
short_description: Gather information about DigitalOcean certificates
description:
- This module can be used to gather information about DigitalOcean provided certificates.
- This module was called C(digital_ocean_certificate_facts) before Ansible 2.9. The usage did not change.
author: "Abhijeet Kasurde (@Akasurde)"
version_added: "2.6"
options:
certificate_id:
description:
- Certificate ID that can be used to identify and reference a certificate.
required: false
requirements:
- "python >= 2.6"
extends_documentation_fragment: digital_ocean.documentation
'''
EXAMPLES = '''
- name: Gather information about all certificates
digital_ocean_certificate_info:
oauth_token: "{{ oauth_token }}"
- name: Gather information about certificate with given id
digital_ocean_certificate_info:
oauth_token: "{{ oauth_token }}"
certificate_id: "892071a0-bb95-49bc-8021-3afd67a210bf"
- name: Get not after information about certificate
digital_ocean_certificate_info:
register: resp_out
- set_fact:
not_after_date: "{{ item.not_after }}"
loop: "{{ resp_out.data|json_query(name) }}"
vars:
name: "[?name=='web-cert-01']"
- debug: var=not_after_date
'''
RETURN = '''
data:
description: DigitalOcean certificate information
returned: success
type: list
sample: [
{
"id": "892071a0-bb95-49bc-8021-3afd67a210bf",
"name": "web-cert-01",
"not_after": "2017-02-22T00:23:00Z",
"sha1_fingerprint": "dfcc9f57d86bf58e321c2c6c31c7a971be244ac7",
"created_at": "2017-02-08T16:02:37Z"
},
]
'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
def core(module):
certificate_id = module.params.get('certificate_id', None)
rest = DigitalOceanHelper(module)
base_url = 'certificates?'
if certificate_id is not None:
response = rest.get("%s/%s" % (base_url, certificate_id))
status_code = response.status_code
if status_code != 200:
module.fail_json(msg="Failed to retrieve certificates for DigitalOcean")
resp_json = response.json
certificate = resp_json['certificate']
else:
certificate = rest.get_paginated_data(base_url=base_url, data_key_name='certificates')
module.exit_json(changed=False, data=certificate)
def main():
argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
argument_spec.update(
certificate_id=dict(type='str', required=False),
)
module = AnsibleModule(argument_spec=argument_spec)
if module._name == 'digital_ocean_certificate_facts':
module.deprecate("The 'digital_ocean_certificate_facts' module has been renamed to 'digital_ocean_certificate_info'", version='2.13')
try:
core(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=format_exc())
if __name__ == '__main__':
main()
| gpl-3.0 | 4,173,002,048,607,667,700 | 29.254237 | 141 | 0.684034 | false |
FRidh/Sea | Sea/adapter/couplings/Coupling.py | 2 | 3004 | import abc
import logging
import Sea
import numpy as np
from ..base import Base
class Coupling(Base):
"""
Abstract base class for all :mod:`Sea.adapter.couplings` classes.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, obj, connection, subsystem_from, subsystem_to):
Base.__init__(self, obj)
obj.addProperty("App::PropertyLink", "Connection", "Coupling", "Connection this coupling is part of.")
obj.Connection = connection
obj.setEditorMode("Connection", 1)
obj.Frequency = connection.Frequency
obj.addProperty("App::PropertyFloatList", "CLF", "Coupling", "Coupling loss factor.")
obj.setEditorMode("CLF", 1)
obj.addProperty("App::PropertyLink", "SubsystemFrom", "Coupling", "Subsystem from")
obj.setEditorMode("SubsystemFrom", 1)
obj.addProperty("App::PropertyLink", "SubsystemTo", "Coupling", "Subsystem to")
obj.setEditorMode("SubsystemTo", 1)
obj.addProperty("App::PropertyFloatList", "ImpedanceFrom", "Subsystem From", "Impedance of connection corrected From subsystem.")
obj.setEditorMode("ImpedanceFrom", 1)
obj.addProperty("App::PropertyFloatList", "ResistanceFrom", "Subsystem From", "Resistance of connection corrected From subsystem.")
obj.setEditorMode("ResistanceFrom", 1)
obj.addProperty("App::PropertyFloatList", "ImpedanceTo", "Subsystem To", "Impedance of connection corrected To subsystem.")
obj.setEditorMode("ImpedanceTo", 1)
obj.addProperty("App::PropertyFloatList", "ResistanceTo", "Subsystem To", "Resistance of connection corrected To subsystem.")
obj.setEditorMode("ResistanceTo", 1)
"""How or more specifically, when to update the size of the coupling?"""
#obj.addProperty("App::PropertyFloat", "Size", "Coupling", "Size of the junction.")
#subsystem_from.CouplingsFrom = subsystem_from.CouplingsFrom + [obj]
#subsystem_to.CouplingsTo = subsystem_to.CouplingsTo + [obj]
obj.SubsystemFrom = subsystem_from
obj.SubsystemTo = subsystem_to
def onChanged(self, obj, prop):
Base.onChanged(self, obj, prop)
if prop == 'SubsystemFrom':
obj.Proxy.subsystem_from = obj.SubsystemFrom.Proxy
if prop == 'SubsystemTo':
obj.Proxy.subsystem_to = obj.SubsystemTo.Proxy
def execute(self, obj):
Base.execute(self, obj)
obj.CLF = obj.Proxy.clf.tolist()
obj.ImpedanceFrom = obj.Proxy.impedance_from.tolist()
obj.ImpedanceTo = obj.Proxy.impedance_to.tolist()
obj.ResistanceFrom = obj.Proxy.resistance_from.tolist()
obj.ResistanceTo = obj.Proxy.resistance_to.tolist()
#@abc.abstractmethod
#def size(self, obj):
#"""
#Return the size of the coupling.
#"""
#return
| bsd-3-clause | 8,337,189,614,351,210,000 | 38.025974 | 142 | 0.625166 | false |
gtacoin-dev/gtacoin | qa/rpc-tests/test_framework/comptool.py | 1 | 18090 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Gtacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port
'''
This is a tool for comparing two or more gtacoinds to each other
using a provided test script.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a Python
generator that yields TestInstance objects. See below for definition.
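
A minimal sketch of such a generator (hypothetical helper names, shown only
for illustration; a real test builds its blocks and transactions with the
framework and mininode primitives):

    class ExampleTestGen(object):
        def get_tests(self):
            block = build_next_block()              # assumed helper
            yield TestInstance([[block, True]])     # expect acceptance
            bad_tx = build_double_spend()           # assumed helper
            yield TestInstance([[bad_tx, RejectResult(16)]], sync_every_tx=True)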
'''
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
class RejectResult(object):
'''
Outcome that expects rejection of a transaction or block.
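    For example, RejectResult(16, b'bad-txns') matches any reject message
    with code 16 whose reason starts with b'bad-txns' (see match() below).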
'''
def __init__(self, code, reason=b''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, conn, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
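#
# A hypothetical sketch of how these pieces combine (object names are
# illustrative only):
#
#   TestInstance([[block_a, True],               # block must be accepted
#                 [block_b, RejectResult(16)],   # must be rejected with code 16
#                 [block_c, None]],              # no expectation: nodes compared
#                sync_every_block=True)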
class TestInstance(object):
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def clear_all_connections(self):
self.connections = []
self.test_nodes = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def wait_for_verack(self):
def veracked():
return all(node.verack_received for node in self.test_nodes)
return wait_until(veracked, timeout=10)
def wait_for_pings(self, counter):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=20*num_blocks):
# print [ c.cb.block_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested block")
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
# print [ c.cb.tx_request_map for c in self.connections ]
raise AssertionError("Not all nodes requested transaction")
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
print('Block not in reject map: %064x' % (blockhash))
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
print('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
# print c.cb.bestblockhash, blockhash, outcome
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
# print c.rpc.getrawmempool()
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
print('Tx not in reject map: %064x' % (txhash))
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
print('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
return False
elif ((txhash in c.cb.lastInv) != outcome):
# print c.rpc.getrawmempool(), c.cb.lastInv
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(2, block.sha256))
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
print("Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ])
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
| mit | 1,838,214,201,828,971,800 | 44.112219 | 145 | 0.585627 | false |
NL66278/odoo | addons/resource/faces/plocale.py | 433 | 1910 | ############################################################################
# Copyright (C) 2005 by Reithinger GmbH
# [email protected]
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
import gettext
import os.path
import locale
import sys
def _get_translation():
try:
return gettext.translation("faces")
except:
try:
if sys.frozen:
path = os.path.dirname(sys.argv[0])
path = os.path.join(path, "resources", "faces", "locale")
else:
path = os.path.split(__file__)[0]
path = os.path.join(path, "locale")
return gettext.translation("faces", path)
except Exception, e:
return None
def get_gettext():
trans = _get_translation()
if trans: return trans.ugettext
return lambda msg: msg
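# Typical usage (a sketch; assumes a "faces" message catalog is installed,
# otherwise the identity fallback above is returned):
# _ = get_gettext()
# print _("Task")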
def get_encoding():
trans = _get_translation()
if trans: return trans.charset()
return locale.getpreferredencoding()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,346,833,483,868,535,000 | 33.107143 | 76 | 0.577487 | false |
arrabito/DIRAC | FrameworkSystem/private/standardLogging/Formatter/ColoredBaseFormatter.py | 4 | 2106 | """
ColoredBaseFormatter
"""
__RCSID__ = "$Id$"
import sys
from DIRAC.FrameworkSystem.private.standardLogging.Formatter.BaseFormatter import BaseFormatter
class ColoredBaseFormatter(BaseFormatter):
"""
  ColoredBaseFormatter is used to format a log record to create a string representing a log message.
  It is based on the BaseFormatter object, which is itself based on the Formatter of the standard logging library.
  This custom formatter is useful for formatting messages to correspond with the gLogger format.
  It adds color to all messages which come from StdoutBackend and StderrBackend
  and colors them according to their levels.
"""
COLOR_MAP = {
'black': 0,
'red': 1,
'green': 2,
'yellow': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7
}
LEVEL_MAP = {
'ALWAYS': ('black', 'white', False),
'NOTICE': (None, 'magenta', False),
'INFO': (None, 'green', False),
'VERBOSE': (None, 'cyan', False),
'DEBUG': (None, 'blue', False),
'WARN': (None, 'yellow', False),
'ERROR': (None, 'red', False),
'FATAL': ('red', 'black', False)
}
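  # For instance, with the mapping above an ERROR record is wrapped in
  # '\x1b[31m...\x1b[0m' (red foreground) and an ALWAYS record in
  # '\x1b[40;37m...\x1b[0m' (black background, white foreground).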
def format(self, record):
"""
Overriding.
format is the main method of the Formatter object because it is the method which transforms
a log record into a string with colors.
    According to the level, the method gets colors from LEVEL_MAP and adds them to the message.
    :param record: the log record containing all the information about the log message: name, level, threadid...
"""
stringRecord = super(ColoredBaseFormatter, self).format(record)
# post treatment
if self._options['Color'] and sys.stdout.isatty() and sys.stderr.isatty():
params = []
bg, fg, bold = self.LEVEL_MAP[record.levelname]
if bg in self.COLOR_MAP:
params.append(str(self.COLOR_MAP[bg] + 40))
if fg in self.COLOR_MAP:
params.append(str(self.COLOR_MAP[fg] + 30))
if bold:
params.append('1')
stringRecord = ("".join(('\x1b[', ";".join(params), 'm', stringRecord, '\x1b[0m')))
return stringRecord
| gpl-3.0 | -4,829,345,305,667,525,000 | 31.4 | 113 | 0.639126 | false |
TheBigW/DRC | XMLSerializer.py | 1 | 4179 | # -*- coding: utf-8 -*-
import lxml.etree as ET
class Serializer(object):
def __init__(self):
return None
@staticmethod
def getSerializeMembers(SerializeObject):
strSerializeMembers = []
for member in dir(SerializeObject):
strMember = str(member)
strType = str(type(getattr(SerializeObject, strMember)))
print(strMember + " : " + strType)
if (strType.find("descriptor") == -1) and (
strType.find("function") == -1) and (
strType.find("method") == -1) and (
strMember.find("__") != 0):
strSerializeMembers.append(strMember)
# print( "Serialize considered members : " + str(strSerializeMembers) )
return strSerializeMembers
@staticmethod
def SerializeArray(XMLParent, arrayInst):
for arrayIndex, arrayItem in enumerate(arrayInst):
Serializer.SerializeMember(XMLParent, "elem" + str(arrayIndex),
arrayItem)
@staticmethod
def SerializeMember(XMLParent, MemberName, newValue):
strType = str(type(newValue))
# print( "serialize type : " + strType )
if strType.find("instance") != -1:
XMLParent = ET.SubElement(XMLParent, MemberName)
Serializer.SerializeClass(newValue, XMLParent)
elif strType.find("list") != -1:
newElem = ET.SubElement(XMLParent, MemberName)
Serializer.SerializeArray(newElem, newValue)
else:
newElem = ET.SubElement(XMLParent, MemberName)
newElem.text = str(newValue)
@staticmethod
def SerializeClass(SerializeObject, rootElem=None):
strSerMemberNames = Serializer.getSerializeMembers(SerializeObject)
for strElem in strSerMemberNames:
Serializer.SerializeMember(rootElem, strElem,
getattr(SerializeObject, strElem))
@staticmethod
def Serialize(SerializeObject):
strClassName = SerializeObject.__class__.__name__
rootElem = ET.Element(strClassName)
Serializer.SerializeClass(SerializeObject, rootElem)
return ET.tostring(rootElem)
@staticmethod
def read(fileName, SerializeObject):
root = ET.parse(fileName)
return Serializer.DeserializeClass(SerializeObject, root.getroot())
@staticmethod
def write(fileName, SerializeObject):
strClassName = SerializeObject.__class__.__name__
rootElem = ET.Element(strClassName)
Serializer.SerializeClass(SerializeObject, rootElem)
ET.ElementTree(rootElem).write(fileName)
@staticmethod
def DeserializeArray(XMLParent, value):
# array needs to have at least one value for correct type
# information, else values are read and treated as string
arrayInst = []
for arrayIndex, arrayNode in enumerate(XMLParent):
arrayInst.append(Serializer.DeserializeMember(arrayNode, value[0]))
return arrayInst
@staticmethod
def DeserializeMember(XMLElem, value):
theType = type(value)
strType = str(theType)
print("Deserializing : " + strType)
if strType.find("instance") != -1:
return Serializer.DeserializeClass(value, XMLElem)
elif strType.find("list") != -1:
return Serializer.DeserializeArray(XMLElem, value)
else:
return theType(XMLElem.text)
@staticmethod
def DeserializeClass(SerializeObject, rootElem):
strSerMemberNames = Serializer.getSerializeMembers(SerializeObject)
for strElem, xmlChildElem in zip(strSerMemberNames, rootElem):
setattr(SerializeObject, strElem,
Serializer.DeserializeMember(xmlChildElem,
getattr(SerializeObject,
strElem)))
return SerializeObject
@staticmethod
def DeSerialize(strXmlString, SerializeObject):
root = ET.fromstring(strXmlString)
return Serializer.DeserializeClass(SerializeObject, root)
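
# Usage sketch (hypothetical old-style class; any instance whose public,
# non-callable attributes should round-trip works):
#
#   class Point:
#       def __init__(self):
#           self.x = 0.0
#           self.y = 0.0
#
#   xml_string = Serializer.Serialize(Point())
#   restored = Serializer.DeSerialize(xml_string, Point())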
| gpl-3.0 | 6,235,422,881,603,661,000 | 39.182692 | 79 | 0.622158 | false |
luiseduardohdbackup/odoo | openerp/modules/registry.py | 220 | 19731 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Models registries.
"""
from collections import Mapping, defaultdict
import logging
import os
import threading
import openerp
from .. import SUPERUSER_ID
from openerp.tools import assertion_report, lazy_property, classproperty, config
from openerp.tools.lru import LRU
_logger = logging.getLogger(__name__)
class Registry(Mapping):
""" Model registry for a particular database.
The registry is essentially a mapping between model names and model
instances. There is one registry instance per database.
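
    For instance (a sketch; assumes a database named 'mydb' exists)::

        registry = RegistryManager.get('mydb')
        partners = registry['res.partner']   # model instance by name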
"""
def __init__(self, db_name):
super(Registry, self).__init__()
self.models = {} # model name/model instance mapping
self._sql_error = {}
self._store_function = {}
self._pure_function_fields = {} # {model: [field, ...], ...}
self._init = True
self._init_parent = {}
self._assertion_report = assertion_report.assertion_report()
self._fields_by_model = None
# modules fully loaded (maintained during init phase by `loading` module)
self._init_modules = set()
self.db_name = db_name
self._db = openerp.sql_db.db_connect(db_name)
# special cursor for test mode; None means "normal" mode
self.test_cr = None
        # Indicates that the registry is ready to be used (i.e. fully loaded).
self.ready = False
# Inter-process signaling (used only when openerp.multi_process is True):
# The `base_registry_signaling` sequence indicates the whole registry
# must be reloaded.
        # The `base_cache_signaling` sequence indicates all caches must be
# invalidated (i.e. cleared).
self.base_registry_signaling_sequence = None
self.base_cache_signaling_sequence = None
self.cache = LRU(8192)
# Flag indicating if at least one model cache has been cleared.
# Useful only in a multi-process context.
self._any_cache_cleared = False
cr = self.cursor()
has_unaccent = openerp.modules.db.has_unaccent(cr)
if openerp.tools.config['unaccent'] and not has_unaccent:
_logger.warning("The option --unaccent was given but no unaccent() function was found in database.")
self.has_unaccent = openerp.tools.config['unaccent'] and has_unaccent
cr.close()
#
# Mapping abstract methods implementation
# => mixin provides methods keys, items, values, get, __eq__, and __ne__
#
def __len__(self):
""" Return the size of the registry. """
return len(self.models)
def __iter__(self):
""" Return an iterator over all model names. """
return iter(self.models)
def __getitem__(self, model_name):
""" Return the model with the given name or raise KeyError if it doesn't exist."""
return self.models[model_name]
def __call__(self, model_name):
""" Same as ``self[model_name]``. """
return self.models[model_name]
@lazy_property
def pure_function_fields(self):
""" Return the list of pure function fields (field objects) """
fields = []
for mname, fnames in self._pure_function_fields.iteritems():
model_fields = self[mname]._fields
for fname in fnames:
fields.append(model_fields[fname])
return fields
def clear_manual_fields(self):
""" Invalidate the cache for manual fields. """
self._fields_by_model = None
def get_manual_fields(self, cr, model_name):
""" Return the manual fields (as a dict) for the given model. """
if self._fields_by_model is None:
# Query manual fields for all models at once
self._fields_by_model = dic = defaultdict(dict)
cr.execute('SELECT * FROM ir_model_fields WHERE state=%s', ('manual',))
for field in cr.dictfetchall():
dic[field['model']][field['name']] = field
return self._fields_by_model[model_name]
def do_parent_store(self, cr):
for o in self._init_parent:
self.get(o)._parent_store_compute(cr)
self._init = False
def obj_list(self):
""" Return the list of model names in this registry."""
return self.keys()
def add(self, model_name, model):
""" Add or replace a model in the registry."""
self.models[model_name] = model
def load(self, cr, module):
""" Load a given module in the registry.
At the Python level, the modules are already loaded, but not yet on a
        per-registry level. This method populates a registry with the given
        module, i.e. it instantiates all the classes of the given module
        and registers them in the registry.
"""
from .. import models
models_to_load = [] # need to preserve loading order
lazy_property.reset_all(self)
# Instantiate registered classes (via the MetaModel automatic discovery
# or via explicit constructor call), and add them to the pool.
for cls in models.MetaModel.module_to_models.get(module.name, []):
# models register themselves in self.models
model = cls._build_model(self, cr)
if model._name not in models_to_load:
# avoid double-loading models whose declaration is split
models_to_load.append(model._name)
return [self.models[m] for m in models_to_load]
def setup_models(self, cr, partial=False):
""" Complete the setup of models.
This must be called after loading modules and before using the ORM.
:param partial: ``True`` if all models have not been loaded yet.
"""
lazy_property.reset_all(self)
# load custom models
ir_model = self['ir.model']
cr.execute('select model from ir_model where state=%s', ('manual',))
for (model_name,) in cr.fetchall():
ir_model.instanciate(cr, SUPERUSER_ID, model_name, {})
# prepare the setup on all models
for model in self.models.itervalues():
model._prepare_setup(cr, SUPERUSER_ID)
# do the actual setup from a clean state
self._m2m = {}
for model in self.models.itervalues():
model._setup_base(cr, SUPERUSER_ID, partial)
for model in self.models.itervalues():
model._setup_fields(cr, SUPERUSER_ID)
for model in self.models.itervalues():
model._setup_complete(cr, SUPERUSER_ID)
def clear_caches(self):
""" Clear the caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi`` for all the models.
"""
for model in self.models.itervalues():
model.clear_caches()
# Special case for ir_ui_menu which does not use openerp.tools.ormcache.
ir_ui_menu = self.models.get('ir.ui.menu')
if ir_ui_menu is not None:
ir_ui_menu.clear_cache()
# Useful only in a multi-process context.
def reset_any_cache_cleared(self):
self._any_cache_cleared = False
# Useful only in a multi-process context.
def any_cache_cleared(self):
return self._any_cache_cleared
@classmethod
def setup_multi_process_signaling(cls, cr):
if not openerp.multi_process:
return None, None
# Inter-process signaling:
# The `base_registry_signaling` sequence indicates the whole registry
# must be reloaded.
        # The `base_cache_signaling` sequence indicates all caches must be
# invalidated (i.e. cleared).
cr.execute("""SELECT sequence_name FROM information_schema.sequences WHERE sequence_name='base_registry_signaling'""")
if not cr.fetchall():
cr.execute("""CREATE SEQUENCE base_registry_signaling INCREMENT BY 1 START WITH 1""")
cr.execute("""SELECT nextval('base_registry_signaling')""")
cr.execute("""CREATE SEQUENCE base_cache_signaling INCREMENT BY 1 START WITH 1""")
cr.execute("""SELECT nextval('base_cache_signaling')""")
cr.execute("""
SELECT base_registry_signaling.last_value,
base_cache_signaling.last_value
FROM base_registry_signaling, base_cache_signaling""")
r, c = cr.fetchone()
_logger.debug("Multiprocess load registry signaling: [Registry: # %s] "\
"[Cache: # %s]",
r, c)
return r, c
def enter_test_mode(self):
""" Enter the 'test' mode, where one cursor serves several requests. """
assert self.test_cr is None
self.test_cr = self._db.test_cursor()
RegistryManager.enter_test_mode()
def leave_test_mode(self):
""" Leave the test mode. """
assert self.test_cr is not None
self.test_cr.force_close()
self.test_cr = None
RegistryManager.leave_test_mode()
def cursor(self):
""" Return a new cursor for the database. The cursor itself may be used
as a context manager to commit/rollback and close automatically.
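
        Typical use (a sketch)::

            with registry.cursor() as cr:
                cr.execute("SELECT 1")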
"""
cr = self.test_cr
if cr is not None:
# While in test mode, we use one special cursor across requests. The
# test cursor uses a reentrant lock to serialize accesses. The lock
# is granted here by cursor(), and automatically released by the
# cursor itself in its method close().
cr.acquire()
return cr
return self._db.cursor()
class DummyRLock(object):
""" Dummy reentrant lock, to be used while running rpc and js tests """
def acquire(self):
pass
def release(self):
pass
def __enter__(self):
self.acquire()
def __exit__(self, type, value, traceback):
self.release()
class RegistryManager(object):
""" Model registries manager.
The manager is responsible for creation and deletion of model
registries (essentially database connection/model registry pairs).
"""
_registries = None
_lock = threading.RLock()
_saved_lock = None
@classproperty
def registries(cls):
if cls._registries is None:
size = config.get('registry_lru_size', None)
if not size:
# Size the LRU depending of the memory limits
if os.name != 'posix':
# cannot specify the memory limit soft on windows...
size = 42
else:
# A registry takes 10MB of memory on average, so we reserve
# 10Mb (registry) + 5Mb (working memory) per registry
avgsz = 15 * 1024 * 1024
size = int(config['limit_memory_soft'] / avgsz)
cls._registries = LRU(size)
return cls._registries
@classmethod
def lock(cls):
""" Return the current registry lock. """
return cls._lock
@classmethod
def enter_test_mode(cls):
""" Enter the 'test' mode, where the registry is no longer locked. """
assert cls._saved_lock is None
cls._lock, cls._saved_lock = DummyRLock(), cls._lock
@classmethod
def leave_test_mode(cls):
""" Leave the 'test' mode. """
assert cls._saved_lock is not None
cls._lock, cls._saved_lock = cls._saved_lock, None
@classmethod
def get(cls, db_name, force_demo=False, status=None, update_module=False):
""" Return a registry for a given database name."""
with cls.lock():
try:
return cls.registries[db_name]
except KeyError:
return cls.new(db_name, force_demo, status,
update_module)
finally:
# set db tracker - cleaned up at the WSGI
# dispatching phase in openerp.service.wsgi_server.application
threading.current_thread().dbname = db_name
@classmethod
def new(cls, db_name, force_demo=False, status=None,
update_module=False):
""" Create and return a new registry for a given database name.
The (possibly) previous registry for that database name is discarded.
"""
import openerp.modules
with cls.lock():
with openerp.api.Environment.manage():
registry = Registry(db_name)
# Initializing a registry will call general code which will in
# turn call registries.get (this object) to obtain the registry
# being initialized. Make it available in the registries
# dictionary then remove it if an exception is raised.
cls.delete(db_name)
cls.registries[db_name] = registry
try:
with registry.cursor() as cr:
seq_registry, seq_cache = Registry.setup_multi_process_signaling(cr)
registry.base_registry_signaling_sequence = seq_registry
registry.base_cache_signaling_sequence = seq_cache
# This should be a method on Registry
openerp.modules.load_modules(registry._db, force_demo, status, update_module)
except Exception:
del cls.registries[db_name]
raise
# load_modules() above can replace the registry by calling
# indirectly new() again (when modules have to be uninstalled).
# Yeah, crazy.
registry = cls.registries[db_name]
cr = registry.cursor()
try:
registry.do_parent_store(cr)
cr.commit()
finally:
cr.close()
registry.ready = True
if update_module:
# only in case of update, otherwise we'll have an infinite reload loop!
cls.signal_registry_change(db_name)
return registry
@classmethod
def delete(cls, db_name):
"""Delete the registry linked to a given database. """
with cls.lock():
if db_name in cls.registries:
cls.registries[db_name].clear_caches()
del cls.registries[db_name]
@classmethod
def delete_all(cls):
"""Delete all the registries. """
with cls.lock():
for db_name in cls.registries.keys():
cls.delete(db_name)
@classmethod
def clear_caches(cls, db_name):
"""Clear caches
This clears the caches associated to methods decorated with
``tools.ormcache`` or ``tools.ormcache_multi`` for all the models
of the given database name.
This method is given to spare you a ``RegistryManager.get(db_name)``
that would loads the given database if it was not already loaded.
"""
with cls.lock():
if db_name in cls.registries:
cls.registries[db_name].clear_caches()
@classmethod
def check_registry_signaling(cls, db_name):
"""
        Check if the modules have changed and perform all necessary operations to update
        the registry of the corresponding database.
        :returns: True if changes have been detected in the database and False otherwise.
"""
changed = False
if openerp.multi_process and db_name in cls.registries:
registry = cls.get(db_name)
cr = registry.cursor()
try:
cr.execute("""
SELECT base_registry_signaling.last_value,
base_cache_signaling.last_value
FROM base_registry_signaling, base_cache_signaling""")
r, c = cr.fetchone()
_logger.debug("Multiprocess signaling check: [Registry - old# %s new# %s] "\
"[Cache - old# %s new# %s]",
registry.base_registry_signaling_sequence, r,
registry.base_cache_signaling_sequence, c)
# Check if the model registry must be reloaded (e.g. after the
# database has been updated by another process).
if registry.base_registry_signaling_sequence is not None and registry.base_registry_signaling_sequence != r:
changed = True
_logger.info("Reloading the model registry after database signaling.")
registry = cls.new(db_name)
# Check if the model caches must be invalidated (e.g. after a write
                # occurred on another process). Don't clear right after a registry
                # has been reloaded.
elif registry.base_cache_signaling_sequence is not None and registry.base_cache_signaling_sequence != c:
changed = True
_logger.info("Invalidating all model caches after database signaling.")
registry.clear_caches()
registry.reset_any_cache_cleared()
registry.base_registry_signaling_sequence = r
registry.base_cache_signaling_sequence = c
finally:
cr.close()
return changed
@classmethod
def signal_caches_change(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
# Check the registries if any cache has been cleared and signal it
# through the database to other processes.
registry = cls.get(db_name)
if registry.any_cache_cleared():
_logger.info("At least one model cache has been cleared, signaling through the database.")
cr = registry.cursor()
r = 1
try:
cr.execute("select nextval('base_cache_signaling')")
r = cr.fetchone()[0]
finally:
cr.close()
registry.base_cache_signaling_sequence = r
registry.reset_any_cache_cleared()
@classmethod
def signal_registry_change(cls, db_name):
if openerp.multi_process and db_name in cls.registries:
_logger.info("Registry changed, signaling through the database")
registry = cls.get(db_name)
cr = registry.cursor()
r = 1
try:
cr.execute("select nextval('base_registry_signaling')")
r = cr.fetchone()[0]
finally:
cr.close()
registry.base_registry_signaling_sequence = r
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,729,547,210,829,207,000 | 38.541082 | 126 | 0.58588 | false |
EDUlib/edx-platform | lms/djangoapps/mobile_api/tests/test_middleware.py | 5 | 7822 | """
Tests for Version Based App Upgrade Middleware
"""
from datetime import datetime
from unittest import mock
import ddt
from django.core.cache import caches
from django.http import HttpRequest, HttpResponse
from pytz import UTC
from lms.djangoapps.mobile_api.middleware import AppVersionUpgrade
from lms.djangoapps.mobile_api.models import AppVersionConfig
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase
@ddt.ddt
class TestAppVersionUpgradeMiddleware(CacheIsolationTestCase):
"""
Tests for version based app upgrade middleware
"""
ENABLED_CACHES = ['default']
def setUp(self):
super().setUp()
self.middleware = AppVersionUpgrade()
self.set_app_version_config()
def set_app_version_config(self):
""" Creates configuration data for platform versions """
AppVersionConfig(platform="iOS", version="1.1.1", expire_at=None, enabled=True).save()
AppVersionConfig(
platform="iOS",
version="2.2.2",
expire_at=datetime(2014, 1, 1, tzinfo=UTC),
enabled=True
).save()
AppVersionConfig(
platform="iOS",
version="4.4.4",
expire_at=datetime(9000, 1, 1, tzinfo=UTC),
enabled=True
).save()
AppVersionConfig(platform="iOS", version="6.6.6", expire_at=None, enabled=True).save()
AppVersionConfig(platform="Android", version="1.1.1", expire_at=None, enabled=True).save()
AppVersionConfig(
platform="Android",
version="2.2.2",
expire_at=datetime(2014, 1, 1, tzinfo=UTC),
enabled=True
).save()
AppVersionConfig(
platform="Android",
version="4.4.4",
expire_at=datetime(5000, 1, 1, tzinfo=UTC),
enabled=True
).save()
AppVersionConfig(platform="Android", version="8.8.8", expire_at=None, enabled=True).save()
def process_middleware(self, user_agent, cache_get_many_calls_for_request=1):
""" Helper function that makes calls to middle process_request and process_response """
fake_request = HttpRequest()
fake_request.META['HTTP_USER_AGENT'] = user_agent
with mock.patch.object(caches['default'], 'get_many', wraps=caches['default'].get_many) as mocked_code:
request_response = self.middleware.process_request(fake_request)
assert cache_get_many_calls_for_request == mocked_code.call_count
with mock.patch.object(caches['default'], 'get_many', wraps=caches['default'].get_many) as mocked_code:
processed_response = self.middleware.process_response(fake_request, request_response or HttpResponse())
assert 0 == mocked_code.call_count
return request_response, processed_response
@ddt.data(
("Mozilla/5.0 (Linux; Android 5.1; Nexus 5 Build/LMY47I; wv) AppleWebKit/537.36 (KHTML, like Gecko) "
"Version/4.0 Chrome/47.0.2526.100 Mobile Safari/537.36 edX/org.edx.mobile/2.0.0"),
("Mozilla/5.0 (iPhone; CPU iPhone OS 9_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) "
"Mobile/13C75 edX/org.edx.mobile/2.2.1"),
("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.116 "
"Safari/537.36"),
)
def test_non_mobile_app_requests(self, user_agent):
with self.assertNumQueries(0):
request_response, processed_response = self.process_middleware(user_agent, 0)
assert request_response is None
assert 200 == processed_response.status_code
assert AppVersionUpgrade.LATEST_VERSION_HEADER not in processed_response
assert AppVersionUpgrade.LAST_SUPPORTED_DATE_HEADER not in processed_response
@ddt.data(
"edX/org.edx.mobile (6.6.6; OS Version 9.2 (Build 13C75))",
"edX/org.edx.mobile (7.7.7; OS Version 9.2 (Build 13C75))",
"Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/8.8.8",
"Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/9.9.9",
)
def test_no_update(self, user_agent):
with self.assertNumQueries(2):
request_response, processed_response = self.process_middleware(user_agent)
assert request_response is None
assert 200 == processed_response.status_code
assert AppVersionUpgrade.LATEST_VERSION_HEADER not in processed_response
assert AppVersionUpgrade.LAST_SUPPORTED_DATE_HEADER not in processed_response
with self.assertNumQueries(0):
self.process_middleware(user_agent)
@ddt.data(
("edX/org.edx.mobile (5.1.1; OS Version 9.2 (Build 13C75))", "6.6.6"),
("edX/org.edx.mobile (5.1.1.RC; OS Version 9.2 (Build 13C75))", "6.6.6"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/5.1.1", "8.8.8"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/5.1.1.RC", "8.8.8"),
)
@ddt.unpack
def test_new_version_available(self, user_agent, latest_version):
with self.assertNumQueries(2):
request_response, processed_response = self.process_middleware(user_agent)
assert request_response is None
assert 200 == processed_response.status_code
assert latest_version == processed_response[AppVersionUpgrade.LATEST_VERSION_HEADER]
assert AppVersionUpgrade.LAST_SUPPORTED_DATE_HEADER not in processed_response
with self.assertNumQueries(0):
self.process_middleware(user_agent)
@ddt.data(
("edX/org.edx.mobile (1.0.1; OS Version 9.2 (Build 13C75))", "6.6.6"),
("edX/org.edx.mobile (1.1.1; OS Version 9.2 (Build 13C75))", "6.6.6"),
("edX/org.edx.mobile (2.0.5.RC; OS Version 9.2 (Build 13C75))", "6.6.6"),
("edX/org.edx.mobile (2.2.2; OS Version 9.2 (Build 13C75))", "6.6.6"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/1.0.1", "8.8.8"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/1.1.1", "8.8.8"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/2.0.5.RC", "8.8.8"),
("Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/2.2.2", "8.8.8"),
)
@ddt.unpack
def test_version_update_required(self, user_agent, latest_version):
with self.assertNumQueries(2):
request_response, processed_response = self.process_middleware(user_agent)
assert request_response is not None
assert 426 == processed_response.status_code
assert latest_version == processed_response[AppVersionUpgrade.LATEST_VERSION_HEADER]
with self.assertNumQueries(0):
self.process_middleware(user_agent)
@ddt.data(
("edX/org.edx.mobile (4.4.4; OS Version 9.2 (Build 13C75))", "6.6.6", '9000-01-01T00:00:00+00:00'),
(
"Dalvik/2.1.0 (Linux; U; Android 5.1; Nexus 5 Build/LMY47I) edX/org.edx.mobile/4.4.4",
"8.8.8",
'5000-01-01T00:00:00+00:00',
),
)
@ddt.unpack
def test_version_update_available_with_deadline(self, user_agent, latest_version, upgrade_date):
with self.assertNumQueries(2):
request_response, processed_response = self.process_middleware(user_agent)
assert request_response is None
assert 200 == processed_response.status_code
assert latest_version == processed_response[AppVersionUpgrade.LATEST_VERSION_HEADER]
assert upgrade_date == processed_response[AppVersionUpgrade.LAST_SUPPORTED_DATE_HEADER]
with self.assertNumQueries(0):
self.process_middleware(user_agent)
| agpl-3.0 | -3,368,105,167,040,972,300 | 47.283951 | 119 | 0.643697 | false |
adamrp/qiime | scripts/compare_alpha_diversity.py | 15 | 12087 | #!/usr/bin/env python
# File created on 06 Jun 2011
from __future__ import division
__author__ = "William Van Treuren"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["William Van Treuren", "Greg Caparaso", "Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "William Van Treuren"
__email__ = "[email protected]"
import os
from os.path import join
from qiime.util import (parse_command_line_parameters,
make_option,
create_dir)
from qiime.compare_alpha_diversity import (compare_alpha_diversities,
_correct_compare_alpha_results,
test_types,
correction_types,
generate_alpha_diversity_boxplots)
script_info = {}
script_info[
'brief_description'] = """This script compares alpha diversities based on a two-sample t-test using either parametric or non-parametric (Monte Carlo) methods."""
script_info['script_description'] = """
This script compares the alpha diversity of samples found in a collated alpha
diversity file. The comparison is done not between samples, but between groups
of samples. The groupings are created from the input category passed via
-c/--category. Any samples which have the same value under that category will
be grouped.
For example, if your mapping file had a category called 'Treatment' that
separated your samples into three groups (Treatment='Control', Treatment='Drug',
Treatment='2xDose'), passing 'Treatment' to this script would cause it to
compare (Control,Drug), (Control,2xDose), (2xDose, Drug) alpha diversity
values. By default the two-sample t-test will be nonparametric (i.e. using
Monte Carlo permutations to calculate the p-value), though the user has the
option to make the test a parametric t-test.
The script creates an output file in tab-separated format where each row is a
different group comparison. The columns in each row denote which two groups of
samples are being compared, as well as the mean and standard deviation of each
group's alpha diversity. Finally, the t-statistic and p-value are reported for
the comparison. This file can be most easily viewed in a spreadsheet program
such as Excel.
Note: Any iterations of a rarefaction at a given depth will be averaged. For
instance, if your collated_alpha file had 10 iterations of the rarefaction at
depth 480, the scores for the alpha diversity metrics of those 10 iterations
would be averaged (within sample). The iterations are not controlled by this
script; when multiple_rarefactions.py is called, the -n option specifies the
number of iterations that have occurred. The multiple comparison correction
takes into account the number of between group comparisons. If you do not know
the rarefaction depth available or you want to use the deepest rarefaction
level available then do not pass -d/--depth and it will default to using the
deepest available.
If t-statistics and/or p-values are None for any of your comparisons, there are
three possible reasons. The first is that there were undefined values in your
collated alpha diversity input file. This occurs if there were too few
sequences in one or more of the samples in the groups involved in those
comparisons to compute alpha diversity at that depth. You can either rerun
%prog passing a lower value for --depth, or you can re-run alpha diversity
after filtering samples with too few sequences. The second is that you had some
comparison where each treatment was represented by only a single sample. It is
not possible to perform a two-sample t-test on two samples each of length 1, so
None will be reported instead. The third possibility occurs when using the
nonparametric t-test with small datasets where the Monte Carlo permutations
don't return a p-value because the distribution of the data has no variance.
The multiple comparisons correction will not penalize you for comparisons that
return as None regardless of origin.
If the means/standard deviations are None for any treatment group, the likely
cause is that there is an \'n/a\' value in the collated_alpha file that was
passed.
"""
script_info['script_usage'] = []
script_info['script_usage'].append(("Comparing alpha diversities",
"The following command takes the following input: a mapping file (which "
"associaties each sample with a number of characteristics), alpha diversity "
"metric (the results of collate_alpha for an alpha diverity metric, like "
"PD_whole_tree), depth (the rarefaction depth to use for comparison), "
"category (the category in the mapping file to determine which samples to "
"compare to each other), and output filepath (a path to the output file to be created). A "
"nonparametric two sample t-test is run to compare the alpha diversities "
"using the default number of Monte Carlo permutations (999).",
"%prog -i PD_whole_tree.txt -m mapping.txt -c Treatment -d 100 -o Treatment_PD100"))
script_info['script_usage'].append(("Comparing alpha diversities",
"Similar to above, but performs comparisons for two categories.",
"%prog -i PD_whole_tree.txt -m mapping.txt -c Treatment,DOB -d 100 -o Treatment_DOB_PD100"))
script_info['script_usage'].append(("Parametric t-test",
"The following command runs a parametric two sample t-test using the "
"t-distribution instead of Monte Carlo permutations at rarefaction depth 100.",
"%prog -i PD_whole_tree.txt -m mapping.txt -c Treatment -d 100 -o "
"PD_d100_parametric -t parametric"))
script_info['script_usage'].append(("Parametric t-test",
"The following command runs a parametric two sample t-test using the "
"t-distribution instead of Monte Carlo permutations at the greatest depth available.",
"%prog -i PD_whole_tree.txt -m mapping.txt -c Treatment -o "
"PD_dmax_parametric -t parametric"))
script_info['output_description'] = """
Generates a tsv stats file and pdf of boxplots for each input category.
Each row in the tsv file corresponds to a comparison between two groups of treatment values,
and includes the means and standard deviations of the two groups' alpha diversities,
along with the results of the two-sample t-test.
"""
script_info[
'script_usage_output_to_remove'] = [
'$PWD/PD_dmax_parametric.txt',
'$PWD/PD_d100_parametric.txt',
'$PWD/PD_d100.txt']
script_info['required_options'] = [
make_option('-i',
'--alpha_diversity_fp',
action='store',
type='existing_filepath',
help='path to collated alpha diversity file (as generated by '
'collate_alpha.py) [REQUIRED]'),
make_option('-m',
'--mapping_fp',
action='store',
type='existing_filepath',
help='path to the mapping file [REQUIRED]'),
make_option('-c',
'--categories',
action='store',
type='string',
help='comma-separated list of categories for comparison [REQUIRED]'),
make_option('-o',
'--output_dir',
action='store',
type='new_dirpath',
help='directory where output files should be stored [REQUIRED]')]
script_info['optional_options'] = [
make_option('-t', '--test_type', type='choice', choices=test_types,
help='the type of test to perform when calculating the p-values. Valid '
'choices: ' + ', '.join(test_types) + '. If test_type is '
'nonparametric, Monte Carlo permutations will be used to determine the '
'p-value. If test_type is parametric, the num_permutations option will '
'be ignored and the t-distribution will be used instead [default: '
'%default]', default='nonparametric'),
make_option('-n', '--num_permutations', type='int', default=999,
help='the number of permutations to perform when calculating the '
        'p-value. Must be at least 10. Only applies if test_type is '
'nonparametric [default: %default]'),
make_option('-p', '--correction_method', type='choice',
choices=correction_types, help='method to use for correcting multiple '
'comparisons. Available methods are bonferroni, fdr, or none. '
'[default: %default]', default='bonferroni'),
make_option('-d', '--depth', type='int', default=None,
help='depth of rarefaction file to use [default: greatest depth]')]
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
mapping_fp = opts.mapping_fp
alpha_diversity_fp = opts.alpha_diversity_fp
categories = opts.categories.split(',')
depth = opts.depth
output_dir = opts.output_dir
correction_method = opts.correction_method
test_type = opts.test_type
num_permutations = opts.num_permutations
if num_permutations < 10:
        option_parser.error('Number of permutations must be greater than or '
'equal to 10.')
create_dir(output_dir)
for category in categories:
stat_output_fp = join(output_dir, '%s_stats.txt' % category)
boxplot_output_fp = join(output_dir, '%s_boxplots.pdf' % category)
alpha_diversity_f = open(alpha_diversity_fp, 'U')
mapping_f = open(mapping_fp, 'U')
ttest_result, alphadiv_avgs = \
compare_alpha_diversities(alpha_diversity_f,
mapping_f,
category,
depth,
test_type,
num_permutations)
alpha_diversity_f.close()
mapping_f.close()
corrected_result = _correct_compare_alpha_results(ttest_result,
correction_method)
# write stats results
stat_output_f = open(stat_output_fp, 'w')
header = ('Group1\tGroup2\tGroup1 mean\tGroup1 std\tGroup2 mean\t'
'Group2 std\tt stat\tp-value')
lines = [header]
for (t0, t1), v in corrected_result.items():
lines.append('\t'.join(map(str, [t0,
t1,
alphadiv_avgs[t0][0],
alphadiv_avgs[t0][1],
alphadiv_avgs[t1][0],
alphadiv_avgs[t1][1],
v[0],
v[1]])))
stat_output_f.write('\n'.join(lines) + '\n')
stat_output_f.close()
# write box plots
alpha_diversity_f = open(alpha_diversity_fp, 'U')
mapping_f = open(mapping_fp, 'U')
boxplot = generate_alpha_diversity_boxplots(alpha_diversity_f,
mapping_f,
category,
depth)
alpha_diversity_f.close()
mapping_f.close()
boxplot.savefig(boxplot_output_fp)
if __name__ == "__main__":
main()
| gpl-2.0 | 6,847,303,833,885,605,000 | 50.653846 | 165 | 0.601555 | false |
GameKinger123x/mtasa-blue | vendor/google-breakpad/src/testing/gtest/test/gtest_test_utils.py | 408 | 10444 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
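# Example of the precedence implemented above (illustrative, not part of the
# original module): with BUILD_DIR=/tmp/out in the environment and
# --build_dir=/tmp/override on the command line, GetFlag('build_dir') returns
# '/tmp/override' -- the command-line flag overrides the environment variable,
# which in turn overrides the default derived from sys.argv[0].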
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
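# Illustrative usage (not part of the original module); both lines assume a
# POSIX shell, while on Windows os.system() already returns the exit code
# directly, as the branches above show:
#
#   GetExitStatus(os.system('exit 3'))      # 3: normal exit with code 3
#   GetExitStatus(os.system('kill -9 $$'))  # -1: terminated by a signal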
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
      An object that represents the outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
        signal               Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
    # The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
      # communicate() returns a (stdout_data, stderr_data) tuple; element 0
      # holds the child's combined output as a single string.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
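# Illustrative usage (not part of the original module):
#
#   p = Subprocess(['python', '-c', 'print 42'], capture_stderr=False)
#   if p.exited and p.exit_code == 0:
#     print p.output  # the child's stdout, captured as a single string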
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| gpl-3.0 | 5,013,386,302,030,868,000 | 33.242623 | 79 | 0.672731 | false |
ettrig/NIPAP | nipap/nipap/errors.py | 2 | 1301 |
class NipapError(Exception):
""" NIPAP base error class.
"""
error_code = 1000
class NipapInputError(NipapError):
""" Erroneous input.
A general input error.
"""
error_code = 1100
class NipapMissingInputError(NipapInputError):
""" Missing input.
Most input is passed in dicts, this could mean a missing key in a dict.
"""
error_code = 1110
class NipapExtraneousInputError(NipapInputError):
""" Extraneous input.
Most input is passed in dicts, this could mean an unknown key in a dict.
"""
error_code = 1120
class NipapNoSuchOperatorError(NipapInputError):
""" A non existent operator was specified.
"""
error_code = 1130
class NipapValueError(NipapError):
""" Something wrong with a value
For example, trying to send an integer when an IP address is expected.
"""
error_code = 1200
class NipapNonExistentError(NipapError):
""" A non existent object was specified
For example, try to get a prefix from a pool which doesn't exist.
"""
error_code = 1300
class NipapDuplicateError(NipapError):
""" The passed object violates unique constraints
For example, create a VRF with a name of an already existing one.
"""
error_code = 1400
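# Illustrative usage (not part of the original module): since the classes
# above form a hierarchy, catching a base class also catches its subclasses,
# and error_code can be passed on to API clients. 'validate' below is a
# hypothetical caller:
#
#     try:
#         validate(input_data)
#     except NipapInputError as exc:  # also catches Missing/Extraneous/...
#         return {'error_code': exc.error_code, 'message': str(exc)}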
| mit | -2,064,641,483,649,590,000 | 18.41791 | 80 | 0.661799 | false |