code | repo_name | path | language | license | size
---|---|---|---|---|---|
from south.v2 import DataMigration
from zinnia.migrations import user_name
from zinnia.migrations import user_table
from zinnia.migrations import user_orm_label
from zinnia.migrations import user_model_label
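# Data migration: make sure the custom 'can_change_status' permission exists
# for the zinnia Entry content type, and can be removed again on rollback.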
class Migration(DataMigration):
def forwards(self, orm):
"""Create the new permission for changing status"""
ct, created = orm['contenttypes.ContentType'].objects.get_or_create(
model='entry', app_label='zinnia')
perm, created = orm['auth.permission'].objects.get_or_create(
content_type=ct, codename='can_change_status',
defaults={'name': 'Can change status'})
def backwards(self, orm):
"""Delete the new permission for changing status"""
ct = orm['contenttypes.ContentType'].objects.get(
model='entry', app_label='zinnia')
perm = orm['auth.permission'].objects.get(
content_type=ct, codename='can_change_status')
perm.delete()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.message': {
'Meta': {'object_name': 'Message'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_message_set'", 'to': "orm['%s']" % user_orm_label})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': user_name, 'db_table': "'%s'" % user_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'zinnia.category': {
'Meta': {'ordering': "['title']", 'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['zinnia.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'zinnia.entry': {
'Meta': {'ordering': "['-creation_date']", 'object_name': 'Entry'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'entries'", 'blank': 'True', 'to': "orm['%s']" % user_orm_label}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'entries'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['zinnia.Category']"}),
'comment_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'end_publication': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2042, 3, 15, 0, 0)'}),
'excerpt': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'pingback_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'related_rel_+'", 'null': 'True', 'to': "orm['zinnia.Entry']"}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'entries'", 'symmetrical': 'False', 'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'start_publication': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tags': ('tagging.fields.TagField', [], {}),
'template': ('django.db.models.fields.CharField', [], {'default': "'zinnia/entry_detail.html'", 'max_length': '250'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['contenttypes', 'auth', 'zinnia']
| pczhaoyun/obtainfo | zinnia/migrations/0008_add_status_permission.py | Python | apache-2.0 | 8,642 |
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
max_uint = 0xffffffffL
print max_uint
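# For each radix 2..36, compute the largest power p such that radix**p still
# fits in an unsigned 32-bit value, then record p and radix**p as table
# entries. The two leading '0' entries pad the tables so that they can be
# indexed directly by the radix value.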
digits = ['0','0']
radii = ['0','0']
for i in range(2, 37):
p = 1
while i**(p+1) <= max_uint:
p = p+1
print i, p, i**p
digits.append(str(p))
radii.append(str(i**p))
print digits, radii
print ", ".join(digits)
print ", ".join(radii)
| tempbottle/ironpython3 | Src/Scripts/radix_generator.py | Python | apache-2.0 | 1,031 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from airflow.www import app
from tests.test_utils.config import conf_vars
from tests.test_utils.decorators import dont_initialize_flask_app_submodules
@pytest.fixture(scope="session")
def minimal_app_for_api():
@dont_initialize_flask_app_submodules(
skip_all_except=["init_appbuilder", "init_api_experimental_auth", "init_api_connexion"]
)
def factory():
with conf_vars({("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}):
return app.create_app(testing=True) # type:ignore
return factory()
@pytest.fixture
def session():
from airflow.utils.session import create_session
with create_session() as session:
yield session
@pytest.fixture(scope="session")
def dagbag():
from airflow.models import DagBag
DagBag(include_examples=True, read_dags_from_db=False).sync_to_db()
return DagBag(include_examples=True, read_dags_from_db=True)
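# Illustrative usage sketch (hypothetical test, not part of this conftest):
# a test module simply requests the fixtures above by name, e.g.
#
#   def test_api_smoke(minimal_app_for_api, session, dagbag):
#       client = minimal_app_for_api.test_client()
#       assert dagbag.dags  # the example DAGs were synced to the DB above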
| apache/incubator-airflow | tests/api_connexion/conftest.py | Python | apache-2.0 | 1,735 |
#!/usr/bin/env python
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
import os
import tempfile
import subprocess
import copy
import re
import datetime
# internal modules:
from yotta.lib.fsutils import mkDirP, rmRf
from yotta.lib.detect import systemDefaultTarget
from . import cli
Test_Complex = {
'module.json': '''{
"name": "test-testdep-a",
"version": "0.0.2",
"description": "Module to test test-dependencies",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"dependencies": {
"test-testdep-b": "*",
"test-testdep-c": "*",
"test-testdep-d": "*"
},
"testDependencies": {
"test-testdep-e": "*"
}
}
''',
'source/a.c': '''
#include "a/a.h"
#include "b/b.h"
#include "c/c.h"
#include "d/d.h"
int a(){
return 1 + b() + c() + d(); // 35
}
''',
'a/a.h':'''
#ifndef __A_H__
#define __A_H__
int a();
#endif
''',
'test/check.c': '''
#include <stdio.h>
#include "a/a.h"
#include "b/b.h"
#include "c/c.h"
#include "d/d.h"
#include "e/e.h"
int main(){
int result = a() + b() + c() + d() + e();
printf("%d\\n", result);
return !(result == 86);
}
'''
}
Test_Trivial_Lib = {
'module.json':'''{
"name": "test-trivial-lib",
"version": "0.0.2",
"description": "Module to test trivial lib compilation",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"dependencies": {
}
}''',
'test-trivial-lib/lib.h': '''
int foo();
''',
'source/lib.c':'''
#include "test-trivial-lib/lib.h"
int foo(){
return 7;
}
'''
}
Test_Trivial_Exe = {
'module.json':'''{
"name": "test-trivial-exe",
"version": "0.0.2",
"description": "Module to test trivial exe compilation",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"dependencies": {
},
"bin":"./source"
}''',
'source/lib.c':'''
int main(){
return 0;
}
'''
}
Test_Build_Info = copy.copy(Test_Trivial_Exe)
Test_Build_Info['source/lib.c'] = '''
#include "stdio.h"
#include YOTTA_BUILD_INFO_HEADER
#define STRINGIFY(s) STRINGIFY_INDIRECT(s)
#define STRINGIFY_INDIRECT(s) #s
int main(){
printf("vcs ID: %s\\n", STRINGIFY(YOTTA_BUILD_VCS_ID));
printf("vcs clean: %d\\n", YOTTA_BUILD_VCS_CLEAN);
printf("build UUID: %s\\n", STRINGIFY(YOTTA_BUILD_UUID));
printf(
"build timestamp: %.4d-%.2d-%.2d-%.2d-%.2d-%.2d\\n",
YOTTA_BUILD_YEAR,
YOTTA_BUILD_MONTH,
YOTTA_BUILD_DAY,
YOTTA_BUILD_HOUR,
YOTTA_BUILD_MINUTE,
YOTTA_BUILD_SECOND
);
return 0;
}
'''
Test_Tests = {
'module.json':'''{
"name": "test-tests",
"version": "0.0.0",
"description": "Test yotta's compilation of tests.",
"keywords": [],
"author": "James Crosby <[email protected]>",
"licenses": [
{
"url": "https://spdx.org/licenses/Apache-2.0",
"type": "Apache-2.0"
}
],
"dependencies": {},
"targetDependencies": {}
}''',
'source/foo.c':'''#include "stdio.h"
int foo(){
printf("foo!\\n");
return 7;
}''',
'test-tests/foo.h':'int foo();',
'test/a/bar.c':'#include "test-tests/foo.h"\nint main(){ foo(); return 0; }',
'test/b/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/b/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/c/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/c/b/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/d/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/d/a/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/e/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/e/b/a/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/f/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/f/a/b/a/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }',
'test/g/a/a/a/bar.c':'#include "test-tests/foo.h"\nint bar(); int main(){ foo(); bar(); return 0; }',
'test/g/a/a/b/bar.c':'#include "stdio.h"\nint bar(){ printf("bar!\\n"); return 7; }'
}
def isWindows():
    # native builds are not supported on Windows yet, so build tests are skipped there
return os.name == 'nt'
class TestCLIBuild(unittest.TestCase):
def writeTestFiles(self, files, add_space_in_path=False):
test_dir = tempfile.mkdtemp()
if add_space_in_path:
test_dir = test_dir + ' spaces in path'
for path, contents in files.items():
path_dir, file_name = os.path.split(path)
path_dir = os.path.join(test_dir, path_dir)
mkDirP(path_dir)
with open(os.path.join(path_dir, file_name), 'w') as f:
f.write(contents)
return test_dir
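    # Illustrative note: writeTestFiles materialises one of the dicts above into
    # a temporary module directory, e.g. self.writeTestFiles(Test_Trivial_Lib)
    # produces <tmpdir>/module.json and <tmpdir>/source/lib.c ready for building.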
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_buildTrivialLib(self):
test_dir = self.writeTestFiles(Test_Trivial_Lib)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
rmRf(test_dir)
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_buildTrivialExe(self):
test_dir = self.writeTestFiles(Test_Trivial_Exe)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
rmRf(test_dir)
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_buildComplex(self):
test_dir = self.writeTestFiles(Test_Complex)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
rmRf(test_dir)
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_buildComplexSpaceInPath(self):
test_dir = self.writeTestFiles(Test_Complex, True)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
rmRf(test_dir)
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_buildTests(self):
test_dir = self.writeTestFiles(Test_Tests, True)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
stdout = self.runCheckCommand(['--target', systemDefaultTarget(), 'test'], test_dir)
self.assertIn('test-a', stdout)
self.assertIn('test-c', stdout)
self.assertIn('test-d', stdout)
self.assertIn('test-e', stdout)
self.assertIn('test-f', stdout)
self.assertIn('test-g', stdout)
rmRf(test_dir)
@unittest.skipIf(isWindows(), "can't build natively on windows yet")
def test_buildInfo(self):
test_dir = self.writeTestFiles(Test_Build_Info, True)
# commit all the test files to git so that the VCS build info gets
# defined:
subprocess.check_call(['git', 'init', '-q'], cwd=test_dir)
subprocess.check_call(['git', 'add', '.'], cwd=test_dir)
subprocess.check_call(['git', 'commit', '-m', 'test build info automated commit', '-q'], cwd=test_dir)
self.runCheckCommand(['--target', systemDefaultTarget(), 'build'], test_dir)
build_time = datetime.datetime.utcnow()
output = subprocess.check_output(['./build/' + systemDefaultTarget().split(',')[0] + '/source/test-trivial-exe'], cwd=test_dir).decode()
self.assertIn('vcs clean: 1', output)
# check build timestamp
self.assertIn('build timestamp: ', output)
build_timestamp_s = re.search('build timestamp: (.*)\n', output)
self.assertTrue(build_timestamp_s)
build_timestamp_s = build_timestamp_s.group(1)
build_time_parsed = datetime.datetime.strptime(build_timestamp_s, '%Y-%m-%d-%H-%M-%S')
build_time_skew = build_time_parsed - build_time
self.assertTrue(abs(build_time_skew.total_seconds()) < 3)
def runCheckCommand(self, args, test_dir):
stdout, stderr, statuscode = cli.run(args, cwd=test_dir)
if statuscode != 0:
print('command failed with status %s' % statuscode)
print(stdout)
print(stderr)
self.assertEqual(statuscode, 0)
return stdout + stderr
| BlackstoneEngineering/yotta | yotta/test/cli/build.py | Python | apache-2.0 | 8,600 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from solum.objects import registry
from solum.objects.sqlalchemy import operation
from solum.tests import base
from solum.tests import utils
class TestOperation(base.BaseTestCase):
def setUp(self):
super(TestOperation, self).setUp()
self.db = self.useFixture(utils.Database())
self.ctx = utils.dummy_context()
self.data = [{'project_id': self.ctx.tenant,
'user_id': 'fred',
'uuid': 'ce43e347f0b0422825245b3e5f140a81cef6e65b',
'name': 'o1',
'description': 'Scale up the resource',
'documentation': 'http://documentation.link',
'target_resource': 'http://target.resource.link'}]
utils.create_models_from_data(operation.Operation, self.data, self.ctx)
def test_objects_registered(self):
self.assertIsNotNone(registry.Operation)
self.assertIsNotNone(registry.OperationList)
def test_get_all(self):
lst = operation.OperationList()
self.assertEqual(1, len(lst.get_all(self.ctx)))
def test_check_data(self):
pl = operation.Operation().get_by_id(self.ctx, self.data[0]['id'])
for key, value in self.data[0].items():
self.assertEqual(value, getattr(pl, key))
| stackforge/solum | solum/tests/objects/test_operation.py | Python | apache-2.0 | 1,849 |
#!/usr/bin/python -u
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import SkipTest
import six
import test.functional as tf
from test.functional import cluster_info
from test.functional.tests import Utils, Base, BaseEnv
from test.functional.swift_test_client import Account, Connection, \
ResponseError
def setUpModule():
tf.setup_package()
def tearDownModule():
tf.teardown_package()
class TestDomainRemapEnv(BaseEnv):
domain_remap_enabled = None # tri-state: None initially, then True/False
@classmethod
def setUp(cls):
cls.conn = Connection(tf.config)
cls.conn.authenticate()
if cls.domain_remap_enabled is None:
cls.domain_remap_enabled = 'domain_remap' in cluster_info
if not cls.domain_remap_enabled:
return
cls.account = Account(
cls.conn, tf.config.get('account', tf.config['username']))
cls.account.delete_containers()
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.obj = cls.container.file(Utils.create_name())
cls.obj.write(b'obj contents')
cls.obj_slash = cls.container.file('/v1')
cls.obj_slash.write(b'obj contents')
class TestDomainRemap(Base):
env = TestDomainRemapEnv
set_up = False
def setUp(self):
super(TestDomainRemap, self).setUp()
if self.env.domain_remap_enabled is False:
raise SkipTest("Domain Remap is not enabled")
elif self.env.domain_remap_enabled is not True:
# just some sanity checking
raise Exception(
"Expected domain_remap_enabled to be True/False, got %r" %
(self.env.domain_remap_enabled,))
# domain_remap middleware does not advertise its storage_domain values
# in swift /info responses so a storage_domain must be configured in
# test.conf for these tests to succeed
if not tf.config.get('storage_domain'):
raise SkipTest('Domain Remap storage_domain not configured in %s' %
tf.config['__file__'])
storage_domain = tf.config.get('storage_domain')
self.acct_domain_dash = '%s.%s' % (self.env.account.conn.account_name,
storage_domain)
self.acct_domain_underscore = '%s.%s' % (
self.env.account.conn.account_name.replace('_', '-'),
storage_domain)
self.cont_domain_dash = '%s.%s.%s' % (
self.env.container.name,
self.env.account.conn.account_name,
storage_domain)
self.cont_domain_underscore = '%s.%s.%s' % (
self.env.container.name,
self.env.account.conn.account_name.replace('_', '-'),
storage_domain)
def test_GET_remapped_account(self):
for domain in (self.acct_domain_dash, self.acct_domain_underscore):
self.env.account.conn.make_request('GET', '/',
hdrs={'Host': domain},
cfg={'absolute_path': True})
self.assert_status(200)
body = self.env.account.conn.response.read()
if not six.PY2:
body = body.decode('utf8')
self.assertIn(self.env.container.name, body.split('\n'))
path = '/'.join(['', self.env.container.name])
self.env.account.conn.make_request('GET', path,
hdrs={'Host': domain},
cfg={'absolute_path': True})
self.assert_status(200)
body = self.env.account.conn.response.read()
if not six.PY2:
body = body.decode('utf8')
self.assertIn(self.env.obj.name, body.split('\n'))
self.assertIn(self.env.obj_slash.name, body.split('\n'))
for obj in (self.env.obj, self.env.obj_slash):
path = '/'.join(['', self.env.container.name, obj.name])
self.env.account.conn.make_request('GET', path,
hdrs={'Host': domain},
cfg={'absolute_path': True})
self.assert_status(200)
self.assert_body('obj contents')
def test_PUT_remapped_account(self):
for domain in (self.acct_domain_dash, self.acct_domain_underscore):
# Create a container
new_container_name = Utils.create_name()
path = '/'.join(['', new_container_name])
self.env.account.conn.make_request('PUT', path,
data='new obj contents',
hdrs={'Host': domain},
cfg={'absolute_path': True})
self.assert_status(201)
self.assertIn(new_container_name, self.env.account.containers())
# Create an object
new_obj_name = Utils.create_name()
path = '/'.join(['', self.env.container.name, new_obj_name])
self.env.account.conn.make_request('PUT', path,
data='new obj contents',
hdrs={'Host': domain},
cfg={'absolute_path': True})
self.assert_status(201)
new_obj = self.env.container.file(new_obj_name)
self.assertEqual(new_obj.read(), b'new obj contents')
def test_GET_remapped_container(self):
for domain in (self.cont_domain_dash, self.cont_domain_underscore):
self.env.account.conn.make_request('GET', '/',
hdrs={'Host': domain},
cfg={'absolute_path': True})
self.assert_status(200)
body = self.env.account.conn.response.read()
if not six.PY2:
body = body.decode('utf8')
self.assertIn(self.env.obj.name, body.split('\n'))
self.assertIn(self.env.obj_slash.name, body.split('\n'))
for obj in (self.env.obj, self.env.obj_slash):
path = '/'.join(['', obj.name])
self.env.account.conn.make_request('GET', path,
hdrs={'Host': domain},
cfg={'absolute_path': True})
self.assert_status(200)
self.assert_body('obj contents')
def test_PUT_remapped_container(self):
for domain in (self.cont_domain_dash, self.cont_domain_underscore):
new_obj_name = Utils.create_name()
path = '/'.join(['', new_obj_name])
self.env.account.conn.make_request('PUT', path,
data='new obj contents',
hdrs={'Host': domain},
cfg={'absolute_path': True})
self.assert_status(201)
new_obj = self.env.container.file(new_obj_name)
self.assertEqual(new_obj.read(), b'new obj contents')
| openstack/swift | test/functional/test_domain_remap.py | Python | apache-2.0 | 7,932 |
from __future__ import absolute_import
from contextlib import contextmanager, nested
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.test import TestCase as BaseTestCase
from django.test.client import Client
from django.test.utils import modify_settings, override_settings
from mock import patch
from nose.tools import make_decorator, ok_
AUTHENTICATION_BACKENDS = (
'mozillians.common.tests.authentication.DummyAuthenticationBackend',
)
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
)
ES_INDEXES = {
'default': 'mozillians-test',
'public': 'mozillians-public-test'
}
@override_settings(AUTHENTICATION_BACKENDS=AUTHENTICATION_BACKENDS,
PASSWORD_HASHERS=PASSWORD_HASHERS)
@modify_settings(MIDDLEWARE={'remove': 'mozilla_django_oidc.middleware.RefreshIDToken'})
class TestCase(BaseTestCase):
@contextmanager
def login(self, user):
client = Client()
client.login(email=user.email)
yield client
def requires_login():
def decorate(func):
def newfunc(*args, **kwargs):
with nested(
patch('mozillians.common.middleware.messages.warning'),
patch('mozillians.common.middleware.login_required',
wraps=login_required)) as (messages_mock, login_mock):
func(*args, **kwargs)
ok_(messages_mock.called, 'messages.warning() was not called.')
ok_(login_mock.called, 'login_required() was not called.')
newfunc = make_decorator(func)(newfunc)
return newfunc
return decorate
def requires_vouch():
def decorate(func):
def newfunc(*args, **kwargs):
with nested(
patch('mozillians.common.middleware.messages.error'),
patch('mozillians.common.middleware.redirect',
wraps=redirect)) as (messages_mock, redirect_mock):
func(*args, **kwargs)
                ok_(messages_mock.called, 'messages.error() was not called.')
redirect_mock.assert_called_with('phonebook:home')
newfunc = make_decorator(func)(newfunc)
return newfunc
return decorate
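# Illustrative sketch (hypothetical test case, not part of this module): the
# decorators above wrap test methods that exercise login- or vouch-protected
# views and assert that the middleware intervened, e.g.
#
#   class ProtectedViewTests(TestCase):
#       @requires_login()
#       def test_anonymous_is_redirected(self):
#           Client().get('/some-protected-url/', follow=True)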
| akatsoulas/mozillians | mozillians/common/tests/__init__.py | Python | bsd-3-clause | 2,319 |
from django.conf.urls.defaults import *
from piston.resource import Resource
from api_v2.system_handler import SystemHandler
from api_v2.networkadapter_handler import NetworkAdapterHandler
from api_v2.keyvalue_handler import KeyValueHandler
from api_v2.truth_handler import TruthHandler
from api_v2.dhcp_handler import DHCPHandler
from api_v2.reverse_dns_handler import ReverseDNSHandler
from api_v2.system_rack_handler import SystemRackHandler
from api_v2.system_status_handler import SystemStatusHandler
from django.views.decorators.cache import cache_control
cached_resource = cache_control(public=True, max_age=600, s_maxage=600)
systems_handler = Resource(SystemHandler)
network_adapter_handler = Resource(NetworkAdapterHandler)
keyvalue_handler = Resource(KeyValueHandler)
reverse_dns_handler = Resource(ReverseDNSHandler)
dhcp_handler = Resource(DHCPHandler)
system_rack_handler = Resource(SystemRackHandler)
system_status_handler = Resource(SystemStatusHandler)
urlpatterns = patterns('',
url(r'^dhcp/(?P<dhcp_scope>[^/]+)/(?P<dhcp_action>[^/]+)', cached_resource(dhcp_handler)),
url(r'^dhcp/', cached_resource(dhcp_handler)),
url(r'^reverse_dns/(?P<reverse_dns_zone>[^/]+)/(?P<reverse_dns_action>[^/]+)', cached_resource(reverse_dns_handler)),
url(r'^reverse_dns/', cached_resource(reverse_dns_handler)),
url(r'^system/(?P<system_id>[^/]+)/', cached_resource(systems_handler)),
url(r'^systems/', cached_resource(systems_handler)),
url(r'^systemrack/(?P<system_rack_id>[^/]+)/', cached_resource(system_rack_handler)),
url(r'^systemrack/', cached_resource(system_rack_handler)),
url(r'^systemstatus/(?P<system_status_id>[^/]+)/', cached_resource(system_status_handler)),
url(r'^systemstatus/', cached_resource(system_status_handler)),
url(r'^keyvalue/(?P<key_value_id>[^/]+)/', cached_resource(keyvalue_handler)),
url(r'^keyvalue/', cached_resource(keyvalue_handler)),
url(r'^networkadapter/(?P<network_adapter_id>[^/]+)/', cached_resource(network_adapter_handler)),
url(r'^networkadapter/', cached_resource(network_adapter_handler)),
)
| mozilla/inventory | api_v1/urls.py | Python | bsd-3-clause | 2,107 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from operator import itemgetter
import itertools
from datetime import datetime
def format_datetime(dt):
""" Format datetime.datetime object to make HAR validator happy """
return dt.isoformat() + 'Z'
def get_duration(start, end=None):
""" Return duration between `start` and `end` datetimes in HAR format """
if end is None:
end = datetime.utcnow()
elapsed = (end - start).total_seconds()
return int(elapsed * 1000) # ms
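# Example: with start = datetime(2015, 1, 1, 0, 0, 0) and
# end = datetime(2015, 1, 1, 0, 0, 1, 500000), get_duration(start, end) == 1500.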
def without_private(dct):
return {k: v for (k, v) in dct.items() if not k.startswith('_')}
def entries2pages(entries):
""" Group HAR entries into pages by pageref """
pages = []
for pageref, group in itertools.groupby(entries, key=itemgetter("pageref")):
pages.append(list(group))
return pages
| sunu/splash | splash/har/utils.py | Python | bsd-3-clause | 847 |
from __future__ import absolute_import, print_function
import six
from sentry.testutils import APITestCase
from sentry.testutils.helpers.datetime import iso_format, before_now
from sentry.models import GroupShare
class SharedGroupDetailsTest(APITestCase):
def test_simple(self):
self.login_as(user=self.user)
min_ago = iso_format(before_now(minutes=1))
event = self.store_event(data={"timestamp": min_ago}, project_id=self.project.id)
group = event.group
share_id = group.get_share_id()
assert share_id is None
GroupShare.objects.create(project_id=group.project_id, group=group)
share_id = group.get_share_id()
assert share_id is not None
url = u"/api/0/shared/issues/{}/".format(share_id)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == six.text_type(group.id)
assert response.data["latestEvent"]["id"] == six.text_type(event.event_id)
assert response.data["project"]["slug"] == group.project.slug
assert response.data["project"]["organization"]["slug"] == group.organization.slug
def test_feature_disabled(self):
self.login_as(user=self.user)
group = self.create_group()
org = group.organization
org.flags.disable_shared_issues = True
org.save()
share_id = group.get_share_id()
assert share_id is None
GroupShare.objects.create(project_id=group.project_id, group=group)
share_id = group.get_share_id()
assert share_id is not None
url = u"/api/0/shared/issues/{}/".format(share_id)
response = self.client.get(url, format="json")
assert response.status_code == 404
def test_permalink(self):
group = self.create_group()
share_id = group.get_share_id()
assert share_id is None
GroupShare.objects.create(project_id=group.project_id, group=group)
share_id = group.get_share_id()
assert share_id is not None
url = u"/api/0/shared/issues/{}/".format(share_id)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert not response.data["permalink"] # not show permalink when not logged in
self.login_as(user=self.user)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["permalink"] # show permalink when logged in
| mvaled/sentry | tests/sentry/api/endpoints/test_shared_group_details.py | Python | bsd-3-clause | 2,593 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
''' Runs various chrome tests through valgrind_test.py.'''
import glob
import logging
import optparse
import os
import subprocess
import sys
import logging_utils
import path_utils
import common
import valgrind_test
class TestNotFound(Exception): pass
class MultipleGTestFiltersSpecified(Exception): pass
class BuildDirNotFound(Exception): pass
class BuildDirAmbiguous(Exception): pass
class ExecutableNotFound(Exception): pass
class BadBinary(Exception): pass
class ChromeTests:
SLOW_TOOLS = ["drmemory"]
def __init__(self, options, args, test):
if ':' in test:
(self._test, self._gtest_filter) = test.split(':', 1)
else:
self._test = test
self._gtest_filter = options.gtest_filter
if self._test not in self._test_list:
raise TestNotFound("Unknown test: %s" % test)
if options.gtest_filter and options.gtest_filter != self._gtest_filter:
raise MultipleGTestFiltersSpecified("Can not specify both --gtest_filter "
"and --test %s" % test)
self._options = options
self._args = args
# Compute the top of the tree (the "source dir") from the script dir
# (where this script lives). We assume that the script dir is in
# tools/drmemory/scripts relative to the top of the tree.
script_dir = os.path.dirname(path_utils.ScriptDir())
self._source_dir = os.path.dirname(os.path.dirname(script_dir))
# Setup Dr. Memory if it's not set up yet.
drmem_cmd = os.getenv("DRMEMORY_COMMAND")
if not drmem_cmd:
drmem_sfx = os.path.join(script_dir, "drmemory-windows-sfx.exe")
if not os.path.isfile(drmem_sfx):
raise RuntimeError, "Cannot find drmemory-windows-sfx.exe"
drmem_dir = os.path.join(script_dir, "unpacked")
subprocess.call([drmem_sfx, "-o" + drmem_dir, "-y"], 0)
drmem_cmd = os.path.join(drmem_dir, "bin", "drmemory.exe")
os.environ["DRMEMORY_COMMAND"] = drmem_cmd
# since this path is used for string matching, make sure it's always
# an absolute Unix-style path
self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')
self._command_preamble = ["--source-dir=%s" % (self._source_dir)]
if not self._options.build_dir:
dirs = [
os.path.join(self._source_dir, "xcodebuild", "Debug"),
os.path.join(self._source_dir, "out", "Debug"),
os.path.join(self._source_dir, "build", "Debug"),
]
build_dir = [d for d in dirs if os.path.isdir(d)]
if len(build_dir) > 1:
raise BuildDirAmbiguous("Found more than one suitable build dir:\n"
"%s\nPlease specify just one "
"using --build-dir" % ", ".join(build_dir))
elif build_dir:
self._options.build_dir = build_dir[0]
else:
self._options.build_dir = None
if self._options.build_dir:
build_dir = os.path.abspath(self._options.build_dir)
self._command_preamble += ["--build-dir=%s" % (self._options.build_dir)]
def _EnsureBuildDirFound(self):
if not self._options.build_dir:
raise BuildDirNotFound("Oops, couldn't find a build dir, please "
"specify it manually using --build-dir")
def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
'''Generates the default command array that most tests will use.'''
if exe and common.IsWindows():
exe += '.exe'
cmd = list(self._command_preamble)
# Find all suppressions matching the following pattern:
# tools/valgrind/TOOL/suppressions[_PLATFORM].txt
# and list them with --suppressions= prefix.
script_dir = path_utils.ScriptDir()
suppression_file = os.path.join(script_dir, "..", "suppressions.txt")
if os.path.exists(suppression_file):
cmd.append("--suppressions=%s" % suppression_file)
# Platform-specific suppression
for platform in common.PlatformNames():
platform_suppression_file = \
os.path.join(script_dir, "..", 'suppressions_%s.txt' % platform)
if os.path.exists(platform_suppression_file):
cmd.append("--suppressions=%s" % platform_suppression_file)
if self._options.valgrind_tool_flags:
cmd += self._options.valgrind_tool_flags.split(" ")
if self._options.keep_logs:
cmd += ["--keep_logs"]
if valgrind_test_args != None:
for arg in valgrind_test_args:
cmd.append(arg)
if exe:
self._EnsureBuildDirFound()
exe_path = os.path.join(self._options.build_dir, exe)
if not os.path.exists(exe_path):
raise ExecutableNotFound("Couldn't find '%s'" % exe_path)
cmd.append(exe_path)
    # Valgrind runs tests slowly, so slow tests hurt more; show elapsed time
# so we can find the slowpokes.
cmd.append("--gtest_print_time")
# Built-in test launcher for gtest-based executables runs tests using
# multiple process by default. Force the single-process mode back.
cmd.append("--single-process-tests")
if self._options.gtest_repeat:
cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
if self._options.gtest_shuffle:
cmd.append("--gtest_shuffle")
if self._options.gtest_break_on_failure:
cmd.append("--gtest_break_on_failure")
if self._options.test_launcher_bot_mode:
cmd.append("--test-launcher-bot-mode")
if self._options.test_launcher_total_shards is not None:
cmd.append("--test-launcher-total-shards=%d" % self._options.test_launcher_total_shards)
if self._options.test_launcher_shard_index is not None:
cmd.append("--test-launcher-shard-index=%d" % self._options.test_launcher_shard_index)
return cmd
def Run(self):
''' Runs the test specified by command-line argument --test '''
logging.info("running test %s" % (self._test))
return self._test_list[self._test](self)
def _AppendGtestFilter(self, tool, name, cmd):
'''Append an appropriate --gtest_filter flag to the googletest binary
invocation.
If the user passed his own filter mentioning only one test, just use it.
    Otherwise, filter out tests listed in the appropriate gtest_exclude files.
'''
if (self._gtest_filter and
":" not in self._gtest_filter and
"?" not in self._gtest_filter and
"*" not in self._gtest_filter):
cmd.append("--gtest_filter=%s" % self._gtest_filter)
return
filters = []
gtest_files_dir = os.path.join(path_utils.ScriptDir(), "gtest_exclude")
gtest_filter_files = [
os.path.join(gtest_files_dir, name + ".gtest-%s.txt" % tool.ToolName())]
# Use ".gtest.txt" files only for slow tools, as they now contain
# Valgrind- and Dr.Memory-specific filters.
# TODO(glider): rename the files to ".gtest_slow.txt"
if tool.ToolName() in ChromeTests.SLOW_TOOLS:
gtest_filter_files += [os.path.join(gtest_files_dir, name + ".gtest.txt")]
for platform_suffix in common.PlatformNames():
gtest_filter_files += [
os.path.join(gtest_files_dir, name + ".gtest_%s.txt" % platform_suffix),
os.path.join(gtest_files_dir, name + ".gtest-%s_%s.txt" % \
(tool.ToolName(), platform_suffix))]
logging.info("Reading gtest exclude filter files:")
for filename in gtest_filter_files:
# strip the leading absolute path (may be very long on the bot)
# and the following / or \.
readable_filename = filename.replace("\\", "/") # '\' on Windows
readable_filename = readable_filename.replace(self._source_dir, "")[1:]
if not os.path.exists(filename):
logging.info(" \"%s\" - not found" % readable_filename)
continue
logging.info(" \"%s\" - OK" % readable_filename)
f = open(filename, 'r')
for line in f.readlines():
if line.startswith("#") or line.startswith("//") or line.isspace():
continue
line = line.rstrip()
test_prefixes = ["FLAKY", "FAILS"]
for p in test_prefixes:
# Strip prefixes from the test names.
line = line.replace(".%s_" % p, ".")
# Exclude the original test name.
filters.append(line)
if line[-2:] != ".*":
# List all possible prefixes if line doesn't end with ".*".
for p in test_prefixes:
filters.append(line.replace(".", ".%s_" % p))
# Get rid of duplicates.
filters = set(filters)
gtest_filter = self._gtest_filter
if len(filters):
if gtest_filter:
gtest_filter += ":"
if gtest_filter.find("-") < 0:
gtest_filter += "-"
else:
gtest_filter = "-"
gtest_filter += ":".join(filters)
if gtest_filter:
cmd.append("--gtest_filter=%s" % gtest_filter)
@staticmethod
def ShowTests():
test_to_names = {}
for name, test_function in ChromeTests._test_list.iteritems():
test_to_names.setdefault(test_function, []).append(name)
name_to_aliases = {}
for names in test_to_names.itervalues():
names.sort(key=lambda name: len(name))
name_to_aliases[names[0]] = names[1:]
print
print "Available tests:"
print "----------------"
for name, aliases in sorted(name_to_aliases.iteritems()):
if aliases:
print " {} (aka {})".format(name, ', '.join(aliases))
else:
print " {}".format(name)
def SetupLdPath(self, requires_build_dir):
if requires_build_dir:
self._EnsureBuildDirFound()
elif not self._options.build_dir:
return
# Append build_dir to LD_LIBRARY_PATH so external libraries can be loaded.
if (os.getenv("LD_LIBRARY_PATH")):
os.putenv("LD_LIBRARY_PATH", "%s:%s" % (os.getenv("LD_LIBRARY_PATH"),
self._options.build_dir))
else:
os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
def SimpleTest(self, module, name, valgrind_test_args=None, cmd_args=None):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, name, valgrind_test_args)
self._AppendGtestFilter(tool, name, cmd)
cmd.extend(['--test-tiny-timeout=1000'])
if cmd_args:
cmd.extend(cmd_args)
self.SetupLdPath(True)
return tool.Run(cmd, module)
def RunCmdLine(self):
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool, None, self._args)
self.SetupLdPath(False)
return tool.Run(cmd, None)
def TestPDFiumUnitTests(self):
return self.SimpleTest("pdfium_unittests", "pdfium_unittests")
def TestPDFiumEmbedderTests(self):
return self.SimpleTest("pdfium_embeddertests", "pdfium_embeddertests")
def TestPDFiumTest(self, script_name):
# Build the command line in 'cmd'.
# It's going to be roughly
# python valgrind_test.py ...
# but we'll use the --indirect_pdfium_test flag to valgrind_test.py
# to avoid valgrinding python.
# Start by building the valgrind_test.py commandline.
tool = valgrind_test.CreateTool(self._options.valgrind_tool)
cmd = self._DefaultCommand(tool)
cmd.append("--trace_children")
cmd.append("--indirect_pdfium_test")
cmd.append("--ignore_exit_code")
# Now build script_cmd, the run_corpus_tests commandline.
script = os.path.join(self._source_dir, "testing", "tools", script_name)
script_cmd = ["python", script]
if self._options.build_dir:
script_cmd.extend(["--build-dir", self._options.build_dir])
# TODO(zhaoqin): it only runs in single process mode now,
    # need to figure out why it does not work with test_one_file_parallel
# in run_corpus_tests.py.
if script_name == "run_corpus_tests.py":
script_cmd.extend(["-j", "1"])
# Now run script_cmd with the wrapper in cmd
cmd.append("--")
cmd.extend(script_cmd)
ret = tool.Run(cmd, "layout", min_runtime_in_seconds=0)
return ret
def TestPDFiumJavascript(self):
return self.TestPDFiumTest("run_javascript_tests.py")
def TestPDFiumPixel(self):
return self.TestPDFiumTest("run_pixel_tests.py")
def TestPDFiumCorpus(self):
return self.TestPDFiumTest("run_corpus_tests.py")
# The known list of tests.
_test_list = {
"cmdline" : RunCmdLine,
"pdfium_corpus": TestPDFiumCorpus,
"pdfium_embeddertests": TestPDFiumEmbedderTests,
"pdfium_javascript": TestPDFiumJavascript,
"pdfium_pixel": TestPDFiumPixel,
"pdfium_unittests": TestPDFiumUnitTests,
}
def _main():
parser = optparse.OptionParser("usage: %prog -b <dir> -t <test> "
"[-t <test> ...]")
parser.add_option("--help-tests", dest="help_tests", action="store_true",
default=False, help="List all available tests")
parser.add_option("-b", "--build-dir",
help="the location of the compiler output")
parser.add_option("--target", help="Debug or Release")
parser.add_option("-t", "--test", action="append", default=[],
help="which test to run, supports test:gtest_filter format "
"as well.")
parser.add_option("--gtest_filter",
help="additional arguments to --gtest_filter")
parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
parser.add_option("--gtest_shuffle", action="store_true", default=False,
help="Randomize tests' orders on every iteration.")
parser.add_option("--gtest_break_on_failure", action="store_true",
default=False,
help="Drop in to debugger on assertion failure. Also "
"useful for forcing tests to exit with a stack dump "
"on the first assertion failure when running with "
"--gtest_repeat=-1")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="verbose output - enable debug log messages")
parser.add_option("--tool", dest="valgrind_tool", default="drmemory_full",
help="specify a valgrind tool to run the tests under")
parser.add_option("--tool_flags", dest="valgrind_tool_flags", default="",
help="specify custom flags for the selected valgrind tool")
parser.add_option("--keep_logs", action="store_true", default=False,
help="store memory tool logs in the <tool>.logs directory "
"instead of /tmp.\nThis can be useful for tool "
"developers/maintainers.\nPlease note that the <tool>"
".logs directory will be clobbered on tool startup.")
parser.add_option("--test-launcher-bot-mode", action="store_true",
help="run the tests with --test-launcher-bot-mode")
parser.add_option("--test-launcher-total-shards", type=int,
help="run the tests with --test-launcher-total-shards")
parser.add_option("--test-launcher-shard-index", type=int,
help="run the tests with --test-launcher-shard-index")
options, args = parser.parse_args()
# Bake target into build_dir.
if options.target and options.build_dir:
assert (options.target !=
os.path.basename(os.path.dirname(options.build_dir)))
options.build_dir = os.path.join(os.path.abspath(options.build_dir),
options.target)
if options.verbose:
logging_utils.config_root(logging.DEBUG)
else:
logging_utils.config_root()
if options.help_tests:
ChromeTests.ShowTests()
return 0
if not options.test:
parser.error("--test not specified")
if len(options.test) != 1 and options.gtest_filter:
parser.error("--gtest_filter and multiple tests don't make sense together")
for t in options.test:
tests = ChromeTests(options, args, t)
ret = tests.Run()
if ret: return ret
return 0
if __name__ == "__main__":
sys.exit(_main())
| andoma/pdfium | tools/drmemory/scripts/pdfium_tests.py | Python | bsd-3-clause | 16,135 |
from django.contrib import admin
from markedit.widgets import AdminMarkEdit
class MarkEditAdmin(admin.ModelAdmin):
class MarkEdit:
fields = ['text', ]
options = {}
class Media:
css = {'all': (
'//ajax.googleapis.com/ajax/libs/jqueryui/1.10.4/themes/smoothness/jquery-ui.css',
'css/jquery.markedit.css',
)}
js = ('js/jquery.admin.js',
'//ajax.googleapis.com/ajax/libs/jqueryui/1.10.4/jquery-ui.js',
'js/jquery.markedit.js',
'js/showdown.js', )
def formfield_for_dbfield(self, db_field, **kwargs):
formfield = super(MarkEditAdmin, self).formfield_for_dbfield(db_field, **kwargs)
if db_field.name in self.MarkEdit.fields:
formfield.widget = AdminMarkEdit(attrs={
'options': self.MarkEdit.options,
})
return formfield
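# Illustrative sketch (hypothetical model admin, not part of this module): a
# concrete admin opts its text fields into the MarkEdit widget by subclassing:
#
#   class ArticleAdmin(MarkEditAdmin):
#       class MarkEdit:
#           fields = ['body']
#           options = {}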
| SaptakS/pune.pycon.org | markedit/admin.py | Python | bsd-3-clause | 901 |
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
"""
base class for interfaces to indexing engines for pootle
"""
import os
import translate.lang.data
def is_available():
"""Check if this indexing engine interface is usable.
This function must exist in every module that contains indexing engine
interfaces.
:return: is this interface usable?
:rtype: bool
"""
return False
class CommonDatabase(object):
"""Base class for indexing support.
Any real implementation must override most methods of this class.
"""
field_analyzers = {}
"""mapping of field names and analyzers - see
:meth:`~.CommonDatabase.set_field_analyzers`"""
ANALYZER_EXACT = 0
"""exact matching: the query string must equal the whole term string"""
ANALYZER_PARTIAL = 1 << 1
"""partial matching: a document matches, even if the query string only
matches the beginning of the term value."""
ANALYZER_TOKENIZE = 1 << 2
"""tokenize terms and queries automatically"""
ANALYZER_DEFAULT = ANALYZER_TOKENIZE | ANALYZER_PARTIAL
"""the default analyzer to be used if nothing is configured"""
QUERY_TYPE = None
"""override this with the query class of the implementation"""
INDEX_DIRECTORY_NAME = None
"""override this with a string to be used as the name of the indexing
directory/file in the filesystem
"""
def __init__(self, basedir, analyzer=None, create_allowed=True):
"""initialize or open an indexing database
Any derived class must override ``__init__``.
Any implementation can rely on the "self.location" attribute to be set
by the ``__init__`` function of the super class.
:raise ValueError: the given location exists, but the database type
is incompatible (e.g. created by a different
indexing engine)
:raise OSError: the database failed to initialize
:param basedir: the parent directory of the database
:type basedir: str
:param analyzer: bitwise combination of possible analyzer flags
to be used as the default analyzer for this
database. Leave it empty to use the system
default analyzer (``self.ANALYZER_DEFAULT``).
see :attr:`CommonDatabase.ANALYZER_TOKENIZE`,
:attr:`CommonDatabase.ANALYZER_PARTIAL`, ...
:type analyzer: int
:param create_allowed: create the database, if necessary.
:type create_allowed: bool
"""
# just do some checks
if self.QUERY_TYPE is None:
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'QUERY_TYPE' is undefined")
if self.INDEX_DIRECTORY_NAME is None:
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'INDEX_DIRECTORY_NAME' is undefined")
self.location = os.path.join(basedir, self.INDEX_DIRECTORY_NAME)
if (not create_allowed) and (not os.path.exists(self.location)):
raise OSError("Indexer: the database does not exist - and I am" \
+ " not configured to create it.")
if analyzer is None:
self.analyzer = self.ANALYZER_DEFAULT
else:
self.analyzer = analyzer
self.field_analyzers = {}
def flush(self, optimize=False):
"""Flush the content of the database - to force changes to be written
to disk.
Some databases also support index optimization.
:param optimize: should the index be optimized if possible?
:type optimize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'flush' is missing")
def make_query(self, args, require_all=True, analyzer=None):
"""Create simple queries (strings or field searches) or
combine multiple queries (AND/OR).
        To specify rules for field searches, you may want to take a look at
        :meth:`~.CommonDatabase.set_field_analyzers`. The 'analyzer' parameter
        can override the previously defined default setting.
:param args: queries or search string or description of field query
examples::
[xapian.Query("foo"), xapian.Query("bar")]
xapian.Query("foo")
"bar"
{"foo": "bar", "foobar": "foo"}
:type args: list of queries | single query | str | dict
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: boolean
:param analyzer: (only applicable for 'dict' or 'str')
Define query options (partial matching, exact
matching, tokenizing, ...) as bitwise
combinations of *CommonIndexer.ANALYZER_???*.
This can override previously defined field
analyzer settings.
If analyzer is ``None`` (default), then the
configured analyzer for the field is used.
:type analyzer: int
:return: the combined query
:rtype: query type of the specific implementation
"""
# turn a dict into a list if necessary
if isinstance(args, dict):
args = args.items()
# turn 'args' into a list if necessary
if not isinstance(args, list):
args = [args]
# combine all given queries
result = []
for query in args:
# just add precompiled queries
if isinstance(query, self.QUERY_TYPE):
result.append(self._create_query_for_query(query))
# create field/value queries out of a tuple
elif isinstance(query, tuple):
field, value = query
# perform unicode normalization
field = translate.lang.data.normalize(unicode(field))
value = translate.lang.data.normalize(unicode(value))
                # check for the chosen match type
if analyzer is None:
analyzer = self.get_field_analyzers(field)
result.append(self._create_query_for_field(field, value,
analyzer=analyzer))
# parse plaintext queries
elif isinstance(query, basestring):
if analyzer is None:
analyzer = self.analyzer
# perform unicode normalization
query = translate.lang.data.normalize(unicode(query))
result.append(self._create_query_for_string(query,
require_all=require_all, analyzer=analyzer))
else:
# other types of queries are not supported
raise ValueError("Unable to handle query type: %s" \
% str(type(query)))
# return the combined query
return self._create_query_combined(result, require_all)
def _create_query_for_query(self, query):
"""Generate a query based on an existing query object.
Basically this function should just create a copy of the original.
:param query: the original query object
:type query: ``xapian.Query``
:return: the resulting query object
:rtype: ``xapian.Query`` | ``PyLucene.Query``
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_query' is missing")
def _create_query_for_string(self, text, require_all=True,
analyzer=None):
"""Generate a query for a plain term of a string query.
Basically this function parses the string and returns the resulting
query.
:param text: the query string
:type text: str
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: bool
:param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
*CommonIndexer.ANALYZER_???*.
This can override previously defined field
analyzer settings.
If analyzer is None (default), then the configured
analyzer for the field is used.
:type analyzer: int
:return: resulting query object
:rtype: xapian.Query | PyLucene.Query
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_string' is missing")
def _create_query_for_field(self, field, value, analyzer=None):
"""Generate a field query.
This functions creates a field->value query.
:param field: the fieldname to be used
:type field: str
:param value: the wanted value of the field
:type value: str
:param analyzer: Define query options (partial matching, exact matching,
tokenizing, ...) as bitwise combinations of
*CommonIndexer.ANALYZER_???*.
This can override previously defined field
analyzer settings.
If analyzer is None (default), then the configured
analyzer for the field is used.
:type analyzer: int
:return: resulting query object
:rtype: ``xapian.Query`` | ``PyLucene.Query``
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_for_field' is missing")
def _create_query_combined(self, queries, require_all=True):
"""generate a combined query
:param queries: list of the original queries
:type queries: list of xapian.Query
:param require_all: boolean operator
(True -> AND (default) / False -> OR)
:type require_all: bool
:return: the resulting combined query object
:rtype: ``xapian.Query`` | ``PyLucene.Query``
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_query_combined' is missing")
def index_document(self, data):
"""Add the given data to the database.
:param data: the data to be indexed.
A dictionary will be treated as ``fieldname:value``
combinations.
If the fieldname is None then the value will be
interpreted as a plain term or as a list of plain terms.
Lists of terms are indexed separately.
Lists of strings are treated as plain terms.
:type data: dict | list of str
"""
doc = self._create_empty_document()
if isinstance(data, dict):
data = data.items()
# add all data
for dataset in data:
if isinstance(dataset, tuple):
# the dataset tuple consists of '(key, value)'
key, value = dataset
if key is None:
if isinstance(value, list):
terms = value[:]
elif isinstance(value, basestring):
terms = [value]
else:
raise ValueError("Invalid data type to be indexed: %s" \
                            % str(type(value)))
for one_term in terms:
self._add_plain_term(doc, self._decode(one_term),
(self.ANALYZER_DEFAULT & self.ANALYZER_TOKENIZE > 0))
else:
analyze_settings = self.get_field_analyzers(key)
# handle multiple terms
if not isinstance(value, list):
value = [value]
for one_term in value:
self._add_field_term(doc, key, self._decode(one_term),
(analyze_settings & self.ANALYZER_TOKENIZE > 0))
elif isinstance(dataset, basestring):
self._add_plain_term(doc, self._decode(dataset),
(self.ANALYZER_DEFAULT & self.ANALYZER_TOKENIZE > 0))
else:
raise ValueError("Invalid data type to be indexed: %s" \
                    % str(type(dataset)))
self._add_document_to_index(doc)
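    # Illustrative sketch (not part of the original file): accepted shapes of
    # 'data' for index_document, assuming 'db' is a concrete backend subclass:
    #   db.index_document({"msgid": "Open file", None: "plain term"})
    #   db.index_document({"msgid": ["Open", "Save"]})    # several field terms
    #   db.index_document(["standalone", "plain", "terms"])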
def _create_empty_document(self):
"""Create an empty document to be filled and added to the index later.
:return: the new document object
:rtype: ``xapian.Document`` | ``PyLucene.Document``
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_create_empty_document' is missing")
def _add_plain_term(self, document, term, tokenize=True):
"""Add a term to a document.
:param document: the document to be changed
:type document: ``xapian.Document`` | ``PyLucene.Document``
:param term: a single term to be added
:type term: str
:param tokenize: should the term be tokenized automatically
:type tokenize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_plain_term' is missing")
def _add_field_term(self, document, field, term, tokenize=True):
"""Add a field term to a document.
:param document: the document to be changed
:type document: ``xapian.Document`` | ``PyLucene.Document``
:param field: name of the field
:type field: str
:param term: term to be associated to the field
:type term: str
:param tokenize: should the term be tokenized automatically
:type tokenize: bool
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_field_term' is missing")
def _add_document_to_index(self, document):
"""Add a prepared document to the index database.
:param document: the document to be added
:type document: xapian.Document | PyLucene.Document
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'_add_document_to_index' is missing")
    def begin_transaction(self):
        """Begin a transaction.
You can group multiple modifications of a database as a transaction.
        This prevents time-consuming database flushing and helps if you want
        a changeset to be committed either completely or not at all.
No changes will be written to disk until 'commit_transaction'.
'cancel_transaction' can be used to revert an ongoing transaction.
Database types that do not support transactions may silently ignore it.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'begin_transaction' is missing")
    def cancel_transaction(self):
        """Cancel an ongoing transaction.
        See 'begin_transaction' for details.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'cancel_transaction' is missing")
def commit_transaction(self):
"""Submit the currently ongoing transaction and write changes to disk.
        See 'begin_transaction' for details.
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'commit_transaction' is missing")
    def get_query_result(self, query):
        """Return an object containing the results of a query.
:param query: a pre-compiled query
:type query: a query object of the real implementation
:return: an object that allows access to the results
:rtype: subclass of CommonEnquire
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'get_query_result' is missing")
def delete_document_by_id(self, docid):
"""Delete a specified document.
:param docid: the document ID to be deleted
:type docid: int
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'delete_document_by_id' is missing")
def search(self, query, fieldnames):
"""Return a list of the contents of specified fields for all
matches of a query.
:param query: the query to be issued
:type query: a query object of the real implementation
:param fieldnames: the name(s) of a field of the document content
:type fieldnames: string | list of strings
:return: a list of dicts containing the specified field(s)
:rtype: list of dicts
"""
raise NotImplementedError("Incomplete indexer implementation: " \
+ "'search' is missing")
def delete_doc(self, ident):
"""Delete the documents returned by a query.
:param ident: [list of] document IDs | dict describing a query | query
:type ident: int | list of tuples | dict | list of dicts |
query (e.g. xapian.Query) | list of queries
"""
# turn a doc-ID into a list of doc-IDs
if isinstance(ident, list):
# it is already a list
ident_list = ident
else:
ident_list = [ident]
if len(ident_list) == 0:
# no matching items
return 0
if isinstance(ident_list[0], int) or isinstance(ident_list[0], long):
# create a list of IDs of all successfully removed documents
success_delete = [match for match in ident_list
if self.delete_document_by_id(match)]
return len(success_delete)
if isinstance(ident_list[0], dict):
# something like: { "msgid": "foobar" }
# assemble all queries
query = self.make_query([self.make_query(query_dict,
require_all=True) for query_dict in ident_list],
require_all=True)
elif isinstance(ident_list[0], object):
# assume a query object (with 'AND')
query = self.make_query(ident_list, require_all=True)
else:
# invalid element type in list (not necessarily caught in the
# lines above)
raise TypeError("description of documents to-be-deleted is not " \
+ "supported: list of %s" % type(ident_list[0]))
# we successfully created a query - now iterate through the result
# no documents deleted so far ...
remove_list = []
# delete all resulting documents step by step
def add_docid_to_list(match):
"""Collect every document ID."""
remove_list.append(match["docid"])
self._walk_matches(query, add_docid_to_list)
return self.delete_doc(remove_list)
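    # Illustrative sketch (not part of the original file): accepted 'ident'
    # forms for delete_doc, assuming 'db' is a concrete backend subclass:
    #   db.delete_doc(42)                       # a single document ID
    #   db.delete_doc([42, 43, 44])             # a list of document IDs
    #   db.delete_doc({"msgid": "Open file"})   # all documents matching a query
    #   db.delete_doc(some_query_object)        # a pre-built backend query
    # Each call returns the number of documents that were deleted.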
def _walk_matches(self, query, function, arg_for_function=None):
"""Use this function if you want to do something with every single match
of a query.
Example::
self._walk_matches(query, function_for_match, arg_for_func)
*function_for_match* expects only one argument: the matched object
:param query: a query object of the real implementation
:type query: xapian.Query | PyLucene.Query
:param function: the function to execute with every match
:type function: function
:param arg_for_function: an optional argument for the function
:type arg_for_function: anything
"""
# execute the query
enquire = self.get_query_result(query)
# start with the first element
start = 0
# do the loop at least once
size, avail = (0, 1)
# how many results per 'get_matches'?
steps = 2
while start < avail:
(size, avail, matches) = enquire.get_matches(start, steps)
for match in matches:
if arg_for_function is None:
function(match)
else:
function(match, arg_for_function)
start += size
def set_field_analyzers(self, field_analyzers):
"""Set the analyzers for different fields of the database documents.
All bitwise combinations of *CommonIndexer.ANALYZER_???* are possible.
:param field_analyzers: mapping of field names and analyzers
:type field_analyzers: dict containing field names and analyzers
:raise TypeError: invalid values in *field_analyzers*
"""
for field, analyzer in field_analyzers.items():
            # check for invalid input types
if not isinstance(field, (str, unicode)):
raise TypeError("field name must be a string")
if not isinstance(analyzer, int):
raise TypeError("the analyzer must be a whole number (int)")
# map the analyzer to the field name
self.field_analyzers[field] = analyzer
def get_field_analyzers(self, fieldnames=None):
"""Return the analyzer that was mapped to a specific field.
See :meth:`~.CommonDatabase.set_field_analyzers` for details.
:param fieldnames: the analyzer of this field (or all/multiple fields)
is requested; leave empty (or *None*) to
request all fields.
:type fieldnames: str | list of str | None
:return: The analyzer setting of the field - see
*CommonDatabase.ANALYZER_???* or a dict of field names
and analyzers
:rtype: int | dict
"""
# all field analyzers are requested
if fieldnames is None:
# return a copy
return dict(self.field_analyzers)
# one field is requested
if isinstance(fieldnames, (str, unicode)):
if fieldnames in self.field_analyzers:
return self.field_analyzers[fieldnames]
else:
return self.analyzer
# a list of fields is requested
if isinstance(fieldnames, list):
result = {}
for field in fieldnames:
result[field] = self.get_field_analyzers(field)
return result
return self.analyzer
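    # Illustrative sketch (not part of the original file): mapping analyzer
    # flags to fields; ANALYZER_TOKENIZE is used above, other ANALYZER_* flags
    # would be combined with bitwise OR in the same way:
    #   db.set_field_analyzers({"msgid": db.ANALYZER_TOKENIZE})
    #   db.get_field_analyzers("msgid")    # -> the int flags configured above
    #   db.get_field_analyzers()           # -> dict of all configured fields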
    def _decode(self, text):
        """Decode the string from utf-8 or charmap and perform
        unicode normalization."""
if isinstance(text, str):
try:
result = unicode(text.decode("UTF-8"))
            except UnicodeDecodeError, e:
result = unicode(text.decode("charmap"))
elif not isinstance(text, unicode):
result = unicode(text)
else:
result = text
# perform unicode normalization
return translate.lang.data.normalize(result)
class CommonEnquire(object):
"""An enquire object contains the information about the result of a request.
"""
    def __init__(self, enquire):
        """Initialization of a wrapper around enquires of different backends.
:param enquire: a previous enquire
:type enquire: xapian.Enquire | pylucene-enquire
"""
self.enquire = enquire
def get_matches(self, start, number):
"""Return a specified number of qualified matches of a previous query.
:param start: index of the first match to return (starting from zero)
:type start: int
:param number: the number of matching entries to return
:type number: int
:return: a set of matching entries and some statistics
        :rtype: tuple of (returned number, available number, matches)
                "matches" is a list of dictionaries with the keys::
                    ["rank", "percent", "document", "docid"]
"""
raise NotImplementedError("Incomplete indexing implementation: " \
+ "'get_matches' for the 'Enquire' class is missing")
def get_matches_count(self):
"""Return the estimated number of matches.
Use :meth:`translate.search.indexing.CommonIndexer.search`
to retrieve the exact number of matches
:return: The estimated number of matches
:rtype: int
"""
(returned, estimate_count, matches) = self.get_matches(0, 1)
return estimate_count
| staranjeet/fjord | vendor/packages/translate-toolkit/translate/search/indexing/CommonIndexer.py | Python | bsd-3-clause | 25,337 |
#!/usr/bin/python3
# Halide tutorial lesson 13: Tuples
# This lesson describes how to write Funcs that evaluate to multiple
# values.
# On linux, you can compile and run it like so:
# g++ lesson_13*.cpp -g -I ../include -L ../bin -lHalide -lpthread -ldl -o lesson_13 -std=c++11
# LD_LIBRARY_PATH=../bin ./lesson_13
# On os x:
# g++ lesson_13*.cpp -g -I ../include -L ../bin -lHalide -o lesson_13 -std=c++11
# DYLD_LIBRARY_PATH=../bin ./lesson_13
# If you have the entire Halide source tree, you can also build it by
# running:
# make tutorial_lesson_13_tuples
# in a shell with the current directory at the top of the halide
# source tree.
#include "Halide.h"
#include <stdio.h>
#include <algorithm>
#using namespace Halide
from halide import *
import numpy
import math
min_, max_ = __builtins__.min, __builtins__.max
def main():
# So far Funcs (such as the one below) have evaluated to a single
# scalar value for each point in their domain.
single_valued = Func()
x, y = Var("x"), Var("y")
single_valued[x, y] = x + y
# One way to write a Func that returns a collection of values is
# to add an additional dimension which indexes that
# collection. This is how we typically deal with color. For
# example, the Func below represents a collection of three values
# for every x, y coordinate indexed by c.
color_image = Func()
c = Var("c")
color_image[x, y, c] = select(c == 0, 245, # Red value
c == 1, 42, # Green value
132) # Blue value
# This method is often convenient because it makes it easy to
# operate on this Func in a way that treats each item in the
# collection equally:
brighter = Func()
brighter[x, y, c] = color_image[x, y, c] + 10
# However this method is also inconvenient for three reasons.
#
# 1) Funcs are defined over an infinite domain, so users of this
# Func can for example access color_image(x, y, -17), which is
# not a meaningful value and is probably indicative of a bug.
#
# 2) It requires a select, which can impact performance if not
# bounded and unrolled:
# brighter.bound(c, 0, 3).unroll(c)
#
# 3) With this method, all values in the collection must have the
# same type. While the above two issues are merely inconvenient,
# this one is a hard limitation that makes it impossible to
# express certain things in this way.
# It is also possible to represent a collection of values as a
# collection of Funcs:
func_array = [Func() for i in range(3)]
func_array[0][x, y] = x + y
func_array[1][x, y] = sin(x)
func_array[2][x, y] = cos(y)
# This method avoids the three problems above, but introduces a
# new annoyance. Because these are separate Funcs, it is
# difficult to schedule them so that they are all computed
# together inside a single loop over x, y.
# A third alternative is to define a Func as evaluating to a
# Tuple instead of an Expr. A Tuple is a fixed-size collection of
    # Exprs which may have different types. The following function
# evaluates to an integer value (x+y), and a floating point value
# (sin(x*y)).
multi_valued = Func("multi_valued")
multi_valued[x, y] = (x + y, sin(x * y))
# Realizing a tuple-valued Func returns a collection of
# Buffers. We call this a Realization. It's equivalent to a
# std::vector of Buffer/Image objects:
if True:
(im1, im2) = multi_valued.realize(80, 60)
assert type(im1) is Buffer_int32
assert type(im2) is Buffer_float32
assert im1(30, 40) == 30 + 40
assert numpy.isclose(im2(30, 40), math.sin(30 * 40))
# All Tuple elements are evaluated together over the same domain
# in the same loop nest, but stored in distinct allocations. The
# equivalent C++ code to the above is:
if True:
multi_valued_0 = numpy.empty((80*60), dtype=numpy.int32)
        multi_valued_1 = numpy.empty((80*60), dtype=numpy.float32)  # holds sin() values
for yy in range(80):
for xx in range(60):
multi_valued_0[xx + 60*yy] = xx + yy
multi_valued_1[xx + 60*yy] = math.sin(xx*yy)
# When compiling ahead-of-time, a Tuple-valued Func evaluates
# into multiple distinct output buffer_t structs. These appear in
# order at the end of the function signature:
# int multi_valued(...input buffers and params..., buffer_t *output_1, buffer_t *output_2)
# You can construct a Tuple by passing multiple Exprs to the
# Tuple constructor as we did above. Perhaps more elegantly, you
# can also take advantage of C++11 initializer lists and just
# enclose your Exprs in braces:
multi_valued_2 = Func("multi_valued_2")
multi_valued_2[x, y] = (x + y, sin(x * y))
# Calls to a multi-valued Func cannot be treated as Exprs. The
# following is a syntax error:
# Func consumer
# consumer[x, y] = multi_valued_2[x, y] + 10
# Instead you must index the returned object with square brackets
# to retrieve the individual Exprs:
integer_part = multi_valued_2[x, y][0]
floating_part = multi_valued_2[x, y][1]
assert type(integer_part) is FuncTupleElementRef
assert type(floating_part) is FuncTupleElementRef
consumer = Func()
consumer[x, y] = (integer_part + 10, floating_part + 10.0)
# Tuple reductions.
if True:
# Tuples are particularly useful in reductions, as they allow
# the reduction to maintain complex state as it walks along
# its domain. The simplest example is an argmax.
# First we create an Image to take the argmax over.
input_func = Func()
input_func[x] = sin(x)
input = input_func.realize(100)
assert type(input) is Buffer_float32
        # Then we define a 2-valued Tuple which tracks the maximum value and
        # its index.
arg_max = Func()
# Pure definition.
# (using [()] for zero-dimensional Funcs is a convention of this python interface)
arg_max[()] = (0, input(0))
# Update definition.
r = RDom(1, 99)
old_index = arg_max[()][0]
old_max = arg_max[()][1]
new_index = select(old_max > input[r], r, old_index)
new_max = max(input[r], old_max)
arg_max[()] = (new_index, new_max)
# The equivalent C++ is:
arg_max_0 = 0
arg_max_1 = float(input(0))
for r in range(1, 100):
old_index = arg_max_0
old_max = arg_max_1
new_index = r if (old_max > input(r)) else old_index
new_max = max_(input(r), old_max)
# In a tuple update definition, all loads and computation
# are done before any stores, so that all Tuple elements
# are updated atomically with respect to recursive calls
# to the same Func.
arg_max_0 = new_index
arg_max_1 = new_max
# Let's verify that the Halide and C++ found the same maximum
# value and index.
if True:
(r0, r1) = arg_max.realize()
assert type(r0) is Buffer_int32
assert type(r1) is Buffer_float32
assert arg_max_0 == r0(0)
assert numpy.isclose(arg_max_1, r1(0))
# Halide provides argmax and argmin as built-in reductions
# similar to sum, product, maximum, and minimum. They return
# a Tuple consisting of the point in the reduction domain
# corresponding to that value, and the value itself. In the
# case of ties they return the first value found. We'll use
# one of these in the following section.
# Tuples for user-defined types.
if True:
# Tuples can also be a convenient way to represent compound
# objects such as complex numbers. Defining an object that
# can be converted to and from a Tuple is one way to extend
# Halide's type system with user-defined types.
class Complex:
def __init__(self, r, i=None):
if type(r) is float and type(i) is float:
self.real = Expr(r)
self.imag = Expr(i)
elif i is not None:
self.real = r
self.imag = i
else:
self.real = r[0]
self.imag = r[1]
def as_tuple(self):
"Convert to a Tuple"
return (self.real, self.imag)
def __add__(self, other):
"Complex addition"
return Complex(self.real + other.real, self.imag + other.imag)
def __mul__(self, other):
"Complex multiplication"
return Complex(self.real * other.real - self.imag * other.imag,
self.real * other.imag + self.imag * other.real)
def __getitem__(self, idx):
return (self.real, self.imag)[idx]
def __len__(self):
return 2
def magnitude(self):
"Complex magnitude"
return (self.real * self.real) + (self.imag * self.imag)
# Other complex operators would go here. The above are
# sufficient for this example.
# Let's use the Complex struct to compute a Mandelbrot set.
mandelbrot = Func()
# The initial complex value corresponding to an x, y coordinate
# in our Func.
initial = Complex(x/15.0 - 2.5, y/6.0 - 2.0)
# Pure definition.
t = Var("t")
mandelbrot[x, y, t] = Complex(0.0, 0.0)
# We'll use an update definition to take 12 steps.
r = RDom(1, 12)
current = Complex(mandelbrot[x, y, r-1])
# The following line uses the complex multiplication and
# addition we defined above.
mandelbrot[x, y, r] = (Complex(current*current) + initial)
# We'll use another tuple reduction to compute the iteration
# number where the value first escapes a circle of radius 4.
# This can be expressed as an argmin of a boolean - we want
# the index of the first time the given boolean expression is
# false (we consider false to be less than true). The argmax
# would return the index of the first time the expression is
# true.
escape_condition = Complex(mandelbrot[x, y, r]).magnitude() < 16.0
first_escape = argmin(escape_condition)
assert type(first_escape) is tuple
# We only want the index, not the value, but argmin returns
# both, so we'll index the argmin Tuple expression using
# square brackets to get the Expr representing the index.
escape = Func()
escape[x, y] = first_escape[0]
# Realize the pipeline and print the result as ascii art.
result = escape.realize(61, 25)
assert type(result) is Buffer_int32
code = " .:-~*={&%#@"
for yy in range(result.height()):
for xx in range(result.width()):
index = result(xx, yy)
if index < len(code):
print("%c" % code[index], end="")
else:
pass # is lesson 13 cpp version buggy ?
print("\n")
print("Success!")
return 0
if __name__ == "__main__":
main()
| kgnk/Halide | python_bindings/tutorial/lesson_13_tuples.py | Python | mit | 11,459 |
from _pytest.pytester import Pytester
def test_show_fixtures_and_test(
pytester: Pytester, dummy_yaml_custom_test: None
) -> None:
"""Verify that fixtures are not executed."""
pytester.makepyfile(
"""
import pytest
@pytest.fixture
def arg():
assert False
def test_arg(arg):
assert False
"""
)
result = pytester.runpytest("--setup-plan")
assert result.ret == 0
result.stdout.fnmatch_lines(
["*SETUP F arg*", "*test_arg (fixtures used: arg)", "*TEARDOWN F arg*"]
)
def test_show_multi_test_fixture_setup_and_teardown_correctly_simple(
pytester: Pytester,
) -> None:
"""Verify that when a fixture lives for longer than a single test, --setup-plan
correctly displays the SETUP/TEARDOWN indicators the right number of times.
As reported in https://github.com/pytest-dev/pytest/issues/2049
--setup-plan was showing SETUP/TEARDOWN on every test, even when the fixture
should persist through multiple tests.
(Note that this bug never affected actual test execution, which used the
correct fixture lifetimes. It was purely a display bug for --setup-plan, and
did not affect the related --setup-show or --setup-only.)
"""
pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope = 'class')
def fix():
return object()
class TestClass:
def test_one(self, fix):
assert False
def test_two(self, fix):
assert False
"""
)
result = pytester.runpytest("--setup-plan")
assert result.ret == 0
setup_fragment = "SETUP C fix"
setup_count = 0
teardown_fragment = "TEARDOWN C fix"
teardown_count = 0
for line in result.stdout.lines:
if setup_fragment in line:
setup_count += 1
if teardown_fragment in line:
teardown_count += 1
    # before the fix that this test verifies, there would have been a
    # setup/teardown message for each test, so the counts would each have been 2
assert setup_count == 1
assert teardown_count == 1
def test_show_multi_test_fixture_setup_and_teardown_same_as_setup_show(
pytester: Pytester,
) -> None:
"""Verify that SETUP/TEARDOWN messages match what comes out of --setup-show."""
pytester.makepyfile(
"""
import pytest
@pytest.fixture(scope = 'session')
def sess():
return True
@pytest.fixture(scope = 'module')
def mod():
return True
@pytest.fixture(scope = 'class')
def cls():
return True
@pytest.fixture(scope = 'function')
def func():
return True
def test_outside(sess, mod, cls, func):
assert True
class TestCls:
def test_one(self, sess, mod, cls, func):
assert True
def test_two(self, sess, mod, cls, func):
assert True
"""
)
plan_result = pytester.runpytest("--setup-plan")
show_result = pytester.runpytest("--setup-show")
# the number and text of these lines should be identical
plan_lines = [
line
for line in plan_result.stdout.lines
if "SETUP" in line or "TEARDOWN" in line
]
show_lines = [
line
for line in show_result.stdout.lines
if "SETUP" in line or "TEARDOWN" in line
]
assert plan_lines == show_lines
| nicoddemus/pytest | testing/test_setupplan.py | Python | mit | 3,498 |
from authorize import Configuration
class BankAccount(object):
@staticmethod
def create(customer_id, params={}):
return Configuration.api.bank_account.create(customer_id, params)
@staticmethod
def details(customer_id, payment_id):
return Configuration.api.bank_account.details(customer_id, payment_id)
@staticmethod
def update(customer_id, payment_id, params={}):
return Configuration.api.bank_account.update(customer_id, payment_id, params)
@staticmethod
def delete(customer_id, payment_id):
return Configuration.api.bank_account.delete(customer_id, payment_id)
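# Illustrative usage sketch (not part of the original module): these wrappers
# simply forward to the configured API backend, e.g.
#   BankAccount.create('cust_123', {...})           # params dict per gateway docs
#   BankAccount.details('cust_123', 'payment_456')
#   BankAccount.delete('cust_123', 'payment_456')
# The concrete keys accepted in 'params' depend on the Authorize.Net API and
# are not shown here.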
| aryeh/py-authorize | authorize/bank_account.py | Python | mit | 633 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import datetime
from frappe.utils import formatdate, fmt_money, flt, cstr, cint, format_datetime
from frappe.model.meta import get_field_currency, get_field_precision
import re
def format_value(value, df=None, doc=None, currency=None, translated=False):
'''Format value based on given fieldtype, document reference, currency reference.
If docfield info (df) is not given, it will try and guess based on the datatype of the value'''
if isinstance(df, basestring):
df = frappe._dict(fieldtype=df)
if not df:
df = frappe._dict()
if isinstance(value, datetime.datetime):
df.fieldtype = 'Datetime'
elif isinstance(value, datetime.date):
df.fieldtype = 'Date'
elif isinstance(value, int):
df.fieldtype = 'Int'
elif isinstance(value, float):
df.fieldtype = 'Float'
else:
df.fieldtype = 'Data'
elif (isinstance(df, dict)):
# Convert dict to object if necessary
df = frappe._dict(df)
if value is None:
value = ""
elif translated:
value = frappe._(value)
if not df:
return value
elif df.get("fieldtype")=="Date":
return formatdate(value)
elif df.get("fieldtype")=="Datetime":
return format_datetime(value)
elif value==0 and df.get("fieldtype") in ("Int", "Float", "Currency", "Percent") and df.get("print_hide_if_no_value"):
# this is required to show 0 as blank in table columns
return ""
elif df.get("fieldtype") == "Currency" or (df.get("fieldtype")=="Float" and (df.options or "").strip()):
return fmt_money(value, precision=get_field_precision(df, doc),
currency=currency if currency else (get_field_currency(df, doc) if doc else None))
elif df.get("fieldtype") == "Float":
precision = get_field_precision(df, doc)
# show 1.000000 as 1
# options should not specified
if not df.options and value is not None:
temp = cstr(value).split(".")
if len(temp)==1 or cint(temp[1])==0:
precision = 0
return fmt_money(value, precision=precision)
elif df.get("fieldtype") == "Percent":
return "{}%".format(flt(value, 2))
elif df.get("fieldtype") in ("Text", "Small Text"):
if not re.search("(\<br|\<div|\<p)", value):
return value.replace("\n", "<br>")
return value
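# Illustrative usage sketch (not part of the original module); exact output
# depends on the active number and date settings:
#   format_value(0.5, "Percent")                                         # -> "0.5%"
#   format_value(0, {"fieldtype": "Int", "print_hide_if_no_value": 1})   # -> ""
#   format_value(None)                                                   # -> ""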
| elba7r/builder | frappe/utils/formatters.py | Python | mit | 2,313 |
# coding=utf-8
from kivy.lang import Builder
from kivy.properties import StringProperty, BooleanProperty, ObjectProperty, \
NumericProperty, ListProperty, OptionProperty
from kivy.uix.behaviors import ButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivymd.ripplebehavior import RectangularRippleBehavior
from kivymd.theming import ThemableBehavior
Builder.load_string("""
<SmartTile>
_img_widget: img
_img_overlay: img_overlay
_box_overlay: box
AsyncImage:
id: img
allow_stretch: root.allow_stretch
anim_delay: root.anim_delay
anim_loop: root.anim_loop
color: root.img_color
keep_ratio: root.keep_ratio
mipmap: root.mipmap
source: root.source
size_hint_y: 1 if root.overlap else None
x: root.x
y: root.y if root.overlap or root.box_position == 'header' else box.top
BoxLayout:
id: img_overlay
size_hint: img.size_hint
size: img.size
pos: img.pos
BoxLayout:
canvas:
Color:
rgba: root.box_color
Rectangle:
pos: self.pos
size: self.size
id: box
size_hint_y: None
height: dp(68) if root.lines == 2 else dp(48)
x: root.x
y: root.y if root.box_position == 'footer' else root.y + root.height - self.height
<SmartTileWithLabel>
_img_widget: img
_img_overlay: img_overlay
_box_overlay: box
_box_label: boxlabel
AsyncImage:
id: img
allow_stretch: root.allow_stretch
anim_delay: root.anim_delay
anim_loop: root.anim_loop
color: root.img_color
keep_ratio: root.keep_ratio
mipmap: root.mipmap
source: root.source
size_hint_y: 1 if root.overlap else None
x: root.x
y: root.y if root.overlap or root.box_position == 'header' else box.top
BoxLayout:
id: img_overlay
size_hint: img.size_hint
size: img.size
pos: img.pos
BoxLayout:
canvas:
Color:
rgba: root.box_color
Rectangle:
pos: self.pos
size: self.size
id: box
size_hint_y: None
height: dp(68) if root.lines == 2 else dp(48)
x: root.x
y: root.y if root.box_position == 'footer' else root.y + root.height - self.height
MDLabel:
id: boxlabel
font_style: "Caption"
halign: "center"
text: root.text
""")
class Tile(ThemableBehavior, RectangularRippleBehavior, ButtonBehavior,
BoxLayout):
"""A simple tile. It does nothing special, just inherits the right behaviors
to work as a building block.
"""
pass
class SmartTile(ThemableBehavior, RectangularRippleBehavior, ButtonBehavior,
FloatLayout):
"""A tile for more complex needs.
Includes an image, a container to place overlays and a box that can act
as a header or a footer, as described in the Material Design specs.
"""
box_color = ListProperty([0, 0, 0, 0.5])
"""Sets the color and opacity for the information box."""
    box_position = OptionProperty('footer', options=['footer', 'header'])
    """Determines whether the information box acts as a header or footer to the
image.
"""
lines = OptionProperty(1, options=[1, 2])
"""Number of lines in the header/footer.
As per Material Design specs, only 1 and 2 are valid values.
"""
overlap = BooleanProperty(True)
"""Determines if the header/footer overlaps on top of the image or not"""
# Img properties
allow_stretch = BooleanProperty(True)
anim_delay = NumericProperty(0.25)
anim_loop = NumericProperty(0)
img_color = ListProperty([1, 1, 1, 1])
keep_ratio = BooleanProperty(False)
mipmap = BooleanProperty(False)
source = StringProperty()
_img_widget = ObjectProperty()
_img_overlay = ObjectProperty()
_box_overlay = ObjectProperty()
_box_label = ObjectProperty()
def reload(self):
self._img_widget.reload()
def add_widget(self, widget, index=0):
if issubclass(widget.__class__, IOverlay):
self._img_overlay.add_widget(widget, index)
elif issubclass(widget.__class__, IBoxOverlay):
self._box_overlay.add_widget(widget, index)
else:
super(SmartTile, self).add_widget(widget, index)
class SmartTileWithLabel(SmartTile):
_box_label = ObjectProperty()
# MDLabel properties
font_style = StringProperty("Caption")
theme_text_color = StringProperty("")
text = StringProperty("")
"""Determines the text for the box footer/header"""
class IBoxOverlay():
    """An interface to specify widgets that belong to the box overlay
    in the :class:`SmartTile` widget when added as a child.
    """
    pass
class IOverlay():
    """An interface to specify widgets that belong to the image overlay
    in the :class:`SmartTile` widget when added as a child.
    """
pass
| PeterSurda/PyBitmessage | src/kivymd/grid.py | Python | mit | 5,125 |
from chainer.functions.array import concat
from chainer.functions.array import expand_dims
def stack(xs, axis=0):
"""Concatenate variables along a new axis.
Args:
xs (list of chainer.Variable): Variables to be concatenated.
axis (int): Axis of result along which variables are stacked.
Returns:
~chainer.Variable: Output variable.
"""
xs = [expand_dims.expand_dims(x, axis=axis) for x in xs]
return concat.concat(xs, axis=axis)
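if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module); assumes numpy and
    # a chainer installation providing Variable are importable here.
    import numpy
    from chainer import Variable
    a = Variable(numpy.zeros((2, 3), dtype=numpy.float32))
    b = Variable(numpy.ones((2, 3), dtype=numpy.float32))
    c = stack([a, b], axis=0)
    # Two (2, 3) inputs stacked along a new leading axis give shape (2, 2, 3).
    assert c.data.shape == (2, 2, 3)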
| kikusu/chainer | chainer/functions/array/stack.py | Python | mit | 481 |
"""
PymageJ Copyright (C) 2015 Jochem Smit
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
| Jhsmit/PymageJ | tests/__init__.py | Python | gpl-2.0 | 730 |
import numpy
import theano
from theano.misc.gnumpy_utils import gnumpy_available
if not gnumpy_available:
from nose.plugins.skip import SkipTest
raise SkipTest("gnumpy not installed. Skip test related to it.")
from theano.misc.gnumpy_utils import (garray_to_cudandarray,
cudandarray_to_garray)
import gnumpy
def test(shape=(3, 4, 5)):
"""
Make sure that the gnumpy conversion is exact from garray to
CudaNdarray back to garray.
"""
gpu = theano.sandbox.cuda.basic_ops.gpu_from_host
U = gpu(theano.tensor.ftensor3('U'))
ii = theano.function([U], gpu(U + 1))
A = gnumpy.rand(*shape)
A_cnd = garray_to_cudandarray(A)
assert A_cnd.shape == A.shape
# dtype always float32
# garray don't have strides
B_cnd = ii(A_cnd)
B = cudandarray_to_garray(B_cnd)
    assert B.shape == A.shape
from numpy import array
u = (A + 1).asarray()
v = B.asarray()
w = array(B_cnd)
assert (u == v).all()
assert (u == w).all()
def test2(shape=(3, 4, 5)):
"""
Make sure that the gnumpy conversion is exact from CudaNdarray to
garray back to CudaNdarray.
"""
gpu = theano.sandbox.cuda.basic_ops.gpu_from_host
U = gpu(theano.tensor.ftensor3('U'))
ii = theano.function([U], gpu(U + 1))
A = numpy.random.rand(*shape).astype('float32')
A_cnd = theano.sandbox.cuda.CudaNdarray(A)
A_gar = cudandarray_to_garray(A_cnd)
assert A_cnd.shape == A_gar.shape
# dtype always float32
# garray don't have strides
B = garray_to_cudandarray(A_gar)
assert A_cnd.shape == B.shape
# dtype always float32
assert A_cnd._strides == B._strides
assert A_cnd.gpudata == B.gpudata
v = numpy.asarray(B)
assert (v == A).all()
def test_broadcast_dims():
"""
Test with some dimensions being 1.
CudaNdarray use 0 for strides for those dimensions.
"""
test((1, 2, 3))
test((2, 1, 3))
test((2, 3, 1))
test2((1, 2, 3))
test2((2, 1, 3))
test2((2, 3, 1))
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/Theano-0.7.0-py3.4.egg/theano/misc/tests/test_gnumpy_utils.py | Python | gpl-2.0 | 2,052 |
import os
import unittest
from ubuntutweak.janitor.mozilla_plugin import FirefoxCachePlugin
class TestJanitorPlugin(unittest.TestCase):
def setUp(self):
self.firefox_plugin = FirefoxCachePlugin()
def test_firefox_plugin(self):
self.assertTrue(os.path.expanduser('~/.mozilla/firefox/5tzbwjwa.default'), self.firefox_plugin.get_path())
if __name__ == '__main__':
unittest.main()
| frdb194/ubuntu-tweak | tests/test_janitor_plugins.py | Python | gpl-2.0 | 409 |
#!/usr/bin/env python
import os, sys
SALVUS_ROOT=os.environ['SALVUS_ROOT']
sys.path.append(SALVUS_ROOT)
import admin
a = admin.Services('%s/conf/deploy_smc/'%SALVUS_ROOT, password='')
for x in a._hosts('hub', 'cd salvus/salvus; . smc-env; ./update',timeout=60):
print x
| haraldschilly/smc | src/scripts/update_hubs.py | Python | gpl-3.0 | 280 |
"""
cmdline drivers for unipen data set
"""
import cPickle
import os.path
import sys
import numpy as np
from dataset import DatFile
def read_file(input_name, output_name, box, to_array=True, to_csv=False):
dat = DatFile()
dat.read(open(input_name, 'rb'))
segments = ['nan, nan']
num_sequences = 5
x_scale = (box[1] - box[0]) / float(num_sequences)
y_scale = box[3] - box[2]
for jj in range(num_sequences):
# get sequence
npa = np.array(dat.pen_downs[jj])
# if there's no data skip it
if len(npa) < 2:
continue
# for normalization
xmin = npa[:, 0].min()
xmax = npa[:, 0].max()
ymin = npa[:, 1].min()
ymax = npa[:, 1].max()
for ii in range(len(npa)):
x0, y0 = npa[ii]
segments.append("%s, %s" % (
jj*x_scale +
(x_scale * (x0 - xmin) / (xmax - xmin)) + box[0],
box[3] - y_scale * (ymax - y0) / (ymax - ymin)))
segments.append('nan, nan')
if to_csv:
open(output_name, 'wb').write('\n'.join(segments))
if to_array:
array = np.zeros((len(segments), 2), dtype='float32')
for i in range(len(segments)):
row = segments[i].split(',')
array[i] = row
return array
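# Illustrative note (an assumption read off the indexing above): 'box' is
# expected as (x_min, x_max, y_min, y_max).  For example,
#   read_file('char.dat', 'char.csv', box=(0.0, 5.0, 0.0, 1.0), to_csv=True)
# would scale each of the five pen-down sequences into its own fifth of the
# x range and the full y range, separated by 'nan, nan' rows.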
def read_file_pkl(input_name, output_name, box, to_array=True, to_csv=False):
pen_pos = cPickle.load(open(input_name, 'rb'))
xmin = pen_pos[:, 0].min()
xmax = pen_pos[:, 0].max()
ymin = pen_pos[:, 1].min()
ymax = pen_pos[:, 1].max()
pen_pos[:, 0] -= xmin
pen_pos[:, 0] /= (xmax - xmin)
pen_pos[:, 1] -= ymin
pen_pos[:, 1] /= (ymax - ymin)
pen_pos[:, 0] *= box[1] - box[0]
pen_pos[:, 0] += box[0]
pen_pos[:, 1] *= box[3] - box[2]
pen_pos[:, 1] += box[2]
xmin = pen_pos[:, 0].min()
xmax = pen_pos[:, 0].max()
ymin = pen_pos[:, 1].min()
ymax = pen_pos[:, 1].max()
# wrap in nans
nans = (np.ones((1,2))*np.nan)
pen_pos = np.vstack([nans, pen_pos, nans])
return pen_pos
| Vidhyalakshimi/blog | Control/Tasks/Write/read_trajectory.py | Python | gpl-3.0 | 2,097 |
""" Unit tests for Time Series data type
:Author: Titiruck Nuntapramote ([email protected])
:Created: 2011/04/23
"""
import unittest
if __name__ == '__main__':
import sys
import os
# The root of the code
file_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(file_path[:file_path.rfind('pySPACE')-1])
from pySPACE.resources.data_types.time_series import TimeSeries # as ts
import numpy as np
class TimeSeriesTestCase(unittest.TestCase):
""" Test for TimeSeries data type """
def setUp(self):
""" Define some TimeSeries objects """
# no tag
self.x1 = TimeSeries([[1,2,3,4,5,6]], ['a','b','c','d','e','f'], 120,
marker_name='S4', name='Name_text ending with Standard',
start_time=12004.0, end_time=13004.0)
# no -
self.x2 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,
marker_name='S4',
start_time=12004.0, end_time=13004.0,
tag = 'Tag of x2', name='Name_text ending with Standard')
# no name, tag
self.x3 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,
marker_name='S4', start_time=12004.0, end_time=13004.0)
# no name, start_time, end_time, tag
self.x4 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,marker_name='S4')
# no start_time, end_time, name, marker_name, tag
self.x5 = TimeSeries([1,2], ['a','b'], 25, start_time = 12004.0)
# no name, start_time, end_time
self.x6 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,
tag = 'Tag of x6')
def test_generate_tag(self):
self.assertEqual(TimeSeries._generate_tag(self.x1),
'Epoch Start: 12004ms; End: 13004ms; Class: Standard')
self.assertEqual(TimeSeries._generate_tag(self.x3),
'Epoch Start: 12004ms; End: 13004ms; Class: na')
self.assertEqual(TimeSeries._generate_tag(self.x4),None)
self.assertEqual(TimeSeries._generate_tag(self.x5),
'Epoch Start: 12004ms; End: nams; Class: na')
# replace with new data and inherit history, key, tag, specs from the old
    def test_replace_data(self):
data = TimeSeries.replace_data(self.x2, [10,11,12,13,14,15],
channel_names=['m','n','o','p','q','r'],
sampling_frequency=30,
start_time=1200.0)
self.assertFalse((data.view(np.ndarray)-[10,11,12,13,14,15]).any())
self.assertEqual(data.channel_names, ['m','n','o','p','q','r'])
self.assertEqual(data.sampling_frequency, 30)
self.assertEqual(data.start_time, 1200)
self.assertEqual(data.end_time, 13004)
self.assertEqual(data.name, 'Name_text ending with Standard')
self.assertEqual(data.tag,'Tag of x2')
def test_get_channel(self):
self.assertEqual(self.x1.channel_names, self.x1.get_channel_names())
self.assertEqual(self.x6.channel_names, self.x6.get_channel_names())
def test_ms_to_samples(self):
self.assertEqual(self.x1._ms_to_samples(12),12/1000.0*120)
self.assertEqual(self.x2._ms_to_samples(25),25/1000.0*12)
def test_samples_to_ms(self):
self.assertEqual(self.x3._samples_to_ms(34),34/12.0*1000)
self.assertEqual(self.x5._samples_to_ms(10),10/25.0*1000)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromName('test_time_series')
unittest.TextTestRunner(verbosity=2).run(suite)
| Crespo911/pyspace | pySPACE/tests/unittests/data_types/test_time_series.py | Python | gpl-3.0 | 3,589 |
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((5772.29, -434.074, 5890.09), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((5710.38, 1534.62, 6216.5), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((4461.92, 2813.85, 5990.98), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((2221.41, 2294.96, 6471.22), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((2411.34, 2954.6, 7324.25), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((4419.41, 4078.94, 7807.25), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((5251.72, 3640.84, 10584.3), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((4694.04, 3424.42, 11811.4), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((7217.51, 4746.22, 8298.38), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((8099.7, 4132.19, 8009.76), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((7724.63, 3358.6, 6421.95), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((7865.65, 2155.71, 4825.57), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((7838.35, 3767.11, 4267.84), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((6563.18, 5021.81, 3687.39), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((6594.83, 4066.94, 2502.49), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((7146.61, 3157.99, 197.921), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((7417.24, 2864.55, 1952.71), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((7576.96, 2155.62, 3351.84), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((6205.05, 1494.99, 3179.41), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((6696.28, 2650.5, 2309.69), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((6830.08, 4035.46, 3435.14), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((6258.41, 3415.2, 3314.16), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((7423.14, 2833.56, 3734.71), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((7644.67, 2171.41, 4858.47), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((6050.53, 2364.95, 4823), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((5998.87, 3229.89, 3757.82), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((6677.71, 2219.99, 3016.23), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((5914.24, 3158.57, 3778.65), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((5991.17, 3612.79, 2864.43), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((5756.27, 4706.23, 2262.7), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((5816.84, 4192, 3767.36), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((5142.68, 4492.89, 5119.53), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((6629.46, 4369.47, 4641.64), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((7994.53, 3887.26, 4139.9), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((7554.83, 2843.21, 3440.69), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((9079.02, 2790.13, 3366.07), (0.7, 0.7, 0.7), 697.612)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((7252.81, 4849.62, 4743.85), (0.7, 0.7, 0.7), 799.808)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((7486.38, 5742.81, 6893.28), (0.7, 0.7, 0.7), 1132.58)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((7132.24, 6356.79, 6467.02), (0.7, 0.7, 0.7), 1011.94)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((6757.01, 7739.67, 6840.04), (0.7, 0.7, 0.7), 782.592)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((7947.61, 7959.38, 8176.26), (0.7, 0.7, 0.7), 856.575)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((8722.11, 8097.32, 8839.14), (1, 0.7, 0), 706.579)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((7793.98, 7707.97, 8308.26), (0.7, 0.7, 0.7), 1015.96)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((5336.4, 7603.19, 6852.43), (0.7, 0.7, 0.7), 1205.72)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((3983.44, 8541.36, 5531.13), (0.7, 0.7, 0.7), 841.939)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((3600.1, 7533.52, 6673.18), (1, 0.7, 0), 806.999)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((3105.88, 7875.22, 6531.39), (0.7, 0.7, 0.7), 958.856)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((1564.1, 7056.12, 5752.5), (0.7, 0.7, 0.7), 952.892)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((1093.44, 7613.03, 5209.02), (0.7, 0.7, 0.7), 809.284)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((1622.34, 8162.58, 5663.66), (0.7, 0.7, 0.7), 709.159)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((1413.3, 7218, 4183.26), (0.7, 0.7, 0.7), 859.832)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((1911.7, 5574.41, 3864.73), (0.7, 0.7, 0.7), 800.866)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((2599.31, 7013.63, 3007.84), (0.7, 0.7, 0.7), 949.508)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((4388.84, 7666.41, 3281.74), (0.7, 0.7, 0.7), 891.98)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((4029.83, 5721.56, 3521.15), (0.7, 0.7, 0.7), 890.034)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((2443.04, 6372.16, 3684.58), (0.7, 0.7, 0.7), 804.165)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((1114.41, 6150.48, 2750.24), (0.7, 0.7, 0.7), 826.796)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((2413.42, 7004.61, 1598.42), (0.7, 0.7, 0.7), 1085.8)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((4294.16, 6521.63, 3258.52), (0.7, 0.7, 0.7), 906.997)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((3151.57, 7700.74, 2793.99), (0.7, 0.7, 0.7), 708.694)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((1865.95, 7556.57, 3590.47), (0.7, 0.7, 0.7), 780.223)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((1383.94, 6949.43, 3653.72), (0.7, 0.7, 0.7), 757.424)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((161.958, 7112.72, 4690.4), (0.7, 0.7, 0.7), 817.574)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((-277.95, 7877.42, 5235.8), (0.7, 0.7, 0.7), 782.423)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((1046.49, 6740.51, 5465.31), (0.7, 0.7, 0.7), 906.404)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((1857.13, 7845.3, 4265.33), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((1381.39, 6368.49, 4606.03), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((613.104, 6522.86, 6299.65), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((203.612, 8075.09, 4975.77), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((1296.73, 7743.86, 4754.71), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((149.43, 7594.75, 5337.42), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| batxes/4Cin | SHH_INV_models/SHH_INV_models_final_output_0.2_-0.1_10000/mtx1_models/SHH_INV_models19205.py | Python | gpl-3.0 | 17,572 |
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from django.db.models import signals
from django.utils.translation import ugettext_noop as _
import logging
logger = logging.getLogger(__name__)
if "notification" in settings.INSTALLED_APPS:
import notification
def create_notice_types(app, created_models, verbosity, **kwargs):
notification.models.NoticeType.create("layer_uploaded", _("Layer Uploaded"), _("A layer was uploaded"))
notification.models.NoticeType.create("layer_comment", _("Comment on Layer"), _("A layer was commented on"))
notification.models.NoticeType.create("layer_rated", _("Rating for Layer"), _("A rating was given to a layer"))
signals.post_syncdb.connect(create_notice_types, sender=notification)
logger.info("Notifications Configured for geonode.layers.managment.commands")
else:
logger.info("Skipping creation of NoticeTypes for geonode.layers.management.commands, since notification \
app was not found.")
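# --- Editor's hedged usage sketch (not part of the original module) ---
# The NoticeType labels registered above ("layer_uploaded", "layer_comment",
# "layer_rated") are meant to be emitted elsewhere in the layers app. A minimal
# sketch of a sender, assuming the classic django-notification send() API and a
# hypothetical `layer` object with an `owner` attribute:
#
#     from notification import models as notification
#
#     def notify_layer_uploaded(layer):
#         # deliver the "layer_uploaded" notice to the layer owner
#         notification.send([layer.owner], "layer_uploaded", {"layer": layer})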
| IMIO/imio.geonode.portal | geonode/layers/management/__init__.py | Python | gpl-3.0 | 1,791 |
# -*- coding: utf-8 -*-
"""
pygments.lexers.lisp
~~~~~~~~~~~~~~~~~~~~
Lexers for Lispy languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, words, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal, Error
from pygments.lexers.python import PythonLexer
__all__ = ['SchemeLexer', 'CommonLispLexer', 'HyLexer', 'RacketLexer',
'NewLispLexer', 'EmacsLispLexer', 'ShenLexer', 'CPSALexer',
'XtlangLexer', 'FennelLexer']
class SchemeLexer(RegexLexer):
"""
A Scheme lexer, parsing a stream and outputting the tokens
needed to highlight scheme code.
    This lexer could most probably be subclassed easily to parse
    other Lisp dialects like Common Lisp, Emacs Lisp or AutoLisp.
This parser is checked with pastes from the LISP pastebin
at http://paste.lisp.org/ to cover as much syntax as possible.
It supports the full Scheme syntax as defined in R5RS.
.. versionadded:: 0.6
"""
name = 'Scheme'
aliases = ['scheme', 'scm']
filenames = ['*.scm', '*.ss']
mimetypes = ['text/x-scheme', 'application/x-scheme']
    # list of known keywords and builtins taken from vim 6.4 scheme.vim
# syntax file.
keywords = (
'lambda', 'define', 'if', 'else', 'cond', 'and', 'or', 'case', 'let',
'let*', 'letrec', 'begin', 'do', 'delay', 'set!', '=>', 'quote',
'quasiquote', 'unquote', 'unquote-splicing', 'define-syntax',
'let-syntax', 'letrec-syntax', 'syntax-rules'
)
builtins = (
'*', '+', '-', '/', '<', '<=', '=', '>', '>=', 'abs', 'acos', 'angle',
'append', 'apply', 'asin', 'assoc', 'assq', 'assv', 'atan',
'boolean?', 'caaaar', 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr',
'caar', 'cadaar', 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr',
'cadr', 'call-with-current-continuation', 'call-with-input-file',
'call-with-output-file', 'call-with-values', 'call/cc', 'car',
'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr',
'cdr', 'ceiling', 'char->integer', 'char-alphabetic?', 'char-ci<=?',
'char-ci<?', 'char-ci=?', 'char-ci>=?', 'char-ci>?', 'char-downcase',
'char-lower-case?', 'char-numeric?', 'char-ready?', 'char-upcase',
'char-upper-case?', 'char-whitespace?', 'char<=?', 'char<?', 'char=?',
'char>=?', 'char>?', 'char?', 'close-input-port', 'close-output-port',
'complex?', 'cons', 'cos', 'current-input-port', 'current-output-port',
'denominator', 'display', 'dynamic-wind', 'eof-object?', 'eq?',
'equal?', 'eqv?', 'eval', 'even?', 'exact->inexact', 'exact?', 'exp',
'expt', 'floor', 'for-each', 'force', 'gcd', 'imag-part',
'inexact->exact', 'inexact?', 'input-port?', 'integer->char',
'integer?', 'interaction-environment', 'lcm', 'length', 'list',
'list->string', 'list->vector', 'list-ref', 'list-tail', 'list?',
'load', 'log', 'magnitude', 'make-polar', 'make-rectangular',
'make-string', 'make-vector', 'map', 'max', 'member', 'memq', 'memv',
'min', 'modulo', 'negative?', 'newline', 'not', 'null-environment',
'null?', 'number->string', 'number?', 'numerator', 'odd?',
'open-input-file', 'open-output-file', 'output-port?', 'pair?',
'peek-char', 'port?', 'positive?', 'procedure?', 'quotient',
'rational?', 'rationalize', 'read', 'read-char', 'real-part', 'real?',
'remainder', 'reverse', 'round', 'scheme-report-environment',
'set-car!', 'set-cdr!', 'sin', 'sqrt', 'string', 'string->list',
'string->number', 'string->symbol', 'string-append', 'string-ci<=?',
'string-ci<?', 'string-ci=?', 'string-ci>=?', 'string-ci>?',
'string-copy', 'string-fill!', 'string-length', 'string-ref',
'string-set!', 'string<=?', 'string<?', 'string=?', 'string>=?',
'string>?', 'string?', 'substring', 'symbol->string', 'symbol?',
'tan', 'transcript-off', 'transcript-on', 'truncate', 'values',
'vector', 'vector->list', 'vector-fill!', 'vector-length',
'vector-ref', 'vector-set!', 'vector?', 'with-input-from-file',
'with-output-to-file', 'write', 'write-char', 'zero?'
)
# valid names for identifiers
    # the only restriction is that a name cannot consist entirely of digits,
    # but this should be good enough for now
valid_name = r'[\w!$%&*+,/:<=>?@^~|-]+'
tokens = {
'root': [
# the comments
# and going to the end of the line
(r';.*$', Comment.Single),
# multi-line comment
(r'#\|', Comment.Multiline, 'multiline-comment'),
            # commented form (entire sexpr following)
(r'#;\s*\(', Comment, 'commented-form'),
# signifies that the program text that follows is written with the
# lexical and datum syntax described in r6rs
(r'#!r6rs', Comment),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
# support for uncommon kinds of numbers -
# have to figure out what the characters mean
# (r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char),
# constants
(r'(#t|#f)', Name.Constant),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# highlight the keywords
('(%s)' % '|'.join(re.escape(entry) + ' ' for entry in keywords),
Keyword),
            # first variable in a quoted form like
            # '(this is syntactic sugar)
(r"(?<='\()" + valid_name, Name.Variable),
(r"(?<=#\()" + valid_name, Name.Variable),
# highlight the builtins
(r"(?<=\()(%s)" % '|'.join(re.escape(entry) + ' ' for entry in builtins),
Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# the famous parentheses!
(r'(\(|\))', Punctuation),
(r'(\[|\])', Punctuation),
],
'multiline-comment': [
(r'#\|', Comment.Multiline, '#push'),
(r'\|#', Comment.Multiline, '#pop'),
(r'[^|#]+', Comment.Multiline),
(r'[|#]', Comment.Multiline),
],
'commented-form': [
(r'\(', Comment, '#push'),
(r'\)', Comment, '#pop'),
(r'[^()]+', Comment),
],
}
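# --- Editor's hedged usage sketch ---
# A minimal example of running SchemeLexer through the regular Pygments
# pipeline (pygments.highlight plus a formatter); guarded so it only runs
# when this file is executed directly, never on import.
if __name__ == '__main__':  # pragma: no cover - illustrative only
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    _scheme_demo = '(define (square x) (* x x)) ; squares its argument'
    print(highlight(_scheme_demo, SchemeLexer(), TerminalFormatter()))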
class CommonLispLexer(RegexLexer):
"""
A Common Lisp lexer.
.. versionadded:: 0.9
"""
name = 'Common Lisp'
aliases = ['common-lisp', 'cl', 'lisp']
filenames = ['*.cl', '*.lisp']
mimetypes = ['text/x-common-lisp']
flags = re.IGNORECASE | re.MULTILINE
# couple of useful regexes
# characters that are not macro-characters and can be used to begin a symbol
nonmacro = r'\\.|[\w!$%&*+-/<=>?@\[\]^{}~]'
constituent = nonmacro + '|[#.:]'
terminated = r'(?=[ "()\'\n,;`])' # whitespace or terminating macro characters
# symbol token, reverse-engineered from hyperspec
# Take a deep breath...
symbol = r'(\|[^|]+\||(?:%s)(?:%s)*)' % (nonmacro, constituent)
def __init__(self, **options):
from pygments.lexers._cl_builtins import BUILTIN_FUNCTIONS, \
SPECIAL_FORMS, MACROS, LAMBDA_LIST_KEYWORDS, DECLARATIONS, \
BUILTIN_TYPES, BUILTIN_CLASSES
self.builtin_function = BUILTIN_FUNCTIONS
self.special_forms = SPECIAL_FORMS
self.macros = MACROS
self.lambda_list_keywords = LAMBDA_LIST_KEYWORDS
self.declarations = DECLARATIONS
self.builtin_types = BUILTIN_TYPES
self.builtin_classes = BUILTIN_CLASSES
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Variable:
if value in self.builtin_function:
yield index, Name.Builtin, value
continue
if value in self.special_forms:
yield index, Keyword, value
continue
if value in self.macros:
yield index, Name.Builtin, value
continue
if value in self.lambda_list_keywords:
yield index, Keyword, value
continue
if value in self.declarations:
yield index, Keyword, value
continue
if value in self.builtin_types:
yield index, Keyword.Type, value
continue
if value in self.builtin_classes:
yield index, Name.Class, value
continue
yield index, token, value
tokens = {
'root': [
default('body'),
],
'multiline-comment': [
(r'#\|', Comment.Multiline, '#push'), # (cf. Hyperspec 2.4.8.19)
(r'\|#', Comment.Multiline, '#pop'),
(r'[^|#]+', Comment.Multiline),
(r'[|#]', Comment.Multiline),
],
'commented-form': [
(r'\(', Comment.Preproc, '#push'),
(r'\)', Comment.Preproc, '#pop'),
(r'[^()]+', Comment.Preproc),
],
'body': [
# whitespace
(r'\s+', Text),
# single-line comment
(r';.*$', Comment.Single),
# multi-line comment
(r'#\|', Comment.Multiline, 'multiline-comment'),
# encoding comment (?)
(r'#\d*Y.*$', Comment.Special),
# strings and characters
(r'"(\\.|\\\n|[^"\\])*"', String),
# quoting
(r":" + symbol, String.Symbol),
(r"::" + symbol, String.Symbol),
(r":#" + symbol, String.Symbol),
(r"'" + symbol, String.Symbol),
(r"'", Operator),
(r"`", Operator),
# decimal numbers
(r'[-+]?\d+\.?' + terminated, Number.Integer),
(r'[-+]?\d+/\d+' + terminated, Number),
(r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' +
terminated, Number.Float),
# sharpsign strings and characters
(r"#\\." + terminated, String.Char),
(r"#\\" + symbol, String.Char),
# vector
(r'#\(', Operator, 'body'),
# bitstring
(r'#\d*\*[01]*', Literal.Other),
# uninterned symbol
(r'#:' + symbol, String.Symbol),
# read-time and load-time evaluation
(r'#[.,]', Operator),
# function shorthand
(r'#\'', Name.Function),
# binary rational
(r'#b[+-]?[01]+(/[01]+)?', Number.Bin),
# octal rational
(r'#o[+-]?[0-7]+(/[0-7]+)?', Number.Oct),
# hex rational
(r'#x[+-]?[0-9a-f]+(/[0-9a-f]+)?', Number.Hex),
# radix rational
(r'#\d+r[+-]?[0-9a-z]+(/[0-9a-z]+)?', Number),
# complex
(r'(#c)(\()', bygroups(Number, Punctuation), 'body'),
# array
(r'(#\d+a)(\()', bygroups(Literal.Other, Punctuation), 'body'),
# structure
(r'(#s)(\()', bygroups(Literal.Other, Punctuation), 'body'),
# path
(r'#p?"(\\.|[^"])*"', Literal.Other),
# reference
(r'#\d+=', Operator),
(r'#\d+#', Operator),
# read-time comment
(r'#+nil' + terminated + r'\s*\(', Comment.Preproc, 'commented-form'),
# read-time conditional
(r'#[+-]', Operator),
# special operators that should have been parsed already
(r'(,@|,|\.)', Operator),
# special constants
(r'(t|nil)' + terminated, Name.Constant),
# functions and variables
(r'\*' + symbol + r'\*', Name.Variable.Global),
(symbol, Name.Variable),
# parentheses
(r'\(', Punctuation, 'body'),
(r'\)', Punctuation, '#pop'),
],
}
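# --- Editor's hedged usage sketch ---
# get_tokens_unprocessed() above re-tags generic Name.Variable tokens by
# looking them up in the builtin/special-form tables loaded in __init__.
# Iterating over the raw token stream makes that reclassification visible:
if __name__ == '__main__':  # pragma: no cover - illustrative only
    _cl_demo = "(defun add1 (x) (+ x 1))"
    for _, _token, _value in CommonLispLexer().get_tokens_unprocessed(_cl_demo):
        print(_token, repr(_value))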
class HyLexer(RegexLexer):
"""
Lexer for `Hy <http://hylang.org/>`_ source code.
.. versionadded:: 2.0
"""
name = 'Hy'
aliases = ['hylang']
filenames = ['*.hy']
mimetypes = ['text/x-hy', 'application/x-hy']
special_forms = (
'cond', 'for', '->', '->>', 'car',
'cdr', 'first', 'rest', 'let', 'when', 'unless',
'import', 'do', 'progn', 'get', 'slice', 'assoc', 'with-decorator',
',', 'list_comp', 'kwapply', '~', 'is', 'in', 'is-not', 'not-in',
'quasiquote', 'unquote', 'unquote-splice', 'quote', '|', '<<=', '>>=',
'foreach', 'while',
'eval-and-compile', 'eval-when-compile'
)
declarations = (
'def', 'defn', 'defun', 'defmacro', 'defclass', 'lambda', 'fn', 'setv'
)
hy_builtins = ()
hy_core = (
'cycle', 'dec', 'distinct', 'drop', 'even?', 'filter', 'inc',
'instance?', 'iterable?', 'iterate', 'iterator?', 'neg?',
'none?', 'nth', 'numeric?', 'odd?', 'pos?', 'remove', 'repeat',
'repeatedly', 'take', 'take_nth', 'take_while', 'zero?'
)
builtins = hy_builtins + hy_core
# valid names for identifiers
    # the only restriction is that a name cannot consist entirely of digits,
    # but this should be good enough for now
valid_name = r'(?!#)[\w!$%*+<=>?/.#-:]+'
def _multi_escape(entries):
return words(entries, suffix=' ')
tokens = {
'root': [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'[,\s]+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"\\(.|[a-z]+)", String.Char),
(r'^(\s*)([rRuU]{,2}"""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
(r"^(\s*)([rRuU]{,2}'''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
# keywords
(r'::?' + valid_name, String.Symbol),
# special operators
(r'~@|[`\'#^~&@]', Operator),
include('py-keywords'),
include('py-builtins'),
# highlight the special forms
(_multi_escape(special_forms), Keyword),
# Technically, only the special forms are 'keywords'. The problem
# is that only treating them as keywords means that things like
# 'defn' and 'ns' need to be highlighted as builtins. This is ugly
# and weird for most styles. So, as a compromise we're going to
            # highlight them as Keyword.Declaration.
(_multi_escape(declarations), Keyword.Declaration),
# highlight the builtins
(_multi_escape(builtins), Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# Hy accepts vector notation
(r'(\[|\])', Punctuation),
# Hy accepts map notation
(r'(\{|\})', Punctuation),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
'py-keywords': PythonLexer.tokens['keywords'],
'py-builtins': PythonLexer.tokens['builtins'],
}
def analyse_text(text):
if '(import ' in text or '(defn ' in text:
return 0.9
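# --- Editor's hedged usage sketch ---
# analyse_text() above returns 0.9 (a very strong score) whenever the text
# contains "(import " or "(defn ", which is what lets pygments.lexers.guess_lexer
# favour this lexer for Hy-looking source; for anything else it implicitly
# returns None, i.e. makes no claim on the input.
if __name__ == '__main__':  # pragma: no cover - illustrative only
    from pygments.lexers import guess_lexer
    print(guess_lexer('(import os)\n(defn main [] (print "hi"))'))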
class RacketLexer(RegexLexer):
"""
Lexer for `Racket <http://racket-lang.org/>`_ source code (formerly
known as PLT Scheme).
.. versionadded:: 1.6
"""
name = 'Racket'
aliases = ['racket', 'rkt']
filenames = ['*.rkt', '*.rktd', '*.rktl']
mimetypes = ['text/x-racket', 'application/x-racket']
# Generated by example.rkt
_keywords = (
u'#%app', u'#%datum', u'#%declare', u'#%expression', u'#%module-begin',
u'#%plain-app', u'#%plain-lambda', u'#%plain-module-begin',
u'#%printing-module-begin', u'#%provide', u'#%require',
u'#%stratified-body', u'#%top', u'#%top-interaction',
u'#%variable-reference', u'->', u'->*', u'->*m', u'->d', u'->dm', u'->i',
u'->m', u'...', u':do-in', u'==', u'=>', u'_', u'absent', u'abstract',
u'all-defined-out', u'all-from-out', u'and', u'any', u'augment', u'augment*',
u'augment-final', u'augment-final*', u'augride', u'augride*', u'begin',
u'begin-for-syntax', u'begin0', u'case', u'case->', u'case->m',
u'case-lambda', u'class', u'class*', u'class-field-accessor',
u'class-field-mutator', u'class/c', u'class/derived', u'combine-in',
u'combine-out', u'command-line', u'compound-unit', u'compound-unit/infer',
u'cond', u'cons/dc', u'contract', u'contract-out', u'contract-struct',
u'contracted', u'define', u'define-compound-unit',
u'define-compound-unit/infer', u'define-contract-struct',
u'define-custom-hash-types', u'define-custom-set-types',
u'define-for-syntax', u'define-local-member-name', u'define-logger',
u'define-match-expander', u'define-member-name',
u'define-module-boundary-contract', u'define-namespace-anchor',
u'define-opt/c', u'define-sequence-syntax', u'define-serializable-class',
u'define-serializable-class*', u'define-signature',
u'define-signature-form', u'define-struct', u'define-struct/contract',
u'define-struct/derived', u'define-syntax', u'define-syntax-rule',
u'define-syntaxes', u'define-unit', u'define-unit-binding',
u'define-unit-from-context', u'define-unit/contract',
u'define-unit/new-import-export', u'define-unit/s', u'define-values',
u'define-values-for-export', u'define-values-for-syntax',
u'define-values/invoke-unit', u'define-values/invoke-unit/infer',
u'define/augment', u'define/augment-final', u'define/augride',
u'define/contract', u'define/final-prop', u'define/match',
u'define/overment', u'define/override', u'define/override-final',
u'define/private', u'define/public', u'define/public-final',
u'define/pubment', u'define/subexpression-pos-prop',
u'define/subexpression-pos-prop/name', u'delay', u'delay/idle',
u'delay/name', u'delay/strict', u'delay/sync', u'delay/thread', u'do',
u'else', u'except', u'except-in', u'except-out', u'export', u'extends',
u'failure-cont', u'false', u'false/c', u'field', u'field-bound?', u'file',
u'flat-murec-contract', u'flat-rec-contract', u'for', u'for*', u'for*/and',
u'for*/async', u'for*/first', u'for*/fold', u'for*/fold/derived',
u'for*/hash', u'for*/hasheq', u'for*/hasheqv', u'for*/last', u'for*/list',
u'for*/lists', u'for*/mutable-set', u'for*/mutable-seteq',
u'for*/mutable-seteqv', u'for*/or', u'for*/product', u'for*/set',
u'for*/seteq', u'for*/seteqv', u'for*/stream', u'for*/sum', u'for*/vector',
u'for*/weak-set', u'for*/weak-seteq', u'for*/weak-seteqv', u'for-label',
u'for-meta', u'for-syntax', u'for-template', u'for/and', u'for/async',
u'for/first', u'for/fold', u'for/fold/derived', u'for/hash', u'for/hasheq',
u'for/hasheqv', u'for/last', u'for/list', u'for/lists', u'for/mutable-set',
u'for/mutable-seteq', u'for/mutable-seteqv', u'for/or', u'for/product',
u'for/set', u'for/seteq', u'for/seteqv', u'for/stream', u'for/sum',
u'for/vector', u'for/weak-set', u'for/weak-seteq', u'for/weak-seteqv',
u'gen:custom-write', u'gen:dict', u'gen:equal+hash', u'gen:set',
u'gen:stream', u'generic', u'get-field', u'hash/dc', u'if', u'implies',
u'import', u'include', u'include-at/relative-to',
u'include-at/relative-to/reader', u'include/reader', u'inherit',
u'inherit-field', u'inherit/inner', u'inherit/super', u'init',
u'init-depend', u'init-field', u'init-rest', u'inner', u'inspect',
u'instantiate', u'interface', u'interface*', u'invariant-assertion',
u'invoke-unit', u'invoke-unit/infer', u'lambda', u'lazy', u'let', u'let*',
u'let*-values', u'let-syntax', u'let-syntaxes', u'let-values', u'let/cc',
u'let/ec', u'letrec', u'letrec-syntax', u'letrec-syntaxes',
u'letrec-syntaxes+values', u'letrec-values', u'lib', u'link', u'local',
u'local-require', u'log-debug', u'log-error', u'log-fatal', u'log-info',
u'log-warning', u'match', u'match*', u'match*/derived', u'match-define',
u'match-define-values', u'match-lambda', u'match-lambda*',
u'match-lambda**', u'match-let', u'match-let*', u'match-let*-values',
u'match-let-values', u'match-letrec', u'match-letrec-values',
u'match/derived', u'match/values', u'member-name-key', u'mixin', u'module',
u'module*', u'module+', u'nand', u'new', u'nor', u'object-contract',
u'object/c', u'only', u'only-in', u'only-meta-in', u'open', u'opt/c', u'or',
u'overment', u'overment*', u'override', u'override*', u'override-final',
u'override-final*', u'parameterize', u'parameterize*',
u'parameterize-break', u'parametric->/c', u'place', u'place*',
u'place/context', u'planet', u'prefix', u'prefix-in', u'prefix-out',
u'private', u'private*', u'prompt-tag/c', u'protect-out', u'provide',
u'provide-signature-elements', u'provide/contract', u'public', u'public*',
u'public-final', u'public-final*', u'pubment', u'pubment*', u'quasiquote',
u'quasisyntax', u'quasisyntax/loc', u'quote', u'quote-syntax',
u'quote-syntax/prune', u'recontract-out', u'recursive-contract',
u'relative-in', u'rename', u'rename-in', u'rename-inner', u'rename-out',
u'rename-super', u'require', u'send', u'send*', u'send+', u'send-generic',
u'send/apply', u'send/keyword-apply', u'set!', u'set!-values',
u'set-field!', u'shared', u'stream', u'stream*', u'stream-cons', u'struct',
u'struct*', u'struct-copy', u'struct-field-index', u'struct-out',
u'struct/c', u'struct/ctc', u'struct/dc', u'submod', u'super',
u'super-instantiate', u'super-make-object', u'super-new', u'syntax',
u'syntax-case', u'syntax-case*', u'syntax-id-rules', u'syntax-rules',
u'syntax/loc', u'tag', u'this', u'this%', u'thunk', u'thunk*', u'time',
u'unconstrained-domain->', u'unit', u'unit-from-context', u'unit/c',
u'unit/new-import-export', u'unit/s', u'unless', u'unquote',
u'unquote-splicing', u'unsyntax', u'unsyntax-splicing', u'values/drop',
u'when', u'with-continuation-mark', u'with-contract',
u'with-contract-continuation-mark', u'with-handlers', u'with-handlers*',
u'with-method', u'with-syntax', u'λ'
)
# Generated by example.rkt
_builtins = (
u'*', u'*list/c', u'+', u'-', u'/', u'<', u'</c', u'<=', u'<=/c', u'=', u'=/c',
u'>', u'>/c', u'>=', u'>=/c', u'abort-current-continuation', u'abs',
u'absolute-path?', u'acos', u'add-between', u'add1', u'alarm-evt',
u'always-evt', u'and/c', u'andmap', u'angle', u'any/c', u'append', u'append*',
u'append-map', u'apply', u'argmax', u'argmin', u'arithmetic-shift',
u'arity-at-least', u'arity-at-least-value', u'arity-at-least?',
u'arity-checking-wrapper', u'arity-includes?', u'arity=?',
u'arrow-contract-info', u'arrow-contract-info-accepts-arglist',
u'arrow-contract-info-chaperone-procedure',
u'arrow-contract-info-check-first-order', u'arrow-contract-info?',
u'asin', u'assf', u'assoc', u'assq', u'assv', u'atan',
u'bad-number-of-results', u'banner', u'base->-doms/c', u'base->-rngs/c',
u'base->?', u'between/c', u'bitwise-and', u'bitwise-bit-field',
u'bitwise-bit-set?', u'bitwise-ior', u'bitwise-not', u'bitwise-xor',
u'blame-add-car-context', u'blame-add-cdr-context', u'blame-add-context',
u'blame-add-missing-party', u'blame-add-nth-arg-context',
u'blame-add-range-context', u'blame-add-unknown-context',
u'blame-context', u'blame-contract', u'blame-fmt->-string',
u'blame-missing-party?', u'blame-negative', u'blame-original?',
u'blame-positive', u'blame-replace-negative', u'blame-source',
u'blame-swap', u'blame-swapped?', u'blame-update', u'blame-value',
u'blame?', u'boolean=?', u'boolean?', u'bound-identifier=?', u'box',
u'box-cas!', u'box-immutable', u'box-immutable/c', u'box/c', u'box?',
u'break-enabled', u'break-parameterization?', u'break-thread',
u'build-chaperone-contract-property', u'build-compound-type-name',
u'build-contract-property', u'build-flat-contract-property',
u'build-list', u'build-path', u'build-path/convention-type',
u'build-string', u'build-vector', u'byte-pregexp', u'byte-pregexp?',
u'byte-ready?', u'byte-regexp', u'byte-regexp?', u'byte?', u'bytes',
u'bytes->immutable-bytes', u'bytes->list', u'bytes->path',
u'bytes->path-element', u'bytes->string/latin-1', u'bytes->string/locale',
u'bytes->string/utf-8', u'bytes-append', u'bytes-append*',
u'bytes-close-converter', u'bytes-convert', u'bytes-convert-end',
u'bytes-converter?', u'bytes-copy', u'bytes-copy!',
u'bytes-environment-variable-name?', u'bytes-fill!', u'bytes-join',
u'bytes-length', u'bytes-no-nuls?', u'bytes-open-converter', u'bytes-ref',
u'bytes-set!', u'bytes-utf-8-index', u'bytes-utf-8-length',
u'bytes-utf-8-ref', u'bytes<?', u'bytes=?', u'bytes>?', u'bytes?', u'caaaar',
u'caaadr', u'caaar', u'caadar', u'caaddr', u'caadr', u'caar', u'cadaar',
u'cadadr', u'cadar', u'caddar', u'cadddr', u'caddr', u'cadr',
u'call-in-nested-thread', u'call-with-atomic-output-file',
u'call-with-break-parameterization',
u'call-with-composable-continuation', u'call-with-continuation-barrier',
u'call-with-continuation-prompt', u'call-with-current-continuation',
u'call-with-default-reading-parameterization',
u'call-with-escape-continuation', u'call-with-exception-handler',
u'call-with-file-lock/timeout', u'call-with-immediate-continuation-mark',
u'call-with-input-bytes', u'call-with-input-file',
u'call-with-input-file*', u'call-with-input-string',
u'call-with-output-bytes', u'call-with-output-file',
u'call-with-output-file*', u'call-with-output-string',
u'call-with-parameterization', u'call-with-semaphore',
u'call-with-semaphore/enable-break', u'call-with-values', u'call/cc',
u'call/ec', u'car', u'cartesian-product', u'cdaaar', u'cdaadr', u'cdaar',
u'cdadar', u'cdaddr', u'cdadr', u'cdar', u'cddaar', u'cddadr', u'cddar',
u'cdddar', u'cddddr', u'cdddr', u'cddr', u'cdr', u'ceiling', u'channel-get',
u'channel-put', u'channel-put-evt', u'channel-put-evt?',
u'channel-try-get', u'channel/c', u'channel?', u'chaperone-box',
u'chaperone-channel', u'chaperone-continuation-mark-key',
u'chaperone-contract-property?', u'chaperone-contract?', u'chaperone-evt',
u'chaperone-hash', u'chaperone-hash-set', u'chaperone-of?',
u'chaperone-procedure', u'chaperone-procedure*', u'chaperone-prompt-tag',
u'chaperone-struct', u'chaperone-struct-type', u'chaperone-vector',
u'chaperone?', u'char->integer', u'char-alphabetic?', u'char-blank?',
u'char-ci<=?', u'char-ci<?', u'char-ci=?', u'char-ci>=?', u'char-ci>?',
u'char-downcase', u'char-foldcase', u'char-general-category',
u'char-graphic?', u'char-in', u'char-in/c', u'char-iso-control?',
u'char-lower-case?', u'char-numeric?', u'char-punctuation?',
u'char-ready?', u'char-symbolic?', u'char-title-case?', u'char-titlecase',
u'char-upcase', u'char-upper-case?', u'char-utf-8-length',
u'char-whitespace?', u'char<=?', u'char<?', u'char=?', u'char>=?', u'char>?',
u'char?', u'check-duplicate-identifier', u'check-duplicates',
u'checked-procedure-check-and-extract', u'choice-evt',
u'class->interface', u'class-info', u'class-seal', u'class-unseal',
u'class?', u'cleanse-path', u'close-input-port', u'close-output-port',
u'coerce-chaperone-contract', u'coerce-chaperone-contracts',
u'coerce-contract', u'coerce-contract/f', u'coerce-contracts',
u'coerce-flat-contract', u'coerce-flat-contracts', u'collect-garbage',
u'collection-file-path', u'collection-path', u'combinations', u'compile',
u'compile-allow-set!-undefined', u'compile-context-preservation-enabled',
u'compile-enforce-module-constants', u'compile-syntax',
u'compiled-expression-recompile', u'compiled-expression?',
u'compiled-module-expression?', u'complete-path?', u'complex?', u'compose',
u'compose1', u'conjoin', u'conjugate', u'cons', u'cons/c', u'cons?', u'const',
u'continuation-mark-key/c', u'continuation-mark-key?',
u'continuation-mark-set->context', u'continuation-mark-set->list',
u'continuation-mark-set->list*', u'continuation-mark-set-first',
u'continuation-mark-set?', u'continuation-marks',
u'continuation-prompt-available?', u'continuation-prompt-tag?',
u'continuation?', u'contract-continuation-mark-key',
u'contract-custom-write-property-proc', u'contract-exercise',
u'contract-first-order', u'contract-first-order-passes?',
u'contract-late-neg-projection', u'contract-name', u'contract-proc',
u'contract-projection', u'contract-property?',
u'contract-random-generate', u'contract-random-generate-fail',
u'contract-random-generate-fail?',
u'contract-random-generate-get-current-environment',
u'contract-random-generate-stash', u'contract-random-generate/choose',
u'contract-stronger?', u'contract-struct-exercise',
u'contract-struct-generate', u'contract-struct-late-neg-projection',
u'contract-struct-list-contract?', u'contract-val-first-projection',
u'contract?', u'convert-stream', u'copy-directory/files', u'copy-file',
u'copy-port', u'cos', u'cosh', u'count', u'current-blame-format',
u'current-break-parameterization', u'current-code-inspector',
u'current-command-line-arguments', u'current-compile',
u'current-compiled-file-roots', u'current-continuation-marks',
u'current-contract-region', u'current-custodian', u'current-directory',
u'current-directory-for-user', u'current-drive',
u'current-environment-variables', u'current-error-port', u'current-eval',
u'current-evt-pseudo-random-generator',
u'current-force-delete-permissions', u'current-future',
u'current-gc-milliseconds', u'current-get-interaction-input-port',
u'current-inexact-milliseconds', u'current-input-port',
u'current-inspector', u'current-library-collection-links',
u'current-library-collection-paths', u'current-load',
u'current-load-extension', u'current-load-relative-directory',
u'current-load/use-compiled', u'current-locale', u'current-logger',
u'current-memory-use', u'current-milliseconds',
u'current-module-declare-name', u'current-module-declare-source',
u'current-module-name-resolver', u'current-module-path-for-load',
u'current-namespace', u'current-output-port', u'current-parameterization',
u'current-plumber', u'current-preserved-thread-cell-values',
u'current-print', u'current-process-milliseconds', u'current-prompt-read',
u'current-pseudo-random-generator', u'current-read-interaction',
u'current-reader-guard', u'current-readtable', u'current-seconds',
u'current-security-guard', u'current-subprocess-custodian-mode',
u'current-thread', u'current-thread-group',
u'current-thread-initial-stack-size',
u'current-write-relative-directory', u'curry', u'curryr',
u'custodian-box-value', u'custodian-box?', u'custodian-limit-memory',
u'custodian-managed-list', u'custodian-memory-accounting-available?',
u'custodian-require-memory', u'custodian-shutdown-all', u'custodian?',
u'custom-print-quotable-accessor', u'custom-print-quotable?',
u'custom-write-accessor', u'custom-write-property-proc', u'custom-write?',
u'date', u'date*', u'date*-nanosecond', u'date*-time-zone-name', u'date*?',
u'date-day', u'date-dst?', u'date-hour', u'date-minute', u'date-month',
u'date-second', u'date-time-zone-offset', u'date-week-day', u'date-year',
u'date-year-day', u'date?', u'datum->syntax', u'datum-intern-literal',
u'default-continuation-prompt-tag', u'degrees->radians',
u'delete-directory', u'delete-directory/files', u'delete-file',
u'denominator', u'dict->list', u'dict-can-functional-set?',
u'dict-can-remove-keys?', u'dict-clear', u'dict-clear!', u'dict-copy',
u'dict-count', u'dict-empty?', u'dict-for-each', u'dict-has-key?',
u'dict-implements/c', u'dict-implements?', u'dict-iter-contract',
u'dict-iterate-first', u'dict-iterate-key', u'dict-iterate-next',
u'dict-iterate-value', u'dict-key-contract', u'dict-keys', u'dict-map',
u'dict-mutable?', u'dict-ref', u'dict-ref!', u'dict-remove',
u'dict-remove!', u'dict-set', u'dict-set!', u'dict-set*', u'dict-set*!',
u'dict-update', u'dict-update!', u'dict-value-contract', u'dict-values',
u'dict?', u'directory-exists?', u'directory-list', u'disjoin', u'display',
u'display-lines', u'display-lines-to-file', u'display-to-file',
u'displayln', u'double-flonum?', u'drop', u'drop-common-prefix',
u'drop-right', u'dropf', u'dropf-right', u'dump-memory-stats',
u'dup-input-port', u'dup-output-port', u'dynamic->*', u'dynamic-get-field',
u'dynamic-object/c', u'dynamic-place', u'dynamic-place*',
u'dynamic-require', u'dynamic-require-for-syntax', u'dynamic-send',
u'dynamic-set-field!', u'dynamic-wind', u'eighth', u'empty',
u'empty-sequence', u'empty-stream', u'empty?',
u'environment-variables-copy', u'environment-variables-names',
u'environment-variables-ref', u'environment-variables-set!',
u'environment-variables?', u'eof', u'eof-evt', u'eof-object?',
u'ephemeron-value', u'ephemeron?', u'eprintf', u'eq-contract-val',
u'eq-contract?', u'eq-hash-code', u'eq?', u'equal-contract-val',
u'equal-contract?', u'equal-hash-code', u'equal-secondary-hash-code',
u'equal<%>', u'equal?', u'equal?/recur', u'eqv-hash-code', u'eqv?', u'error',
u'error-display-handler', u'error-escape-handler',
u'error-print-context-length', u'error-print-source-location',
u'error-print-width', u'error-value->string-handler', u'eval',
u'eval-jit-enabled', u'eval-syntax', u'even?', u'evt/c', u'evt?',
u'exact->inexact', u'exact-ceiling', u'exact-floor', u'exact-integer?',
u'exact-nonnegative-integer?', u'exact-positive-integer?', u'exact-round',
u'exact-truncate', u'exact?', u'executable-yield-handler', u'exit',
u'exit-handler', u'exn', u'exn-continuation-marks', u'exn-message',
u'exn:break', u'exn:break-continuation', u'exn:break:hang-up',
u'exn:break:hang-up?', u'exn:break:terminate', u'exn:break:terminate?',
u'exn:break?', u'exn:fail', u'exn:fail:contract',
u'exn:fail:contract:arity', u'exn:fail:contract:arity?',
u'exn:fail:contract:blame', u'exn:fail:contract:blame-object',
u'exn:fail:contract:blame?', u'exn:fail:contract:continuation',
u'exn:fail:contract:continuation?', u'exn:fail:contract:divide-by-zero',
u'exn:fail:contract:divide-by-zero?',
u'exn:fail:contract:non-fixnum-result',
u'exn:fail:contract:non-fixnum-result?', u'exn:fail:contract:variable',
u'exn:fail:contract:variable-id', u'exn:fail:contract:variable?',
u'exn:fail:contract?', u'exn:fail:filesystem',
u'exn:fail:filesystem:errno', u'exn:fail:filesystem:errno-errno',
u'exn:fail:filesystem:errno?', u'exn:fail:filesystem:exists',
u'exn:fail:filesystem:exists?', u'exn:fail:filesystem:missing-module',
u'exn:fail:filesystem:missing-module-path',
u'exn:fail:filesystem:missing-module?', u'exn:fail:filesystem:version',
u'exn:fail:filesystem:version?', u'exn:fail:filesystem?',
u'exn:fail:network', u'exn:fail:network:errno',
u'exn:fail:network:errno-errno', u'exn:fail:network:errno?',
u'exn:fail:network?', u'exn:fail:object', u'exn:fail:object?',
u'exn:fail:out-of-memory', u'exn:fail:out-of-memory?', u'exn:fail:read',
u'exn:fail:read-srclocs', u'exn:fail:read:eof', u'exn:fail:read:eof?',
u'exn:fail:read:non-char', u'exn:fail:read:non-char?', u'exn:fail:read?',
u'exn:fail:syntax', u'exn:fail:syntax-exprs',
u'exn:fail:syntax:missing-module',
u'exn:fail:syntax:missing-module-path',
u'exn:fail:syntax:missing-module?', u'exn:fail:syntax:unbound',
u'exn:fail:syntax:unbound?', u'exn:fail:syntax?', u'exn:fail:unsupported',
u'exn:fail:unsupported?', u'exn:fail:user', u'exn:fail:user?',
u'exn:fail?', u'exn:misc:match?', u'exn:missing-module-accessor',
u'exn:missing-module?', u'exn:srclocs-accessor', u'exn:srclocs?', u'exn?',
u'exp', u'expand', u'expand-once', u'expand-syntax', u'expand-syntax-once',
u'expand-syntax-to-top-form', u'expand-to-top-form', u'expand-user-path',
u'explode-path', u'expt', u'externalizable<%>', u'failure-result/c',
u'false?', u'field-names', u'fifth', u'file->bytes', u'file->bytes-lines',
u'file->lines', u'file->list', u'file->string', u'file->value',
u'file-exists?', u'file-name-from-path', u'file-or-directory-identity',
u'file-or-directory-modify-seconds', u'file-or-directory-permissions',
u'file-position', u'file-position*', u'file-size',
u'file-stream-buffer-mode', u'file-stream-port?', u'file-truncate',
u'filename-extension', u'filesystem-change-evt',
u'filesystem-change-evt-cancel', u'filesystem-change-evt?',
u'filesystem-root-list', u'filter', u'filter-map', u'filter-not',
u'filter-read-input-port', u'find-executable-path', u'find-files',
u'find-library-collection-links', u'find-library-collection-paths',
u'find-relative-path', u'find-system-path', u'findf', u'first',
u'first-or/c', u'fixnum?', u'flat-contract', u'flat-contract-predicate',
u'flat-contract-property?', u'flat-contract?', u'flat-named-contract',
u'flatten', u'floating-point-bytes->real', u'flonum?', u'floor',
u'flush-output', u'fold-files', u'foldl', u'foldr', u'for-each', u'force',
u'format', u'fourth', u'fprintf', u'free-identifier=?',
u'free-label-identifier=?', u'free-template-identifier=?',
u'free-transformer-identifier=?', u'fsemaphore-count', u'fsemaphore-post',
u'fsemaphore-try-wait?', u'fsemaphore-wait', u'fsemaphore?', u'future',
u'future?', u'futures-enabled?', u'gcd', u'generate-member-key',
u'generate-temporaries', u'generic-set?', u'generic?', u'gensym',
u'get-output-bytes', u'get-output-string', u'get-preference',
u'get/build-late-neg-projection', u'get/build-val-first-projection',
u'getenv', u'global-port-print-handler', u'group-by', u'group-execute-bit',
u'group-read-bit', u'group-write-bit', u'guard-evt', u'handle-evt',
u'handle-evt?', u'has-blame?', u'has-contract?', u'hash', u'hash->list',
u'hash-clear', u'hash-clear!', u'hash-copy', u'hash-copy-clear',
u'hash-count', u'hash-empty?', u'hash-eq?', u'hash-equal?', u'hash-eqv?',
u'hash-for-each', u'hash-has-key?', u'hash-iterate-first',
u'hash-iterate-key', u'hash-iterate-key+value', u'hash-iterate-next',
u'hash-iterate-pair', u'hash-iterate-value', u'hash-keys', u'hash-map',
u'hash-placeholder?', u'hash-ref', u'hash-ref!', u'hash-remove',
u'hash-remove!', u'hash-set', u'hash-set!', u'hash-set*', u'hash-set*!',
u'hash-update', u'hash-update!', u'hash-values', u'hash-weak?', u'hash/c',
u'hash?', u'hasheq', u'hasheqv', u'identifier-binding',
u'identifier-binding-symbol', u'identifier-label-binding',
u'identifier-prune-lexical-context',
u'identifier-prune-to-source-module',
u'identifier-remove-from-definition-context',
u'identifier-template-binding', u'identifier-transformer-binding',
u'identifier?', u'identity', u'if/c', u'imag-part', u'immutable?',
u'impersonate-box', u'impersonate-channel',
u'impersonate-continuation-mark-key', u'impersonate-hash',
u'impersonate-hash-set', u'impersonate-procedure',
u'impersonate-procedure*', u'impersonate-prompt-tag',
u'impersonate-struct', u'impersonate-vector', u'impersonator-contract?',
u'impersonator-ephemeron', u'impersonator-of?',
u'impersonator-prop:application-mark', u'impersonator-prop:blame',
u'impersonator-prop:contracted',
u'impersonator-property-accessor-procedure?', u'impersonator-property?',
u'impersonator?', u'implementation?', u'implementation?/c', u'in-bytes',
u'in-bytes-lines', u'in-combinations', u'in-cycle', u'in-dict',
u'in-dict-keys', u'in-dict-pairs', u'in-dict-values', u'in-directory',
u'in-hash', u'in-hash-keys', u'in-hash-pairs', u'in-hash-values',
u'in-immutable-hash', u'in-immutable-hash-keys',
u'in-immutable-hash-pairs', u'in-immutable-hash-values',
u'in-immutable-set', u'in-indexed', u'in-input-port-bytes',
u'in-input-port-chars', u'in-lines', u'in-list', u'in-mlist',
u'in-mutable-hash', u'in-mutable-hash-keys', u'in-mutable-hash-pairs',
u'in-mutable-hash-values', u'in-mutable-set', u'in-naturals',
u'in-parallel', u'in-permutations', u'in-port', u'in-producer', u'in-range',
u'in-sequences', u'in-set', u'in-slice', u'in-stream', u'in-string',
u'in-syntax', u'in-value', u'in-values*-sequence', u'in-values-sequence',
u'in-vector', u'in-weak-hash', u'in-weak-hash-keys', u'in-weak-hash-pairs',
u'in-weak-hash-values', u'in-weak-set', u'inexact->exact',
u'inexact-real?', u'inexact?', u'infinite?', u'input-port-append',
u'input-port?', u'inspector?', u'instanceof/c', u'integer->char',
u'integer->integer-bytes', u'integer-bytes->integer', u'integer-in',
u'integer-length', u'integer-sqrt', u'integer-sqrt/remainder', u'integer?',
u'interface->method-names', u'interface-extension?', u'interface?',
u'internal-definition-context-binding-identifiers',
u'internal-definition-context-introduce',
u'internal-definition-context-seal', u'internal-definition-context?',
u'is-a?', u'is-a?/c', u'keyword->string', u'keyword-apply', u'keyword<?',
u'keyword?', u'keywords-match', u'kill-thread', u'last', u'last-pair',
u'lcm', u'length', u'liberal-define-context?', u'link-exists?', u'list',
u'list*', u'list*of', u'list->bytes', u'list->mutable-set',
u'list->mutable-seteq', u'list->mutable-seteqv', u'list->set',
u'list->seteq', u'list->seteqv', u'list->string', u'list->vector',
u'list->weak-set', u'list->weak-seteq', u'list->weak-seteqv',
u'list-contract?', u'list-prefix?', u'list-ref', u'list-set', u'list-tail',
u'list-update', u'list/c', u'list?', u'listen-port-number?', u'listof',
u'load', u'load-extension', u'load-on-demand-enabled', u'load-relative',
u'load-relative-extension', u'load/cd', u'load/use-compiled',
u'local-expand', u'local-expand/capture-lifts',
u'local-transformer-expand', u'local-transformer-expand/capture-lifts',
u'locale-string-encoding', u'log', u'log-all-levels', u'log-level-evt',
u'log-level?', u'log-max-level', u'log-message', u'log-receiver?',
u'logger-name', u'logger?', u'magnitude', u'make-arity-at-least',
u'make-base-empty-namespace', u'make-base-namespace', u'make-bytes',
u'make-channel', u'make-chaperone-contract',
u'make-continuation-mark-key', u'make-continuation-prompt-tag',
u'make-contract', u'make-custodian', u'make-custodian-box',
u'make-custom-hash', u'make-custom-hash-types', u'make-custom-set',
u'make-custom-set-types', u'make-date', u'make-date*',
u'make-derived-parameter', u'make-directory', u'make-directory*',
u'make-do-sequence', u'make-empty-namespace',
u'make-environment-variables', u'make-ephemeron', u'make-exn',
u'make-exn:break', u'make-exn:break:hang-up', u'make-exn:break:terminate',
u'make-exn:fail', u'make-exn:fail:contract',
u'make-exn:fail:contract:arity', u'make-exn:fail:contract:blame',
u'make-exn:fail:contract:continuation',
u'make-exn:fail:contract:divide-by-zero',
u'make-exn:fail:contract:non-fixnum-result',
u'make-exn:fail:contract:variable', u'make-exn:fail:filesystem',
u'make-exn:fail:filesystem:errno', u'make-exn:fail:filesystem:exists',
u'make-exn:fail:filesystem:missing-module',
u'make-exn:fail:filesystem:version', u'make-exn:fail:network',
u'make-exn:fail:network:errno', u'make-exn:fail:object',
u'make-exn:fail:out-of-memory', u'make-exn:fail:read',
u'make-exn:fail:read:eof', u'make-exn:fail:read:non-char',
u'make-exn:fail:syntax', u'make-exn:fail:syntax:missing-module',
u'make-exn:fail:syntax:unbound', u'make-exn:fail:unsupported',
u'make-exn:fail:user', u'make-file-or-directory-link',
u'make-flat-contract', u'make-fsemaphore', u'make-generic',
u'make-handle-get-preference-locked', u'make-hash',
u'make-hash-placeholder', u'make-hasheq', u'make-hasheq-placeholder',
u'make-hasheqv', u'make-hasheqv-placeholder',
u'make-immutable-custom-hash', u'make-immutable-hash',
u'make-immutable-hasheq', u'make-immutable-hasheqv',
u'make-impersonator-property', u'make-input-port',
u'make-input-port/read-to-peek', u'make-inspector',
u'make-keyword-procedure', u'make-known-char-range-list',
u'make-limited-input-port', u'make-list', u'make-lock-file-name',
u'make-log-receiver', u'make-logger', u'make-mixin-contract',
u'make-mutable-custom-set', u'make-none/c', u'make-object',
u'make-output-port', u'make-parameter', u'make-parent-directory*',
u'make-phantom-bytes', u'make-pipe', u'make-pipe-with-specials',
u'make-placeholder', u'make-plumber', u'make-polar', u'make-prefab-struct',
u'make-primitive-class', u'make-proj-contract',
u'make-pseudo-random-generator', u'make-reader-graph', u'make-readtable',
u'make-rectangular', u'make-rename-transformer',
u'make-resolved-module-path', u'make-security-guard', u'make-semaphore',
u'make-set!-transformer', u'make-shared-bytes', u'make-sibling-inspector',
u'make-special-comment', u'make-srcloc', u'make-string',
u'make-struct-field-accessor', u'make-struct-field-mutator',
u'make-struct-type', u'make-struct-type-property',
u'make-syntax-delta-introducer', u'make-syntax-introducer',
u'make-temporary-file', u'make-tentative-pretty-print-output-port',
u'make-thread-cell', u'make-thread-group', u'make-vector',
u'make-weak-box', u'make-weak-custom-hash', u'make-weak-custom-set',
u'make-weak-hash', u'make-weak-hasheq', u'make-weak-hasheqv',
u'make-will-executor', u'map', u'match-equality-test',
u'matches-arity-exactly?', u'max', u'mcar', u'mcdr', u'mcons', u'member',
u'member-name-key-hash-code', u'member-name-key=?', u'member-name-key?',
u'memf', u'memq', u'memv', u'merge-input', u'method-in-interface?', u'min',
u'mixin-contract', u'module->exports', u'module->imports',
u'module->language-info', u'module->namespace',
u'module-compiled-cross-phase-persistent?', u'module-compiled-exports',
u'module-compiled-imports', u'module-compiled-language-info',
u'module-compiled-name', u'module-compiled-submodules',
u'module-declared?', u'module-path-index-join',
u'module-path-index-resolve', u'module-path-index-split',
u'module-path-index-submodule', u'module-path-index?', u'module-path?',
u'module-predefined?', u'module-provide-protected?', u'modulo', u'mpair?',
u'mutable-set', u'mutable-seteq', u'mutable-seteqv', u'n->th',
u'nack-guard-evt', u'namespace-anchor->empty-namespace',
u'namespace-anchor->namespace', u'namespace-anchor?',
u'namespace-attach-module', u'namespace-attach-module-declaration',
u'namespace-base-phase', u'namespace-mapped-symbols',
u'namespace-module-identifier', u'namespace-module-registry',
u'namespace-require', u'namespace-require/constant',
u'namespace-require/copy', u'namespace-require/expansion-time',
u'namespace-set-variable-value!', u'namespace-symbol->identifier',
u'namespace-syntax-introduce', u'namespace-undefine-variable!',
u'namespace-unprotect-module', u'namespace-variable-value', u'namespace?',
u'nan?', u'natural-number/c', u'negate', u'negative?', u'never-evt',
u'new-∀/c', u'new-∃/c', u'newline', u'ninth', u'non-empty-listof',
u'non-empty-string?', u'none/c', u'normal-case-path', u'normalize-arity',
u'normalize-path', u'normalized-arity?', u'not', u'not/c', u'null', u'null?',
u'number->string', u'number?', u'numerator', u'object%', u'object->vector',
u'object-info', u'object-interface', u'object-method-arity-includes?',
u'object-name', u'object-or-false=?', u'object=?', u'object?', u'odd?',
u'one-of/c', u'open-input-bytes', u'open-input-file',
u'open-input-output-file', u'open-input-string', u'open-output-bytes',
u'open-output-file', u'open-output-nowhere', u'open-output-string',
u'or/c', u'order-of-magnitude', u'ormap', u'other-execute-bit',
u'other-read-bit', u'other-write-bit', u'output-port?', u'pair?',
u'parameter-procedure=?', u'parameter/c', u'parameter?',
u'parameterization?', u'parse-command-line', u'partition', u'path->bytes',
u'path->complete-path', u'path->directory-path', u'path->string',
u'path-add-suffix', u'path-convention-type', u'path-element->bytes',
u'path-element->string', u'path-element?', u'path-for-some-system?',
u'path-list-string->path-list', u'path-only', u'path-replace-suffix',
u'path-string?', u'path<?', u'path?', u'pathlist-closure', u'peek-byte',
u'peek-byte-or-special', u'peek-bytes', u'peek-bytes!', u'peek-bytes!-evt',
u'peek-bytes-avail!', u'peek-bytes-avail!*', u'peek-bytes-avail!-evt',
u'peek-bytes-avail!/enable-break', u'peek-bytes-evt', u'peek-char',
u'peek-char-or-special', u'peek-string', u'peek-string!',
u'peek-string!-evt', u'peek-string-evt', u'peeking-input-port',
u'permutations', u'phantom-bytes?', u'pi', u'pi.f', u'pipe-content-length',
u'place-break', u'place-channel', u'place-channel-get',
u'place-channel-put', u'place-channel-put/get', u'place-channel?',
u'place-dead-evt', u'place-enabled?', u'place-kill', u'place-location?',
u'place-message-allowed?', u'place-sleep', u'place-wait', u'place?',
u'placeholder-get', u'placeholder-set!', u'placeholder?',
u'plumber-add-flush!', u'plumber-flush-all',
u'plumber-flush-handle-remove!', u'plumber-flush-handle?', u'plumber?',
u'poll-guard-evt', u'port->bytes', u'port->bytes-lines', u'port->lines',
u'port->list', u'port->string', u'port-closed-evt', u'port-closed?',
u'port-commit-peeked', u'port-count-lines!', u'port-count-lines-enabled',
u'port-counts-lines?', u'port-display-handler', u'port-file-identity',
u'port-file-unlock', u'port-next-location', u'port-number?',
u'port-print-handler', u'port-progress-evt',
u'port-provides-progress-evts?', u'port-read-handler',
u'port-try-file-lock?', u'port-write-handler', u'port-writes-atomic?',
u'port-writes-special?', u'port?', u'positive?', u'predicate/c',
u'prefab-key->struct-type', u'prefab-key?', u'prefab-struct-key',
u'preferences-lock-file-mode', u'pregexp', u'pregexp?', u'pretty-display',
u'pretty-format', u'pretty-print', u'pretty-print-.-symbol-without-bars',
u'pretty-print-abbreviate-read-macros', u'pretty-print-columns',
u'pretty-print-current-style-table', u'pretty-print-depth',
u'pretty-print-exact-as-decimal', u'pretty-print-extend-style-table',
u'pretty-print-handler', u'pretty-print-newline',
u'pretty-print-post-print-hook', u'pretty-print-pre-print-hook',
u'pretty-print-print-hook', u'pretty-print-print-line',
u'pretty-print-remap-stylable', u'pretty-print-show-inexactness',
u'pretty-print-size-hook', u'pretty-print-style-table?',
u'pretty-printing', u'pretty-write', u'primitive-closure?',
u'primitive-result-arity', u'primitive?', u'print', u'print-as-expression',
u'print-boolean-long-form', u'print-box', u'print-graph',
u'print-hash-table', u'print-mpair-curly-braces',
u'print-pair-curly-braces', u'print-reader-abbreviations',
u'print-struct', u'print-syntax-width', u'print-unreadable',
u'print-vector-length', u'printable/c', u'printable<%>', u'printf',
u'println', u'procedure->method', u'procedure-arity',
u'procedure-arity-includes/c', u'procedure-arity-includes?',
u'procedure-arity?', u'procedure-closure-contents-eq?',
u'procedure-extract-target', u'procedure-keywords',
u'procedure-reduce-arity', u'procedure-reduce-keyword-arity',
u'procedure-rename', u'procedure-result-arity', u'procedure-specialize',
u'procedure-struct-type?', u'procedure?', u'process', u'process*',
u'process*/ports', u'process/ports', u'processor-count', u'progress-evt?',
u'promise-forced?', u'promise-running?', u'promise/c', u'promise/name?',
u'promise?', u'prop:arity-string', u'prop:arrow-contract',
u'prop:arrow-contract-get-info', u'prop:arrow-contract?', u'prop:blame',
u'prop:chaperone-contract', u'prop:checked-procedure', u'prop:contract',
u'prop:contracted', u'prop:custom-print-quotable', u'prop:custom-write',
u'prop:dict', u'prop:dict/contract', u'prop:equal+hash', u'prop:evt',
u'prop:exn:missing-module', u'prop:exn:srclocs',
u'prop:expansion-contexts', u'prop:flat-contract',
u'prop:impersonator-of', u'prop:input-port',
u'prop:liberal-define-context', u'prop:object-name',
u'prop:opt-chaperone-contract', u'prop:opt-chaperone-contract-get-test',
u'prop:opt-chaperone-contract?', u'prop:orc-contract',
u'prop:orc-contract-get-subcontracts', u'prop:orc-contract?',
u'prop:output-port', u'prop:place-location', u'prop:procedure',
u'prop:recursive-contract', u'prop:recursive-contract-unroll',
u'prop:recursive-contract?', u'prop:rename-transformer', u'prop:sequence',
u'prop:set!-transformer', u'prop:stream', u'proper-subset?',
u'pseudo-random-generator->vector', u'pseudo-random-generator-vector?',
u'pseudo-random-generator?', u'put-preferences', u'putenv', u'quotient',
u'quotient/remainder', u'radians->degrees', u'raise',
u'raise-argument-error', u'raise-arguments-error', u'raise-arity-error',
u'raise-blame-error', u'raise-contract-error', u'raise-mismatch-error',
u'raise-not-cons-blame-error', u'raise-range-error',
u'raise-result-error', u'raise-syntax-error', u'raise-type-error',
u'raise-user-error', u'random', u'random-seed', u'range', u'rational?',
u'rationalize', u'read', u'read-accept-bar-quote', u'read-accept-box',
u'read-accept-compiled', u'read-accept-dot', u'read-accept-graph',
u'read-accept-infix-dot', u'read-accept-lang', u'read-accept-quasiquote',
u'read-accept-reader', u'read-byte', u'read-byte-or-special',
u'read-bytes', u'read-bytes!', u'read-bytes!-evt', u'read-bytes-avail!',
u'read-bytes-avail!*', u'read-bytes-avail!-evt',
u'read-bytes-avail!/enable-break', u'read-bytes-evt', u'read-bytes-line',
u'read-bytes-line-evt', u'read-case-sensitive', u'read-cdot', u'read-char',
u'read-char-or-special', u'read-curly-brace-as-paren',
u'read-curly-brace-with-tag', u'read-decimal-as-inexact',
u'read-eval-print-loop', u'read-language', u'read-line', u'read-line-evt',
u'read-on-demand-source', u'read-square-bracket-as-paren',
u'read-square-bracket-with-tag', u'read-string', u'read-string!',
u'read-string!-evt', u'read-string-evt', u'read-syntax',
u'read-syntax/recursive', u'read/recursive', u'readtable-mapping',
u'readtable?', u'real->decimal-string', u'real->double-flonum',
u'real->floating-point-bytes', u'real->single-flonum', u'real-in',
u'real-part', u'real?', u'reencode-input-port', u'reencode-output-port',
u'regexp', u'regexp-match', u'regexp-match*', u'regexp-match-evt',
u'regexp-match-exact?', u'regexp-match-peek',
u'regexp-match-peek-immediate', u'regexp-match-peek-positions',
u'regexp-match-peek-positions*',
u'regexp-match-peek-positions-immediate',
u'regexp-match-peek-positions-immediate/end',
u'regexp-match-peek-positions/end', u'regexp-match-positions',
u'regexp-match-positions*', u'regexp-match-positions/end',
u'regexp-match/end', u'regexp-match?', u'regexp-max-lookbehind',
u'regexp-quote', u'regexp-replace', u'regexp-replace*',
u'regexp-replace-quote', u'regexp-replaces', u'regexp-split',
u'regexp-try-match', u'regexp?', u'relative-path?', u'relocate-input-port',
u'relocate-output-port', u'remainder', u'remf', u'remf*', u'remove',
u'remove*', u'remove-duplicates', u'remq', u'remq*', u'remv', u'remv*',
u'rename-contract', u'rename-file-or-directory',
u'rename-transformer-target', u'rename-transformer?', u'replace-evt',
u'reroot-path', u'resolve-path', u'resolved-module-path-name',
u'resolved-module-path?', u'rest', u'reverse', u'round', u'second',
u'seconds->date', u'security-guard?', u'semaphore-peek-evt',
u'semaphore-peek-evt?', u'semaphore-post', u'semaphore-try-wait?',
u'semaphore-wait', u'semaphore-wait/enable-break', u'semaphore?',
u'sequence->list', u'sequence->stream', u'sequence-add-between',
u'sequence-andmap', u'sequence-append', u'sequence-count',
u'sequence-filter', u'sequence-fold', u'sequence-for-each',
u'sequence-generate', u'sequence-generate*', u'sequence-length',
u'sequence-map', u'sequence-ormap', u'sequence-ref', u'sequence-tail',
u'sequence/c', u'sequence?', u'set', u'set!-transformer-procedure',
u'set!-transformer?', u'set->list', u'set->stream', u'set-add', u'set-add!',
u'set-box!', u'set-clear', u'set-clear!', u'set-copy', u'set-copy-clear',
u'set-count', u'set-empty?', u'set-eq?', u'set-equal?', u'set-eqv?',
u'set-first', u'set-for-each', u'set-implements/c', u'set-implements?',
u'set-intersect', u'set-intersect!', u'set-map', u'set-mcar!', u'set-mcdr!',
u'set-member?', u'set-mutable?', u'set-phantom-bytes!',
u'set-port-next-location!', u'set-remove', u'set-remove!', u'set-rest',
u'set-some-basic-contracts!', u'set-subtract', u'set-subtract!',
u'set-symmetric-difference', u'set-symmetric-difference!', u'set-union',
u'set-union!', u'set-weak?', u'set/c', u'set=?', u'set?', u'seteq', u'seteqv',
u'seventh', u'sgn', u'shared-bytes', u'shell-execute', u'shrink-path-wrt',
u'shuffle', u'simple-form-path', u'simplify-path', u'sin',
u'single-flonum?', u'sinh', u'sixth', u'skip-projection-wrapper?', u'sleep',
u'some-system-path->string', u'sort', u'special-comment-value',
u'special-comment?', u'special-filter-input-port', u'split-at',
u'split-at-right', u'split-common-prefix', u'split-path', u'splitf-at',
u'splitf-at-right', u'sqr', u'sqrt', u'srcloc', u'srcloc->string',
u'srcloc-column', u'srcloc-line', u'srcloc-position', u'srcloc-source',
u'srcloc-span', u'srcloc?', u'stop-after', u'stop-before', u'stream->list',
u'stream-add-between', u'stream-andmap', u'stream-append', u'stream-count',
u'stream-empty?', u'stream-filter', u'stream-first', u'stream-fold',
u'stream-for-each', u'stream-length', u'stream-map', u'stream-ormap',
u'stream-ref', u'stream-rest', u'stream-tail', u'stream/c', u'stream?',
u'string', u'string->bytes/latin-1', u'string->bytes/locale',
u'string->bytes/utf-8', u'string->immutable-string', u'string->keyword',
u'string->list', u'string->number', u'string->path',
u'string->path-element', u'string->some-system-path', u'string->symbol',
u'string->uninterned-symbol', u'string->unreadable-symbol',
u'string-append', u'string-append*', u'string-ci<=?', u'string-ci<?',
u'string-ci=?', u'string-ci>=?', u'string-ci>?', u'string-contains?',
u'string-copy', u'string-copy!', u'string-downcase',
u'string-environment-variable-name?', u'string-fill!', u'string-foldcase',
u'string-join', u'string-len/c', u'string-length', u'string-locale-ci<?',
u'string-locale-ci=?', u'string-locale-ci>?', u'string-locale-downcase',
u'string-locale-upcase', u'string-locale<?', u'string-locale=?',
u'string-locale>?', u'string-no-nuls?', u'string-normalize-nfc',
u'string-normalize-nfd', u'string-normalize-nfkc',
u'string-normalize-nfkd', u'string-normalize-spaces', u'string-port?',
u'string-prefix?', u'string-ref', u'string-replace', u'string-set!',
u'string-split', u'string-suffix?', u'string-titlecase', u'string-trim',
u'string-upcase', u'string-utf-8-length', u'string<=?', u'string<?',
u'string=?', u'string>=?', u'string>?', u'string?', u'struct->vector',
u'struct-accessor-procedure?', u'struct-constructor-procedure?',
u'struct-info', u'struct-mutator-procedure?',
u'struct-predicate-procedure?', u'struct-type-info',
u'struct-type-make-constructor', u'struct-type-make-predicate',
u'struct-type-property-accessor-procedure?', u'struct-type-property/c',
u'struct-type-property?', u'struct-type?', u'struct:arity-at-least',
u'struct:arrow-contract-info', u'struct:date', u'struct:date*',
u'struct:exn', u'struct:exn:break', u'struct:exn:break:hang-up',
u'struct:exn:break:terminate', u'struct:exn:fail',
u'struct:exn:fail:contract', u'struct:exn:fail:contract:arity',
u'struct:exn:fail:contract:blame',
u'struct:exn:fail:contract:continuation',
u'struct:exn:fail:contract:divide-by-zero',
u'struct:exn:fail:contract:non-fixnum-result',
u'struct:exn:fail:contract:variable', u'struct:exn:fail:filesystem',
u'struct:exn:fail:filesystem:errno',
u'struct:exn:fail:filesystem:exists',
u'struct:exn:fail:filesystem:missing-module',
u'struct:exn:fail:filesystem:version', u'struct:exn:fail:network',
u'struct:exn:fail:network:errno', u'struct:exn:fail:object',
u'struct:exn:fail:out-of-memory', u'struct:exn:fail:read',
u'struct:exn:fail:read:eof', u'struct:exn:fail:read:non-char',
u'struct:exn:fail:syntax', u'struct:exn:fail:syntax:missing-module',
u'struct:exn:fail:syntax:unbound', u'struct:exn:fail:unsupported',
u'struct:exn:fail:user', u'struct:srcloc',
u'struct:wrapped-extra-arg-arrow', u'struct?', u'sub1', u'subbytes',
u'subclass?', u'subclass?/c', u'subprocess', u'subprocess-group-enabled',
u'subprocess-kill', u'subprocess-pid', u'subprocess-status',
u'subprocess-wait', u'subprocess?', u'subset?', u'substring', u'suggest/c',
u'symbol->string', u'symbol-interned?', u'symbol-unreadable?', u'symbol<?',
u'symbol=?', u'symbol?', u'symbols', u'sync', u'sync/enable-break',
u'sync/timeout', u'sync/timeout/enable-break', u'syntax->datum',
u'syntax->list', u'syntax-arm', u'syntax-column', u'syntax-debug-info',
u'syntax-disarm', u'syntax-e', u'syntax-line',
u'syntax-local-bind-syntaxes', u'syntax-local-certifier',
u'syntax-local-context', u'syntax-local-expand-expression',
u'syntax-local-get-shadower', u'syntax-local-identifier-as-binding',
u'syntax-local-introduce', u'syntax-local-lift-context',
u'syntax-local-lift-expression', u'syntax-local-lift-module',
u'syntax-local-lift-module-end-declaration',
u'syntax-local-lift-provide', u'syntax-local-lift-require',
u'syntax-local-lift-values-expression',
u'syntax-local-make-definition-context',
u'syntax-local-make-delta-introducer',
u'syntax-local-module-defined-identifiers',
u'syntax-local-module-exports',
u'syntax-local-module-required-identifiers', u'syntax-local-name',
u'syntax-local-phase-level', u'syntax-local-submodules',
u'syntax-local-transforming-module-provides?', u'syntax-local-value',
u'syntax-local-value/immediate', u'syntax-original?', u'syntax-position',
u'syntax-property', u'syntax-property-preserved?',
u'syntax-property-symbol-keys', u'syntax-protect', u'syntax-rearm',
u'syntax-recertify', u'syntax-shift-phase-level', u'syntax-source',
u'syntax-source-module', u'syntax-span', u'syntax-taint',
u'syntax-tainted?', u'syntax-track-origin',
u'syntax-transforming-module-expression?',
u'syntax-transforming-with-lifts?', u'syntax-transforming?', u'syntax/c',
u'syntax?', u'system', u'system*', u'system*/exit-code',
u'system-big-endian?', u'system-idle-evt', u'system-language+country',
u'system-library-subpath', u'system-path-convention-type', u'system-type',
u'system/exit-code', u'tail-marks-match?', u'take', u'take-common-prefix',
u'take-right', u'takef', u'takef-right', u'tan', u'tanh',
u'tcp-abandon-port', u'tcp-accept', u'tcp-accept-evt',
u'tcp-accept-ready?', u'tcp-accept/enable-break', u'tcp-addresses',
u'tcp-close', u'tcp-connect', u'tcp-connect/enable-break', u'tcp-listen',
u'tcp-listener?', u'tcp-port?', u'tentative-pretty-print-port-cancel',
u'tentative-pretty-print-port-transfer', u'tenth', u'terminal-port?',
u'the-unsupplied-arg', u'third', u'thread', u'thread-cell-ref',
u'thread-cell-set!', u'thread-cell-values?', u'thread-cell?',
u'thread-dead-evt', u'thread-dead?', u'thread-group?', u'thread-receive',
u'thread-receive-evt', u'thread-resume', u'thread-resume-evt',
u'thread-rewind-receive', u'thread-running?', u'thread-send',
u'thread-suspend', u'thread-suspend-evt', u'thread-try-receive',
u'thread-wait', u'thread/suspend-to-kill', u'thread?', u'time-apply',
u'touch', u'transplant-input-port', u'transplant-output-port', u'true',
u'truncate', u'udp-addresses', u'udp-bind!', u'udp-bound?', u'udp-close',
u'udp-connect!', u'udp-connected?', u'udp-multicast-interface',
u'udp-multicast-join-group!', u'udp-multicast-leave-group!',
u'udp-multicast-loopback?', u'udp-multicast-set-interface!',
u'udp-multicast-set-loopback!', u'udp-multicast-set-ttl!',
u'udp-multicast-ttl', u'udp-open-socket', u'udp-receive!',
u'udp-receive!*', u'udp-receive!-evt', u'udp-receive!/enable-break',
u'udp-receive-ready-evt', u'udp-send', u'udp-send*', u'udp-send-evt',
u'udp-send-ready-evt', u'udp-send-to', u'udp-send-to*', u'udp-send-to-evt',
u'udp-send-to/enable-break', u'udp-send/enable-break', u'udp?', u'unbox',
u'uncaught-exception-handler', u'unit?', u'unspecified-dom',
u'unsupplied-arg?', u'use-collection-link-paths',
u'use-compiled-file-paths', u'use-user-specific-search-paths',
u'user-execute-bit', u'user-read-bit', u'user-write-bit', u'value-blame',
u'value-contract', u'values', u'variable-reference->empty-namespace',
u'variable-reference->module-base-phase',
u'variable-reference->module-declaration-inspector',
u'variable-reference->module-path-index',
u'variable-reference->module-source', u'variable-reference->namespace',
u'variable-reference->phase',
u'variable-reference->resolved-module-path',
u'variable-reference-constant?', u'variable-reference?', u'vector',
u'vector->immutable-vector', u'vector->list',
u'vector->pseudo-random-generator', u'vector->pseudo-random-generator!',
u'vector->values', u'vector-append', u'vector-argmax', u'vector-argmin',
u'vector-copy', u'vector-copy!', u'vector-count', u'vector-drop',
u'vector-drop-right', u'vector-fill!', u'vector-filter',
u'vector-filter-not', u'vector-immutable', u'vector-immutable/c',
u'vector-immutableof', u'vector-length', u'vector-map', u'vector-map!',
u'vector-member', u'vector-memq', u'vector-memv', u'vector-ref',
u'vector-set!', u'vector-set*!', u'vector-set-performance-stats!',
u'vector-split-at', u'vector-split-at-right', u'vector-take',
u'vector-take-right', u'vector/c', u'vector?', u'vectorof', u'version',
u'void', u'void?', u'weak-box-value', u'weak-box?', u'weak-set',
u'weak-seteq', u'weak-seteqv', u'will-execute', u'will-executor?',
u'will-register', u'will-try-execute', u'with-input-from-bytes',
u'with-input-from-file', u'with-input-from-string',
u'with-output-to-bytes', u'with-output-to-file', u'with-output-to-string',
u'would-be-future', u'wrap-evt', u'wrapped-extra-arg-arrow',
u'wrapped-extra-arg-arrow-extra-neg-party-argument',
u'wrapped-extra-arg-arrow-real-func', u'wrapped-extra-arg-arrow?',
u'writable<%>', u'write', u'write-byte', u'write-bytes',
u'write-bytes-avail', u'write-bytes-avail*', u'write-bytes-avail-evt',
u'write-bytes-avail/enable-break', u'write-char', u'write-special',
u'write-special-avail*', u'write-special-evt', u'write-string',
u'write-to-file', u'writeln', u'xor', u'zero?', u'~.a', u'~.s', u'~.v', u'~a',
u'~e', u'~r', u'~s', u'~v'
)
_opening_parenthesis = r'[([{]'
_closing_parenthesis = r'[)\]}]'
_delimiters = r'()[\]{}",\'`;\s'
_symbol = r'(?:\|[^|]*\||\\[\w\W]|[^|\\%s]+)+' % _delimiters
_exact_decimal_prefix = r'(?:#e)?(?:#d)?(?:#e)?'
_exponent = r'(?:[defls][-+]?\d+)'
_inexact_simple_no_hashes = r'(?:\d+(?:/\d+|\.\d*)?|\.\d+)'
_inexact_simple = (r'(?:%s|(?:\d+#+(?:\.#*|/\d+#*)?|\.\d+#+|'
r'\d+(?:\.\d*#+|/\d+#+)))' % _inexact_simple_no_hashes)
_inexact_normal_no_hashes = r'(?:%s%s?)' % (_inexact_simple_no_hashes,
_exponent)
_inexact_normal = r'(?:%s%s?)' % (_inexact_simple, _exponent)
_inexact_special = r'(?:(?:inf|nan)\.[0f])'
_inexact_real = r'(?:[-+]?%s|[-+]%s)' % (_inexact_normal,
_inexact_special)
_inexact_unsigned = r'(?:%s|%s)' % (_inexact_normal, _inexact_special)
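# Illustrative note: the helper patterns above are intended to cover Racket
# reader numerals such as exact forms ("#e1/2"), plain decimals ("2.5",
# "1e3"), signed specials ("+inf.0", "-nan.0") and extflonum exponents
# written with "t" (e.g. "1.5t0"); the actual rules follow in `tokens` below.
# For instance, evaluated right after these definitions:
#
#     assert re.match(_inexact_real, '+inf.0')
#     assert re.match(_inexact_normal_no_hashes, '2.5e3')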
tokens = {
'root': [
(_closing_parenthesis, Error),
(r'(?!\Z)', Text, 'unquoted-datum')
],
'datum': [
(r'(?s)#;|#![ /].*?\n', Comment),
(u';[^\\n\\r\x85\u2028\u2029]*', Comment.Single),
(r'#\|', Comment.Multiline, 'block-comment'),
# Whitespaces
(r'(?u)\s+', Text),
# Numbers: Keep in mind Racket reader hash prefixes, which
# can denote the base or the type. These don't map neatly
# onto Pygments token types; some judgment calls here.
# #d or no prefix
(r'(?i)%s[-+]?\d+(?=[%s])' % (_exact_decimal_prefix, _delimiters),
Number.Integer, '#pop'),
(r'(?i)%s[-+]?(\d+(\.\d*)?|\.\d+)([deflst][-+]?\d+)?(?=[%s])' %
(_exact_decimal_prefix, _delimiters), Number.Float, '#pop'),
(r'(?i)%s[-+]?(%s([-+]%s?i)?|[-+]%s?i)(?=[%s])' %
(_exact_decimal_prefix, _inexact_normal_no_hashes,
_inexact_normal_no_hashes, _inexact_normal_no_hashes,
_delimiters), Number, '#pop'),
# Inexact without explicit #i
(r'(?i)(#d)?(%s([-+]%s?i)?|[-+]%s?i|%s@%s)(?=[%s])' %
(_inexact_real, _inexact_unsigned, _inexact_unsigned,
_inexact_real, _inexact_real, _delimiters), Number.Float,
'#pop'),
# The remaining extflonums
(r'(?i)(([-+]?%st[-+]?\d+)|[-+](inf|nan)\.t)(?=[%s])' %
(_inexact_simple, _delimiters), Number.Float, '#pop'),
# #b
(r'(?iu)(#[ei])?#b%s' % _symbol, Number.Bin, '#pop'),
# #o
(r'(?iu)(#[ei])?#o%s' % _symbol, Number.Oct, '#pop'),
# #x
(r'(?iu)(#[ei])?#x%s' % _symbol, Number.Hex, '#pop'),
# #i is always inexact, i.e. float
(r'(?iu)(#d)?#i%s' % _symbol, Number.Float, '#pop'),
# Strings and characters
(r'#?"', String.Double, ('#pop', 'string')),
(r'#<<(.+)\n(^(?!\1$).*$\n)*^\1$', String.Heredoc, '#pop'),
(r'#\\(u[\da-fA-F]{1,4}|U[\da-fA-F]{1,8})', String.Char, '#pop'),
(r'(?is)#\\([0-7]{3}|[a-z]+|.)', String.Char, '#pop'),
(r'(?s)#[pr]x#?"(\\?.)*?"', String.Regex, '#pop'),
# Constants
(r'#(true|false|[tTfF])', Name.Constant, '#pop'),
# Keyword argument names (e.g. #:keyword)
(r'(?u)#:%s' % _symbol, Keyword.Declaration, '#pop'),
# Reader extensions
(r'(#lang |#!)(\S+)',
bygroups(Keyword.Namespace, Name.Namespace)),
(r'#reader', Keyword.Namespace, 'quoted-datum'),
# Other syntax
(r"(?i)\.(?=[%s])|#c[is]|#['`]|#,@?" % _delimiters, Operator),
(r"'|#[s&]|#hash(eqv?)?|#\d*(?=%s)" % _opening_parenthesis,
Operator, ('#pop', 'quoted-datum'))
],
'datum*': [
(r'`|,@?', Operator),
(_symbol, String.Symbol, '#pop'),
(r'[|\\]', Error),
default('#pop')
],
'list': [
(_closing_parenthesis, Punctuation, '#pop')
],
'unquoted-datum': [
include('datum'),
(r'quote(?=[%s])' % _delimiters, Keyword,
('#pop', 'quoted-datum')),
(r'`', Operator, ('#pop', 'quasiquoted-datum')),
(r'quasiquote(?=[%s])' % _delimiters, Keyword,
('#pop', 'quasiquoted-datum')),
(_opening_parenthesis, Punctuation, ('#pop', 'unquoted-list')),
(words(_keywords, prefix='(?u)', suffix='(?=[%s])' % _delimiters),
Keyword, '#pop'),
(words(_builtins, prefix='(?u)', suffix='(?=[%s])' % _delimiters),
Name.Builtin, '#pop'),
(_symbol, Name, '#pop'),
include('datum*')
],
'unquoted-list': [
include('list'),
(r'(?!\Z)', Text, 'unquoted-datum')
],
'quasiquoted-datum': [
include('datum'),
(r',@?', Operator, ('#pop', 'unquoted-datum')),
(r'unquote(-splicing)?(?=[%s])' % _delimiters, Keyword,
('#pop', 'unquoted-datum')),
(_opening_parenthesis, Punctuation, ('#pop', 'quasiquoted-list')),
include('datum*')
],
'quasiquoted-list': [
include('list'),
(r'(?!\Z)', Text, 'quasiquoted-datum')
],
'quoted-datum': [
include('datum'),
(_opening_parenthesis, Punctuation, ('#pop', 'quoted-list')),
include('datum*')
],
'quoted-list': [
include('list'),
(r'(?!\Z)', Text, 'quoted-datum')
],
'block-comment': [
(r'#\|', Comment.Multiline, '#push'),
(r'\|#', Comment.Multiline, '#pop'),
(r'[^#|]+|.', Comment.Multiline)
],
'string': [
(r'"', String.Double, '#pop'),
(r'(?s)\\([0-7]{1,3}|x[\da-fA-F]{1,2}|u[\da-fA-F]{1,4}|'
r'U[\da-fA-F]{1,8}|.)', String.Escape),
(r'[^\\"]+', String.Double)
]
}
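# A minimal usage sketch (illustrative only; it assumes the table above
# belongs to the RacketLexer class defined further up and that the standard
# Pygments highlight() API is available):
#
#     from pygments import highlight
#     from pygments.formatters import HtmlFormatter
#     print(highlight('(define (add1 x) (+ x 1))',
#                     RacketLexer(), HtmlFormatter()))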
class NewLispLexer(RegexLexer):
"""
For `newLISP <http://www.newlisp.org/>`_ source code (version 10.3.0).

.. versionadded:: 1.5
"""
name = 'NewLisp'
aliases = ['newlisp']
filenames = ['*.lsp', '*.nl', '*.kif']
mimetypes = ['text/x-newlisp', 'application/x-newlisp']
flags = re.IGNORECASE | re.MULTILINE | re.UNICODE
# list of built-in functions for newLISP version 10.3
builtins = (
'^', '--', '-', ':', '!', '!=', '?', '@', '*', '/', '&', '%', '+', '++',
'<', '<<', '<=', '=', '>', '>=', '>>', '|', '~', '$', '$0', '$1', '$10',
'$11', '$12', '$13', '$14', '$15', '$2', '$3', '$4', '$5', '$6', '$7',
'$8', '$9', '$args', '$idx', '$it', '$main-args', 'abort', 'abs',
'acos', 'acosh', 'add', 'address', 'amb', 'and', 'append-file',
'append', 'apply', 'args', 'array-list', 'array?', 'array', 'asin',
'asinh', 'assoc', 'atan', 'atan2', 'atanh', 'atom?', 'base64-dec',
'base64-enc', 'bayes-query', 'bayes-train', 'begin',
'beta', 'betai', 'bind', 'binomial', 'bits', 'callback',
'case', 'catch', 'ceil', 'change-dir', 'char', 'chop', 'Class', 'clean',
'close', 'command-event', 'cond', 'cons', 'constant',
'context?', 'context', 'copy-file', 'copy', 'cos', 'cosh', 'count',
'cpymem', 'crc32', 'crit-chi2', 'crit-z', 'current-line', 'curry',
'date-list', 'date-parse', 'date-value', 'date', 'debug', 'dec',
'def-new', 'default', 'define-macro', 'define',
'delete-file', 'delete-url', 'delete', 'destroy', 'det', 'device',
'difference', 'directory?', 'directory', 'div', 'do-until', 'do-while',
'doargs', 'dolist', 'dostring', 'dotimes', 'dotree', 'dump', 'dup',
'empty?', 'encrypt', 'ends-with', 'env', 'erf', 'error-event',
'eval-string', 'eval', 'exec', 'exists', 'exit', 'exp', 'expand',
'explode', 'extend', 'factor', 'fft', 'file-info', 'file?', 'filter',
'find-all', 'find', 'first', 'flat', 'float?', 'float', 'floor', 'flt',
'fn', 'for-all', 'for', 'fork', 'format', 'fv', 'gammai', 'gammaln',
'gcd', 'get-char', 'get-float', 'get-int', 'get-long', 'get-string',
'get-url', 'global?', 'global', 'if-not', 'if', 'ifft', 'import', 'inc',
'index', 'inf?', 'int', 'integer?', 'integer', 'intersect', 'invert',
'irr', 'join', 'lambda-macro', 'lambda?', 'lambda', 'last-error',
'last', 'legal?', 'length', 'let', 'letex', 'letn',
'list?', 'list', 'load', 'local', 'log', 'lookup',
'lower-case', 'macro?', 'main-args', 'MAIN', 'make-dir', 'map', 'mat',
'match', 'max', 'member', 'min', 'mod', 'module', 'mul', 'multiply',
'NaN?', 'net-accept', 'net-close', 'net-connect', 'net-error',
'net-eval', 'net-interface', 'net-ipv', 'net-listen', 'net-local',
'net-lookup', 'net-packet', 'net-peek', 'net-peer', 'net-ping',
'net-receive-from', 'net-receive-udp', 'net-receive', 'net-select',
'net-send-to', 'net-send-udp', 'net-send', 'net-service',
'net-sessions', 'new', 'nil?', 'nil', 'normal', 'not', 'now', 'nper',
'npv', 'nth', 'null?', 'number?', 'open', 'or', 'ostype', 'pack',
'parse-date', 'parse', 'peek', 'pipe', 'pmt', 'pop-assoc', 'pop',
'post-url', 'pow', 'prefix', 'pretty-print', 'primitive?', 'print',
'println', 'prob-chi2', 'prob-z', 'process', 'prompt-event',
'protected?', 'push', 'put-url', 'pv', 'quote?', 'quote', 'rand',
'random', 'randomize', 'read', 'read-char', 'read-expr', 'read-file',
'read-key', 'read-line', 'read-utf8', 'reader-event',
'real-path', 'receive', 'ref-all', 'ref', 'regex-comp', 'regex',
'remove-dir', 'rename-file', 'replace', 'reset', 'rest', 'reverse',
'rotate', 'round', 'save', 'search', 'seed', 'seek', 'select', 'self',
'semaphore', 'send', 'sequence', 'series', 'set-locale', 'set-ref-all',
'set-ref', 'set', 'setf', 'setq', 'sgn', 'share', 'signal', 'silent',
'sin', 'sinh', 'sleep', 'slice', 'sort', 'source', 'spawn', 'sqrt',
'starts-with', 'string?', 'string', 'sub', 'swap', 'sym', 'symbol?',
'symbols', 'sync', 'sys-error', 'sys-info', 'tan', 'tanh', 'term',
'throw-error', 'throw', 'time-of-day', 'time', 'timer', 'title-case',
'trace-highlight', 'trace', 'transpose', 'Tree', 'trim', 'true?',
'true', 'unicode', 'unify', 'unique', 'unless', 'unpack', 'until',
'upper-case', 'utf8', 'utf8len', 'uuid', 'wait-pid', 'when', 'while',
'write', 'write-char', 'write-file', 'write-line',
'xfer-event', 'xml-error', 'xml-parse', 'xml-type-tags', 'zero?',
)
# valid names
valid_name = r'([\w!$%&*+.,/<=>?@^~|-])+|(\[.*?\])+'
tokens = {
'root': [
# shebang
(r'#!(.*?)$', Comment.Preproc),
# comments starting with semicolon
(r';.*$', Comment.Single),
# comments starting with #
(r'#.*$', Comment.Single),
# whitespace
(r'\s+', Text),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
# braces
(r'\{', String, "bracestring"),
# [text] ... [/text] delimited strings
(r'\[text\]*', String, "tagstring"),
# 'special' operators...
(r"('|:)", Operator),
# highlight the builtins
(words(builtins, suffix=r'\b'),
Keyword),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Variable),
# the remaining variables
(valid_name, String.Symbol),
# parentheses
(r'(\(|\))', Punctuation),
],
# braced strings...
'bracestring': [
(r'\{', String, "#push"),
(r'\}', String, "#pop"),
('[^{}]+', String),
],
# tagged [text]...[/text] delimited strings...
'tagstring': [
(r'(?s)(.*?)(\[/text\])', String, '#pop'),
],
}
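# A minimal usage sketch (illustrative only; it relies on the standard
# Lexer.get_tokens() API from Pygments):
#
#     code = '(println {a brace string} [text]raw text[/text])'
#     for token, value in NewLispLexer().get_tokens(code):
#         print(token, repr(value))
#
# The braces and the [text]...[/text] pair are routed through the
# 'bracestring' and 'tagstring' states defined above.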
class EmacsLispLexer(RegexLexer):
"""
An ELisp lexer, parsing a stream and outputting the tokens
needed to highlight elisp code.

.. versionadded:: 2.1
"""
name = 'EmacsLisp'
aliases = ['emacs', 'elisp', 'emacs-lisp']
filenames = ['*.el']
mimetypes = ['text/x-elisp', 'application/x-elisp']
flags = re.MULTILINE
# couple of useful regexes
# characters that are not macro-characters and can be used to begin a symbol
nonmacro = r'\\.|[\w!$%&*+-/<=>?@^{}~|]'
constituent = nonmacro + '|[#.:]'
terminated = r'(?=[ "()\]\'\n,;`])' # whitespace or terminating macro characters
# symbol token, reverse-engineered from hyperspec
# Take a deep breath...
symbol = r'((?:%s)(?:%s)*)' % (nonmacro, constituent)
macros = set((
'atomic-change-group', 'case', 'block', 'cl-block', 'cl-callf', 'cl-callf2',
'cl-case', 'cl-decf', 'cl-declaim', 'cl-declare',
'cl-define-compiler-macro', 'cl-defmacro', 'cl-defstruct',
'cl-defsubst', 'cl-deftype', 'cl-defun', 'cl-destructuring-bind',
'cl-do', 'cl-do*', 'cl-do-all-symbols', 'cl-do-symbols', 'cl-dolist',
'cl-dotimes', 'cl-ecase', 'cl-etypecase', 'eval-when', 'cl-eval-when', 'cl-flet',
'cl-flet*', 'cl-function', 'cl-incf', 'cl-labels', 'cl-letf',
'cl-letf*', 'cl-load-time-value', 'cl-locally', 'cl-loop',
'cl-macrolet', 'cl-multiple-value-bind', 'cl-multiple-value-setq',
'cl-progv', 'cl-psetf', 'cl-psetq', 'cl-pushnew', 'cl-remf',
'cl-return', 'cl-return-from', 'cl-rotatef', 'cl-shiftf',
'cl-symbol-macrolet', 'cl-tagbody', 'cl-the', 'cl-typecase',
'combine-after-change-calls', 'condition-case-unless-debug', 'decf',
'declaim', 'declare', 'declare-function', 'def-edebug-spec',
'defadvice', 'defclass', 'defcustom', 'defface', 'defgeneric',
'defgroup', 'define-advice', 'define-alternatives',
'define-compiler-macro', 'define-derived-mode', 'define-generic-mode',
'define-global-minor-mode', 'define-globalized-minor-mode',
'define-minor-mode', 'define-modify-macro',
'define-obsolete-face-alias', 'define-obsolete-function-alias',
'define-obsolete-variable-alias', 'define-setf-expander',
'define-skeleton', 'defmacro', 'defmethod', 'defsetf', 'defstruct',
'defsubst', 'deftheme', 'deftype', 'defun', 'defvar-local',
'delay-mode-hooks', 'destructuring-bind', 'do', 'do*',
'do-all-symbols', 'do-symbols', 'dolist', 'dont-compile', 'dotimes',
'dotimes-with-progress-reporter', 'ecase', 'ert-deftest', 'etypecase',
'eval-and-compile', 'eval-when-compile', 'flet', 'ignore-errors',
'incf', 'labels', 'lambda', 'letrec', 'lexical-let', 'lexical-let*',
'loop', 'multiple-value-bind', 'multiple-value-setq', 'noreturn',
'oref', 'oref-default', 'oset', 'oset-default', 'pcase',
'pcase-defmacro', 'pcase-dolist', 'pcase-exhaustive', 'pcase-let',
'pcase-let*', 'pop', 'psetf', 'psetq', 'push', 'pushnew', 'remf',
'return', 'rotatef', 'rx', 'save-match-data', 'save-selected-window',
'save-window-excursion', 'setf', 'setq-local', 'shiftf',
'track-mouse', 'typecase', 'unless', 'use-package', 'when',
'while-no-input', 'with-case-table', 'with-category-table',
'with-coding-priority', 'with-current-buffer', 'with-demoted-errors',
'with-eval-after-load', 'with-file-modes', 'with-local-quit',
'with-output-to-string', 'with-output-to-temp-buffer',
'with-parsed-tramp-file-name', 'with-selected-frame',
'with-selected-window', 'with-silent-modifications', 'with-slots',
'with-syntax-table', 'with-temp-buffer', 'with-temp-file',
'with-temp-message', 'with-timeout', 'with-tramp-connection-property',
'with-tramp-file-property', 'with-tramp-progress-reporter',
'with-wrapper-hook', 'load-time-value', 'locally', 'macrolet', 'progv',
'return-from',
))
special_forms = set((
'and', 'catch', 'cond', 'condition-case', 'defconst', 'defvar',
'function', 'if', 'interactive', 'let', 'let*', 'or', 'prog1',
'prog2', 'progn', 'quote', 'save-current-buffer', 'save-excursion',
'save-restriction', 'setq', 'setq-default', 'subr-arity',
'unwind-protect', 'while',
))
builtin_function = set((
'%', '*', '+', '-', '/', '/=', '1+', '1-', '<', '<=', '=', '>', '>=',
'Snarf-documentation', 'abort-recursive-edit', 'abs',
'accept-process-output', 'access-file', 'accessible-keymaps', 'acos',
'active-minibuffer-window', 'add-face-text-property',
'add-name-to-file', 'add-text-properties', 'all-completions',
'append', 'apply', 'apropos-internal', 'aref', 'arrayp', 'aset',
'ash', 'asin', 'assoc', 'assoc-string', 'assq', 'atan', 'atom',
'autoload', 'autoload-do-load', 'backtrace', 'backtrace--locals',
'backtrace-debug', 'backtrace-eval', 'backtrace-frame',
'backward-char', 'backward-prefix-chars', 'barf-if-buffer-read-only',
'base64-decode-region', 'base64-decode-string',
'base64-encode-region', 'base64-encode-string', 'beginning-of-line',
'bidi-find-overridden-directionality', 'bidi-resolved-levels',
'bitmap-spec-p', 'bobp', 'bolp', 'bool-vector',
'bool-vector-count-consecutive', 'bool-vector-count-population',
'bool-vector-exclusive-or', 'bool-vector-intersection',
'bool-vector-not', 'bool-vector-p', 'bool-vector-set-difference',
'bool-vector-subsetp', 'bool-vector-union', 'boundp',
'buffer-base-buffer', 'buffer-chars-modified-tick',
'buffer-enable-undo', 'buffer-file-name', 'buffer-has-markers-at',
'buffer-list', 'buffer-live-p', 'buffer-local-value',
'buffer-local-variables', 'buffer-modified-p', 'buffer-modified-tick',
'buffer-name', 'buffer-size', 'buffer-string', 'buffer-substring',
'buffer-substring-no-properties', 'buffer-swap-text', 'bufferp',
'bury-buffer-internal', 'byte-code', 'byte-code-function-p',
'byte-to-position', 'byte-to-string', 'byteorder',
'call-interactively', 'call-last-kbd-macro', 'call-process',
'call-process-region', 'cancel-kbd-macro-events', 'capitalize',
'capitalize-region', 'capitalize-word', 'car', 'car-less-than-car',
'car-safe', 'case-table-p', 'category-docstring',
'category-set-mnemonics', 'category-table', 'category-table-p',
'ccl-execute', 'ccl-execute-on-string', 'ccl-program-p', 'cdr',
'cdr-safe', 'ceiling', 'char-after', 'char-before',
'char-category-set', 'char-charset', 'char-equal', 'char-or-string-p',
'char-resolve-modifiers', 'char-syntax', 'char-table-extra-slot',
'char-table-p', 'char-table-parent', 'char-table-range',
'char-table-subtype', 'char-to-string', 'char-width', 'characterp',
'charset-after', 'charset-id-internal', 'charset-plist',
'charset-priority-list', 'charsetp', 'check-coding-system',
'check-coding-systems-region', 'clear-buffer-auto-save-failure',
'clear-charset-maps', 'clear-face-cache', 'clear-font-cache',
'clear-image-cache', 'clear-string', 'clear-this-command-keys',
'close-font', 'clrhash', 'coding-system-aliases',
'coding-system-base', 'coding-system-eol-type', 'coding-system-p',
'coding-system-plist', 'coding-system-priority-list',
'coding-system-put', 'color-distance', 'color-gray-p',
'color-supported-p', 'combine-after-change-execute',
'command-error-default-function', 'command-remapping', 'commandp',
'compare-buffer-substrings', 'compare-strings',
'compare-window-configurations', 'completing-read',
'compose-region-internal', 'compose-string-internal',
'composition-get-gstring', 'compute-motion', 'concat', 'cons',
'consp', 'constrain-to-field', 'continue-process',
'controlling-tty-p', 'coordinates-in-window-p', 'copy-alist',
'copy-category-table', 'copy-file', 'copy-hash-table', 'copy-keymap',
'copy-marker', 'copy-sequence', 'copy-syntax-table', 'copysign',
'cos', 'current-active-maps', 'current-bidi-paragraph-direction',
'current-buffer', 'current-case-table', 'current-column',
'current-global-map', 'current-idle-time', 'current-indentation',
'current-input-mode', 'current-local-map', 'current-message',
'current-minor-mode-maps', 'current-time', 'current-time-string',
'current-time-zone', 'current-window-configuration',
'cygwin-convert-file-name-from-windows',
'cygwin-convert-file-name-to-windows', 'daemon-initialized',
'daemonp', 'dbus--init-bus', 'dbus-get-unique-name',
'dbus-message-internal', 'debug-timer-check', 'declare-equiv-charset',
'decode-big5-char', 'decode-char', 'decode-coding-region',
'decode-coding-string', 'decode-sjis-char', 'decode-time',
'default-boundp', 'default-file-modes', 'default-printer-name',
'default-toplevel-value', 'default-value', 'define-category',
'define-charset-alias', 'define-charset-internal',
'define-coding-system-alias', 'define-coding-system-internal',
'define-fringe-bitmap', 'define-hash-table-test', 'define-key',
'define-prefix-command', 'delete',
'delete-all-overlays', 'delete-and-extract-region', 'delete-char',
'delete-directory-internal', 'delete-field', 'delete-file',
'delete-frame', 'delete-other-windows-internal', 'delete-overlay',
'delete-process', 'delete-region', 'delete-terminal',
'delete-window-internal', 'delq', 'describe-buffer-bindings',
'describe-vector', 'destroy-fringe-bitmap', 'detect-coding-region',
'detect-coding-string', 'ding', 'directory-file-name',
'directory-files', 'directory-files-and-attributes', 'discard-input',
'display-supports-face-attributes-p', 'do-auto-save', 'documentation',
'documentation-property', 'downcase', 'downcase-region',
'downcase-word', 'draw-string', 'dump-colors', 'dump-emacs',
'dump-face', 'dump-frame-glyph-matrix', 'dump-glyph-matrix',
'dump-glyph-row', 'dump-redisplay-history', 'dump-tool-bar-row',
'elt', 'emacs-pid', 'encode-big5-char', 'encode-char',
'encode-coding-region', 'encode-coding-string', 'encode-sjis-char',
'encode-time', 'end-kbd-macro', 'end-of-line', 'eobp', 'eolp', 'eq',
'eql', 'equal', 'equal-including-properties', 'erase-buffer',
'error-message-string', 'eval', 'eval-buffer', 'eval-region',
'event-convert-list', 'execute-kbd-macro', 'exit-recursive-edit',
'exp', 'expand-file-name', 'expt', 'external-debugging-output',
'face-attribute-relative-p', 'face-attributes-as-vector', 'face-font',
'fboundp', 'fceiling', 'fetch-bytecode', 'ffloor',
'field-beginning', 'field-end', 'field-string',
'field-string-no-properties', 'file-accessible-directory-p',
'file-acl', 'file-attributes', 'file-attributes-lessp',
'file-directory-p', 'file-executable-p', 'file-exists-p',
'file-locked-p', 'file-modes', 'file-name-absolute-p',
'file-name-all-completions', 'file-name-as-directory',
'file-name-completion', 'file-name-directory',
'file-name-nondirectory', 'file-newer-than-file-p', 'file-readable-p',
'file-regular-p', 'file-selinux-context', 'file-symlink-p',
'file-system-info', 'file-system-info', 'file-writable-p',
'fillarray', 'find-charset-region', 'find-charset-string',
'find-coding-systems-region-internal', 'find-composition-internal',
'find-file-name-handler', 'find-font', 'find-operation-coding-system',
'float', 'float-time', 'floatp', 'floor', 'fmakunbound',
'following-char', 'font-at', 'font-drive-otf', 'font-face-attributes',
'font-family-list', 'font-get', 'font-get-glyphs',
'font-get-system-font', 'font-get-system-normal-font', 'font-info',
'font-match-p', 'font-otf-alternates', 'font-put',
'font-shape-gstring', 'font-spec', 'font-variation-glyphs',
'font-xlfd-name', 'fontp', 'fontset-font', 'fontset-info',
'fontset-list', 'fontset-list-all', 'force-mode-line-update',
'force-window-update', 'format', 'format-mode-line',
'format-network-address', 'format-time-string', 'forward-char',
'forward-comment', 'forward-line', 'forward-word',
'frame-border-width', 'frame-bottom-divider-width',
'frame-can-run-window-configuration-change-hook', 'frame-char-height',
'frame-char-width', 'frame-face-alist', 'frame-first-window',
'frame-focus', 'frame-font-cache', 'frame-fringe-width', 'frame-list',
'frame-live-p', 'frame-or-buffer-changed-p', 'frame-parameter',
'frame-parameters', 'frame-pixel-height', 'frame-pixel-width',
'frame-pointer-visible-p', 'frame-right-divider-width',
'frame-root-window', 'frame-scroll-bar-height',
'frame-scroll-bar-width', 'frame-selected-window', 'frame-terminal',
'frame-text-cols', 'frame-text-height', 'frame-text-lines',
'frame-text-width', 'frame-total-cols', 'frame-total-lines',
'frame-visible-p', 'framep', 'frexp', 'fringe-bitmaps-at-pos',
'fround', 'fset', 'ftruncate', 'funcall', 'funcall-interactively',
'function-equal', 'functionp', 'gap-position', 'gap-size',
'garbage-collect', 'gc-status', 'generate-new-buffer-name', 'get',
'get-buffer', 'get-buffer-create', 'get-buffer-process',
'get-buffer-window', 'get-byte', 'get-char-property',
'get-char-property-and-overlay', 'get-file-buffer', 'get-file-char',
'get-internal-run-time', 'get-load-suffixes', 'get-pos-property',
'get-process', 'get-screen-color', 'get-text-property',
'get-unicode-property-internal', 'get-unused-category',
'get-unused-iso-final-char', 'getenv-internal', 'gethash',
'gfile-add-watch', 'gfile-rm-watch', 'global-key-binding',
'gnutls-available-p', 'gnutls-boot', 'gnutls-bye', 'gnutls-deinit',
'gnutls-error-fatalp', 'gnutls-error-string', 'gnutls-errorp',
'gnutls-get-initstage', 'gnutls-peer-status',
'gnutls-peer-status-warning-describe', 'goto-char', 'gpm-mouse-start',
'gpm-mouse-stop', 'group-gid', 'group-real-gid',
'handle-save-session', 'handle-switch-frame', 'hash-table-count',
'hash-table-p', 'hash-table-rehash-size',
'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
'hash-table-weakness', 'iconify-frame', 'identity', 'image-flush',
'image-mask-p', 'image-metadata', 'image-size', 'imagemagick-types',
'imagep', 'indent-to', 'indirect-function', 'indirect-variable',
'init-image-library', 'inotify-add-watch', 'inotify-rm-watch',
'input-pending-p', 'insert', 'insert-and-inherit',
'insert-before-markers', 'insert-before-markers-and-inherit',
'insert-buffer-substring', 'insert-byte', 'insert-char',
'insert-file-contents', 'insert-startup-screen', 'int86',
'integer-or-marker-p', 'integerp', 'interactive-form', 'intern',
'intern-soft', 'internal--track-mouse', 'internal-char-font',
'internal-complete-buffer', 'internal-copy-lisp-face',
'internal-default-process-filter',
'internal-default-process-sentinel', 'internal-describe-syntax-value',
'internal-event-symbol-parse-modifiers',
'internal-face-x-get-resource', 'internal-get-lisp-face-attribute',
'internal-lisp-face-attribute-values', 'internal-lisp-face-empty-p',
'internal-lisp-face-equal-p', 'internal-lisp-face-p',
'internal-make-lisp-face', 'internal-make-var-non-special',
'internal-merge-in-global-face',
'internal-set-alternative-font-family-alist',
'internal-set-alternative-font-registry-alist',
'internal-set-font-selection-order',
'internal-set-lisp-face-attribute',
'internal-set-lisp-face-attribute-from-resource',
'internal-show-cursor', 'internal-show-cursor-p', 'interrupt-process',
'invisible-p', 'invocation-directory', 'invocation-name', 'isnan',
'iso-charset', 'key-binding', 'key-description',
'keyboard-coding-system', 'keymap-parent', 'keymap-prompt', 'keymapp',
'keywordp', 'kill-all-local-variables', 'kill-buffer', 'kill-emacs',
'kill-local-variable', 'kill-process', 'last-nonminibuffer-frame',
'lax-plist-get', 'lax-plist-put', 'ldexp', 'length',
'libxml-parse-html-region', 'libxml-parse-xml-region',
'line-beginning-position', 'line-end-position', 'line-pixel-height',
'list', 'list-fonts', 'list-system-processes', 'listp', 'load',
'load-average', 'local-key-binding', 'local-variable-if-set-p',
'local-variable-p', 'locale-info', 'locate-file-internal',
'lock-buffer', 'log', 'logand', 'logb', 'logior', 'lognot', 'logxor',
'looking-at', 'lookup-image', 'lookup-image-map', 'lookup-key',
'lower-frame', 'lsh', 'macroexpand', 'make-bool-vector',
'make-byte-code', 'make-category-set', 'make-category-table',
'make-char', 'make-char-table', 'make-directory-internal',
'make-frame-invisible', 'make-frame-visible', 'make-hash-table',
'make-indirect-buffer', 'make-keymap', 'make-list',
'make-local-variable', 'make-marker', 'make-network-process',
'make-overlay', 'make-serial-process', 'make-sparse-keymap',
'make-string', 'make-symbol', 'make-symbolic-link', 'make-temp-name',
'make-terminal-frame', 'make-variable-buffer-local',
'make-variable-frame-local', 'make-vector', 'makunbound',
'map-char-table', 'map-charset-chars', 'map-keymap',
'map-keymap-internal', 'mapatoms', 'mapc', 'mapcar', 'mapconcat',
'maphash', 'mark-marker', 'marker-buffer', 'marker-insertion-type',
'marker-position', 'markerp', 'match-beginning', 'match-data',
'match-end', 'matching-paren', 'max', 'max-char', 'md5', 'member',
'memory-info', 'memory-limit', 'memory-use-counts', 'memq', 'memql',
'menu-bar-menu-at-x-y', 'menu-or-popup-active-p',
'menu-or-popup-active-p', 'merge-face-attribute', 'message',
'message-box', 'message-or-box', 'min',
'minibuffer-completion-contents', 'minibuffer-contents',
'minibuffer-contents-no-properties', 'minibuffer-depth',
'minibuffer-prompt', 'minibuffer-prompt-end',
'minibuffer-selected-window', 'minibuffer-window', 'minibufferp',
'minor-mode-key-binding', 'mod', 'modify-category-entry',
'modify-frame-parameters', 'modify-syntax-entry',
'mouse-pixel-position', 'mouse-position', 'move-overlay',
'move-point-visually', 'move-to-column', 'move-to-window-line',
'msdos-downcase-filename', 'msdos-long-file-names', 'msdos-memget',
'msdos-memput', 'msdos-mouse-disable', 'msdos-mouse-enable',
'msdos-mouse-init', 'msdos-mouse-p', 'msdos-remember-default-colors',
'msdos-set-keyboard', 'msdos-set-mouse-buttons',
'multibyte-char-to-unibyte', 'multibyte-string-p', 'narrow-to-region',
'natnump', 'nconc', 'network-interface-info',
'network-interface-list', 'new-fontset', 'newline-cache-check',
'next-char-property-change', 'next-frame', 'next-overlay-change',
'next-property-change', 'next-read-file-uses-dialog-p',
'next-single-char-property-change', 'next-single-property-change',
'next-window', 'nlistp', 'nreverse', 'nth', 'nthcdr', 'null',
'number-or-marker-p', 'number-to-string', 'numberp',
'open-dribble-file', 'open-font', 'open-termscript',
'optimize-char-table', 'other-buffer', 'other-window-for-scrolling',
'overlay-buffer', 'overlay-end', 'overlay-get', 'overlay-lists',
'overlay-properties', 'overlay-put', 'overlay-recenter',
'overlay-start', 'overlayp', 'overlays-at', 'overlays-in',
'parse-partial-sexp', 'play-sound-internal', 'plist-get',
'plist-member', 'plist-put', 'point', 'point-marker', 'point-max',
'point-max-marker', 'point-min', 'point-min-marker',
'pos-visible-in-window-p', 'position-bytes', 'posix-looking-at',
'posix-search-backward', 'posix-search-forward', 'posix-string-match',
'posn-at-point', 'posn-at-x-y', 'preceding-char',
'prefix-numeric-value', 'previous-char-property-change',
'previous-frame', 'previous-overlay-change',
'previous-property-change', 'previous-single-char-property-change',
'previous-single-property-change', 'previous-window', 'prin1',
'prin1-to-string', 'princ', 'print', 'process-attributes',
'process-buffer', 'process-coding-system', 'process-command',
'process-connection', 'process-contact', 'process-datagram-address',
'process-exit-status', 'process-filter', 'process-filter-multibyte-p',
'process-id', 'process-inherit-coding-system-flag', 'process-list',
'process-mark', 'process-name', 'process-plist',
'process-query-on-exit-flag', 'process-running-child-p',
'process-send-eof', 'process-send-region', 'process-send-string',
'process-sentinel', 'process-status', 'process-tty-name',
'process-type', 'processp', 'profiler-cpu-log',
'profiler-cpu-running-p', 'profiler-cpu-start', 'profiler-cpu-stop',
'profiler-memory-log', 'profiler-memory-running-p',
'profiler-memory-start', 'profiler-memory-stop', 'propertize',
'purecopy', 'put', 'put-text-property',
'put-unicode-property-internal', 'puthash', 'query-font',
'query-fontset', 'quit-process', 'raise-frame', 'random', 'rassoc',
'rassq', 're-search-backward', 're-search-forward', 'read',
'read-buffer', 'read-char', 'read-char-exclusive',
'read-coding-system', 'read-command', 'read-event',
'read-from-minibuffer', 'read-from-string', 'read-function',
'read-key-sequence', 'read-key-sequence-vector',
'read-no-blanks-input', 'read-non-nil-coding-system', 'read-string',
'read-variable', 'recent-auto-save-p', 'recent-doskeys',
'recent-keys', 'recenter', 'recursion-depth', 'recursive-edit',
'redirect-debugging-output', 'redirect-frame-focus', 'redisplay',
'redraw-display', 'redraw-frame', 'regexp-quote', 'region-beginning',
'region-end', 'register-ccl-program', 'register-code-conversion-map',
'remhash', 'remove-list-of-text-properties', 'remove-text-properties',
'rename-buffer', 'rename-file', 'replace-match',
'reset-this-command-lengths', 'resize-mini-window-internal',
'restore-buffer-modified-p', 'resume-tty', 'reverse', 'round',
'run-hook-with-args', 'run-hook-with-args-until-failure',
'run-hook-with-args-until-success', 'run-hook-wrapped', 'run-hooks',
'run-window-configuration-change-hook', 'run-window-scroll-functions',
'safe-length', 'scan-lists', 'scan-sexps', 'scroll-down',
'scroll-left', 'scroll-other-window', 'scroll-right', 'scroll-up',
'search-backward', 'search-forward', 'secure-hash', 'select-frame',
'select-window', 'selected-frame', 'selected-window',
'self-insert-command', 'send-string-to-terminal', 'sequencep',
'serial-process-configure', 'set', 'set-buffer',
'set-buffer-auto-saved', 'set-buffer-major-mode',
'set-buffer-modified-p', 'set-buffer-multibyte', 'set-case-table',
'set-category-table', 'set-char-table-extra-slot',
'set-char-table-parent', 'set-char-table-range', 'set-charset-plist',
'set-charset-priority', 'set-coding-system-priority',
'set-cursor-size', 'set-default', 'set-default-file-modes',
'set-default-toplevel-value', 'set-file-acl', 'set-file-modes',
'set-file-selinux-context', 'set-file-times', 'set-fontset-font',
'set-frame-height', 'set-frame-position', 'set-frame-selected-window',
'set-frame-size', 'set-frame-width', 'set-fringe-bitmap-face',
'set-input-interrupt-mode', 'set-input-meta-mode', 'set-input-mode',
'set-keyboard-coding-system-internal', 'set-keymap-parent',
'set-marker', 'set-marker-insertion-type', 'set-match-data',
'set-message-beep', 'set-minibuffer-window',
'set-mouse-pixel-position', 'set-mouse-position',
'set-network-process-option', 'set-output-flow-control',
'set-process-buffer', 'set-process-coding-system',
'set-process-datagram-address', 'set-process-filter',
'set-process-filter-multibyte',
'set-process-inherit-coding-system-flag', 'set-process-plist',
'set-process-query-on-exit-flag', 'set-process-sentinel',
'set-process-window-size', 'set-quit-char',
'set-safe-terminal-coding-system-internal', 'set-screen-color',
'set-standard-case-table', 'set-syntax-table',
'set-terminal-coding-system-internal', 'set-terminal-local-value',
'set-terminal-parameter', 'set-text-properties', 'set-time-zone-rule',
'set-visited-file-modtime', 'set-window-buffer',
'set-window-combination-limit', 'set-window-configuration',
'set-window-dedicated-p', 'set-window-display-table',
'set-window-fringes', 'set-window-hscroll', 'set-window-margins',
'set-window-new-normal', 'set-window-new-pixel',
'set-window-new-total', 'set-window-next-buffers',
'set-window-parameter', 'set-window-point', 'set-window-prev-buffers',
'set-window-redisplay-end-trigger', 'set-window-scroll-bars',
'set-window-start', 'set-window-vscroll', 'setcar', 'setcdr',
'setplist', 'show-face-resources', 'signal', 'signal-process', 'sin',
'single-key-description', 'skip-chars-backward', 'skip-chars-forward',
'skip-syntax-backward', 'skip-syntax-forward', 'sleep-for', 'sort',
'sort-charsets', 'special-variable-p', 'split-char',
'split-window-internal', 'sqrt', 'standard-case-table',
'standard-category-table', 'standard-syntax-table', 'start-kbd-macro',
'start-process', 'stop-process', 'store-kbd-macro-event', 'string',
'string-as-multibyte', 'string-as-unibyte', 'string-bytes',
'string-collate-equalp', 'string-collate-lessp', 'string-equal',
'string-lessp', 'string-make-multibyte', 'string-make-unibyte',
'string-match', 'string-to-char', 'string-to-multibyte',
'string-to-number', 'string-to-syntax', 'string-to-unibyte',
'string-width', 'stringp', 'subr-name', 'subrp',
'subst-char-in-region', 'substitute-command-keys',
'substitute-in-file-name', 'substring', 'substring-no-properties',
'suspend-emacs', 'suspend-tty', 'suspicious-object', 'sxhash',
'symbol-function', 'symbol-name', 'symbol-plist', 'symbol-value',
'symbolp', 'syntax-table', 'syntax-table-p', 'system-groups',
'system-move-file-to-trash', 'system-name', 'system-users', 'tan',
'terminal-coding-system', 'terminal-list', 'terminal-live-p',
'terminal-local-value', 'terminal-name', 'terminal-parameter',
'terminal-parameters', 'terpri', 'test-completion',
'text-char-description', 'text-properties-at', 'text-property-any',
'text-property-not-all', 'this-command-keys',
'this-command-keys-vector', 'this-single-command-keys',
'this-single-command-raw-keys', 'time-add', 'time-less-p',
'time-subtract', 'tool-bar-get-system-style', 'tool-bar-height',
'tool-bar-pixel-width', 'top-level', 'trace-redisplay',
'trace-to-stderr', 'translate-region-internal', 'transpose-regions',
'truncate', 'try-completion', 'tty-display-color-cells',
'tty-display-color-p', 'tty-no-underline',
'tty-suppress-bold-inverse-default-colors', 'tty-top-frame',
'tty-type', 'type-of', 'undo-boundary', 'unencodable-char-position',
'unhandled-file-name-directory', 'unibyte-char-to-multibyte',
'unibyte-string', 'unicode-property-table-internal', 'unify-charset',
'unintern', 'unix-sync', 'unlock-buffer', 'upcase', 'upcase-initials',
'upcase-initials-region', 'upcase-region', 'upcase-word',
'use-global-map', 'use-local-map', 'user-full-name',
'user-login-name', 'user-real-login-name', 'user-real-uid',
'user-uid', 'variable-binding-locus', 'vconcat', 'vector',
'vector-or-char-table-p', 'vectorp', 'verify-visited-file-modtime',
'vertical-motion', 'visible-frame-list', 'visited-file-modtime',
'w16-get-clipboard-data', 'w16-selection-exists-p',
'w16-set-clipboard-data', 'w32-battery-status',
'w32-default-color-map', 'w32-define-rgb-color',
'w32-display-monitor-attributes-list', 'w32-frame-menu-bar-size',
'w32-frame-rect', 'w32-get-clipboard-data',
'w32-get-codepage-charset', 'w32-get-console-codepage',
'w32-get-console-output-codepage', 'w32-get-current-locale-id',
'w32-get-default-locale-id', 'w32-get-keyboard-layout',
'w32-get-locale-info', 'w32-get-valid-codepages',
'w32-get-valid-keyboard-layouts', 'w32-get-valid-locale-ids',
'w32-has-winsock', 'w32-long-file-name', 'w32-reconstruct-hot-key',
'w32-register-hot-key', 'w32-registered-hot-keys',
'w32-selection-exists-p', 'w32-send-sys-command',
'w32-set-clipboard-data', 'w32-set-console-codepage',
'w32-set-console-output-codepage', 'w32-set-current-locale',
'w32-set-keyboard-layout', 'w32-set-process-priority',
'w32-shell-execute', 'w32-short-file-name', 'w32-toggle-lock-key',
'w32-unload-winsock', 'w32-unregister-hot-key', 'w32-window-exists-p',
'w32notify-add-watch', 'w32notify-rm-watch',
'waiting-for-user-input-p', 'where-is-internal', 'widen',
'widget-apply', 'widget-get', 'widget-put',
'window-absolute-pixel-edges', 'window-at', 'window-body-height',
'window-body-width', 'window-bottom-divider-width', 'window-buffer',
'window-combination-limit', 'window-configuration-frame',
'window-configuration-p', 'window-dedicated-p',
'window-display-table', 'window-edges', 'window-end', 'window-frame',
'window-fringes', 'window-header-line-height', 'window-hscroll',
'window-inside-absolute-pixel-edges', 'window-inside-edges',
'window-inside-pixel-edges', 'window-left-child',
'window-left-column', 'window-line-height', 'window-list',
'window-list-1', 'window-live-p', 'window-margins',
'window-minibuffer-p', 'window-mode-line-height', 'window-new-normal',
'window-new-pixel', 'window-new-total', 'window-next-buffers',
'window-next-sibling', 'window-normal-size', 'window-old-point',
'window-parameter', 'window-parameters', 'window-parent',
'window-pixel-edges', 'window-pixel-height', 'window-pixel-left',
'window-pixel-top', 'window-pixel-width', 'window-point',
'window-prev-buffers', 'window-prev-sibling',
'window-redisplay-end-trigger', 'window-resize-apply',
'window-resize-apply-total', 'window-right-divider-width',
'window-scroll-bar-height', 'window-scroll-bar-width',
'window-scroll-bars', 'window-start', 'window-system',
'window-text-height', 'window-text-pixel-size', 'window-text-width',
'window-top-child', 'window-top-line', 'window-total-height',
'window-total-width', 'window-use-time', 'window-valid-p',
'window-vscroll', 'windowp', 'write-char', 'write-region',
'x-backspace-delete-keys-p', 'x-change-window-property',
'x-change-window-property', 'x-close-connection',
'x-close-connection', 'x-create-frame', 'x-create-frame',
'x-delete-window-property', 'x-delete-window-property',
'x-disown-selection-internal', 'x-display-backing-store',
'x-display-backing-store', 'x-display-color-cells',
'x-display-color-cells', 'x-display-grayscale-p',
'x-display-grayscale-p', 'x-display-list', 'x-display-list',
'x-display-mm-height', 'x-display-mm-height', 'x-display-mm-width',
'x-display-mm-width', 'x-display-monitor-attributes-list',
'x-display-pixel-height', 'x-display-pixel-height',
'x-display-pixel-width', 'x-display-pixel-width', 'x-display-planes',
'x-display-planes', 'x-display-save-under', 'x-display-save-under',
'x-display-screens', 'x-display-screens', 'x-display-visual-class',
'x-display-visual-class', 'x-family-fonts', 'x-file-dialog',
'x-file-dialog', 'x-file-dialog', 'x-focus-frame', 'x-frame-geometry',
'x-frame-geometry', 'x-get-atom-name', 'x-get-resource',
'x-get-selection-internal', 'x-hide-tip', 'x-hide-tip',
'x-list-fonts', 'x-load-color-file', 'x-menu-bar-open-internal',
'x-menu-bar-open-internal', 'x-open-connection', 'x-open-connection',
'x-own-selection-internal', 'x-parse-geometry', 'x-popup-dialog',
'x-popup-menu', 'x-register-dnd-atom', 'x-select-font',
'x-select-font', 'x-selection-exists-p', 'x-selection-owner-p',
'x-send-client-message', 'x-server-max-request-size',
'x-server-max-request-size', 'x-server-vendor', 'x-server-vendor',
'x-server-version', 'x-server-version', 'x-show-tip', 'x-show-tip',
'x-synchronize', 'x-synchronize', 'x-uses-old-gtk-dialog',
'x-window-property', 'x-window-property', 'x-wm-set-size-hint',
'xw-color-defined-p', 'xw-color-defined-p', 'xw-color-values',
'xw-color-values', 'xw-display-color-p', 'xw-display-color-p',
'yes-or-no-p', 'zlib-available-p', 'zlib-decompress-region',
'forward-point',
))
builtin_function_highlighted = set((
'defvaralias', 'provide', 'require',
'with-no-warnings', 'define-widget', 'with-electric-help',
'throw', 'defalias', 'featurep'
))
lambda_list_keywords = set((
'&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional',
'&rest', '&whole',
))
error_keywords = set((
'cl-assert', 'cl-check-type', 'error', 'signal',
'user-error', 'warn',
))
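# Symbols are first tokenized generically as Name.Variable by the rules in
# `tokens` below; the override underneath then reclassifies them by lookup
# in the tables above (builtin functions, special forms, error keywords,
# highlighted builtins, macros and lambda-list keywords) before yielding.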
def get_tokens_unprocessed(self, text):
stack = ['root']
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Variable:
if value in EmacsLispLexer.builtin_function:
yield index, Name.Function, value
continue
if value in EmacsLispLexer.special_forms:
yield index, Keyword, value
continue
if value in EmacsLispLexer.error_keywords:
yield index, Name.Exception, value
continue
if value in EmacsLispLexer.builtin_function_highlighted:
yield index, Name.Builtin, value
continue
if value in EmacsLispLexer.macros:
yield index, Name.Builtin, value
continue
if value in EmacsLispLexer.lambda_list_keywords:
yield index, Keyword.Pseudo, value
continue
yield index, token, value
tokens = {
'root': [
default('body'),
],
'body': [
# whitespace
(r'\s+', Text),
# single-line comment
(r';.*$', Comment.Single),
# strings and characters
(r'"', String, 'string'),
(r'\?([^\\]|\\.)', String.Char),
# quoting
(r":" + symbol, Name.Builtin),
(r"::" + symbol, String.Symbol),
(r"'" + symbol, String.Symbol),
(r"'", Operator),
(r"`", Operator),
# decimal numbers
(r'[-+]?\d+\.?' + terminated, Number.Integer),
(r'[-+]?\d+/\d+' + terminated, Number),
(r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' +
terminated, Number.Float),
# vectors
(r'\[|\]', Punctuation),
# uninterned symbol
(r'#:' + symbol, String.Symbol),
# read syntax for char tables
(r'#\^\^?', Operator),
# function shorthand
(r'#\'', Name.Function),
# binary rational
(r'#[bB][+-]?[01]+(/[01]+)?', Number.Bin),
# octal rational
(r'#[oO][+-]?[0-7]+(/[0-7]+)?', Number.Oct),
# hex rational
(r'#[xX][+-]?[0-9a-fA-F]+(/[0-9a-fA-F]+)?', Number.Hex),
# radix rational
(r'#\d+r[+-]?[0-9a-zA-Z]+(/[0-9a-zA-Z]+)?', Number),
# reference
(r'#\d+=', Operator),
(r'#\d+#', Operator),
# special operators that should have been parsed already
(r'(,@|,|\.|:)', Operator),
# special constants
(r'(t|nil)' + terminated, Name.Constant),
# functions and variables
(r'\*' + symbol + r'\*', Name.Variable.Global),
(symbol, Name.Variable),
# parentheses
(r'#\(', Operator, 'body'),
(r'\(', Punctuation, 'body'),
(r'\)', Punctuation, '#pop'),
],
'string': [
(r'[^"\\`]+', String),
(r'`%s\'' % symbol, String.Symbol),
(r'`', String),
(r'\\.', String),
(r'\\\n', String),
(r'"', String, '#pop'),
],
}
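# A minimal usage sketch (illustrative only; it relies on the standard
# Lexer.get_tokens() API from Pygments):
#
#     code = '(defun greet () (message "hello"))'
#     for token, value in EmacsLispLexer().get_tokens(code):
#         print(token, repr(value))
#
# Here `defun` is reclassified as Name.Builtin (it appears in `macros`) and
# `message` as Name.Function (it appears in `builtin_function`) by the
# get_tokens_unprocessed() override above.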
class ShenLexer(RegexLexer):
"""
Lexer for `Shen <http://shenlanguage.org/>`_ source code.

.. versionadded:: 2.1
"""
name = 'Shen'
aliases = ['shen']
filenames = ['*.shen']
mimetypes = ['text/x-shen', 'application/x-shen']
DECLARATIONS = (
'datatype', 'define', 'defmacro', 'defprolog', 'defcc',
'synonyms', 'declare', 'package', 'type', 'function',
)
SPECIAL_FORMS = (
'lambda', 'get', 'let', 'if', 'cases', 'cond', 'put', 'time', 'freeze',
'value', 'load', '$', 'protect', 'or', 'and', 'not', 'do', 'output',
'prolog?', 'trap-error', 'error', 'make-string', '/.', 'set', '@p',
'@s', '@v',
)
BUILTINS = (
'==', '=', '*', '+', '-', '/', '<', '>', '>=', '<=', '<-address',
'<-vector', 'abort', 'absvector', 'absvector?', 'address->', 'adjoin',
'append', 'arity', 'assoc', 'bind', 'boolean?', 'bound?', 'call', 'cd',
'close', 'cn', 'compile', 'concat', 'cons', 'cons?', 'cut', 'destroy',
'difference', 'element?', 'empty?', 'enable-type-theory',
'error-to-string', 'eval', 'eval-kl', 'exception', 'explode', 'external',
'fail', 'fail-if', 'file', 'findall', 'fix', 'fst', 'fwhen', 'gensym',
'get-time', 'hash', 'hd', 'hdstr', 'hdv', 'head', 'identical',
'implementation', 'in', 'include', 'include-all-but', 'inferences',
'input', 'input+', 'integer?', 'intern', 'intersection', 'is', 'kill',
'language', 'length', 'limit', 'lineread', 'loaded', 'macro', 'macroexpand',
'map', 'mapcan', 'maxinferences', 'mode', 'n->string', 'nl', 'nth', 'null',
'number?', 'occurrences', 'occurs-check', 'open', 'os', 'out', 'port',
'porters', 'pos', 'pr', 'preclude', 'preclude-all-but', 'print', 'profile',
'profile-results', 'ps', 'quit', 'read', 'read+', 'read-byte', 'read-file',
'read-file-as-bytelist', 'read-file-as-string', 'read-from-string',
'release', 'remove', 'return', 'reverse', 'run', 'save', 'set',
'simple-error', 'snd', 'specialise', 'spy', 'step', 'stinput', 'stoutput',
'str', 'string->n', 'string->symbol', 'string?', 'subst', 'symbol?',
'systemf', 'tail', 'tc', 'tc?', 'thaw', 'tl', 'tlstr', 'tlv', 'track',
'tuple?', 'undefmacro', 'unify', 'unify!', 'union', 'unprofile',
'unspecialise', 'untrack', 'variable?', 'vector', 'vector->', 'vector?',
'verified', 'version', 'warn', 'when', 'write-byte', 'write-to-file',
'y-or-n?',
)
BUILTINS_ANYWHERE = ('where', 'skip', '>>', '_', '!', '<e>', '<!>')
MAPPINGS = dict((s, Keyword) for s in DECLARATIONS)
MAPPINGS.update((s, Name.Builtin) for s in BUILTINS)
MAPPINGS.update((s, Keyword) for s in SPECIAL_FORMS)
valid_symbol_chars = r'[\w!$%*+,<=>?/.\'@&#:-]'
valid_name = '%s+' % valid_symbol_chars
symbol_name = r'[a-z!$%%*+,<=>?/.\'@&#_-]%s*' % valid_symbol_chars
variable = r'[A-Z]%s*' % valid_symbol_chars
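# Illustrative examples based on the patterns above: capitalized names such
# as `X` or `Result` match `variable`, while lowercase-initial names such as
# `map` or `foo-bar?` fall through to `symbol_name`.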
tokens = {
'string': [
(r'"', String, '#pop'),
(r'c#\d{1,3};', String.Escape),
(r'~[ARS%]', String.Interpol),
(r'(?s).', String),
],
'root': [
(r'(?s)\\\*.*?\*\\', Comment.Multiline), # \* ... *\
(r'\\\\.*', Comment.Single), # \\ ...
(r'\s+', Text),
(r'_{5,}', Punctuation),
(r'={5,}', Punctuation),
(r'(;|:=|\||--?>|<--?)', Punctuation),
(r'(:-|:|\{|\})', Literal),
(r'[+-]*\d*\.\d+(e[+-]?\d+)?', Number.Float),
(r'[+-]*\d+', Number.Integer),
(r'"', String, 'string'),
(variable, Name.Variable),
(r'(true|false|<>|\[\])', Keyword.Pseudo),
(symbol_name, Literal),
(r'(\[|\]|\(|\))', Punctuation),
],
}
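# The raw RegexLexer stream is refined by two chained generator filters:
# _process_symbols resolves symbols in head (post-parenthesis) position
# against MAPPINGS (defaulting to Name.Function), and _process_declarations
# then rewrites the tokens that follow declaration keywords such as
# `define`, `datatype` and `package`.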
def get_tokens_unprocessed(self, text):
tokens = RegexLexer.get_tokens_unprocessed(self, text)
tokens = self._process_symbols(tokens)
tokens = self._process_declarations(tokens)
return tokens
def _relevant(self, token):
return token not in (Text, Comment.Single, Comment.Multiline)
def _process_declarations(self, tokens):
opening_paren = False
for index, token, value in tokens:
yield index, token, value
if self._relevant(token):
if opening_paren and token == Keyword and value in self.DECLARATIONS:
declaration = value
for index, token, value in \
self._process_declaration(declaration, tokens):
yield index, token, value
opening_paren = value == '(' and token == Punctuation
def _process_symbols(self, tokens):
opening_paren = False
for index, token, value in tokens:
if opening_paren and token in (Literal, Name.Variable):
token = self.MAPPINGS.get(value, Name.Function)
elif token == Literal and value in self.BUILTINS_ANYWHERE:
token = Name.Builtin
opening_paren = value == '(' and token == Punctuation
yield index, token, value
def _process_declaration(self, declaration, tokens):
for index, token, value in tokens:
if self._relevant(token):
break
yield index, token, value
if declaration == 'datatype':
prev_was_colon = False
token = Keyword.Type if token == Literal else token
yield index, token, value
for index, token, value in tokens:
if prev_was_colon and token == Literal:
token = Keyword.Type
yield index, token, value
if self._relevant(token):
prev_was_colon = token == Literal and value == ':'
elif declaration == 'package':
token = Name.Namespace if token == Literal else token
yield index, token, value
elif declaration == 'define':
token = Name.Function if token == Literal else token
yield index, token, value
for index, token, value in tokens:
if self._relevant(token):
break
yield index, token, value
if value == '{' and token == Literal:
yield index, Punctuation, value
for index, token, value in self._process_signature(tokens):
yield index, token, value
else:
yield index, token, value
else:
token = Name.Function if token == Literal else token
yield index, token, value
return
def _process_signature(self, tokens):
for index, token, value in tokens:
if token == Literal and value == '}':
yield index, Punctuation, value
return
elif token in (Literal, Name.Function):
token = Name.Variable if value.istitle() else Keyword.Type
yield index, token, value
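# For example (illustrative only):
#
#     list(ShenLexer().get_tokens('(define inc X -> (+ X 1))'))
#
# classifies `define` as Keyword, `inc` as Name.Function (via the 'define'
# branch of _process_declaration above), `X` as Name.Variable and `+` as
# Name.Builtin.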
class CPSALexer(SchemeLexer):
"""
A CPSA lexer based on the CPSA language as of version 2.2.12.

.. versionadded:: 2.1
"""
name = 'CPSA'
aliases = ['cpsa']
filenames = ['*.cpsa']
mimetypes = []
# list of known keywords and builtins taken from vim 6.4 scheme.vim
# syntax file.
_keywords = (
'herald', 'vars', 'defmacro', 'include', 'defprotocol', 'defrole',
'defskeleton', 'defstrand', 'deflistener', 'non-orig', 'uniq-orig',
'pen-non-orig', 'precedes', 'trace', 'send', 'recv', 'name', 'text',
'skey', 'akey', 'data', 'mesg',
)
_builtins = (
'cat', 'enc', 'hash', 'privk', 'pubk', 'invk', 'ltk', 'gen', 'exp',
)
# valid names for identifiers
# well, names just cannot consist entirely of numbers,
# but this should be good enough for now
valid_name = r'[\w!$%&*+,/:<=>?@^~|-]+'
tokens = {
'root': [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
# support for uncommon kinds of numbers -
# have to figure out what the characters mean
# (r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char),
# constants
(r'(#t|#f)', Name.Constant),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# highlight the keywords
(words(_keywords, suffix=r'\b'), Keyword),
# first variable in a quoted string like
# '(this is syntactic sugar)
(r"(?<='\()" + valid_name, Name.Variable),
(r"(?<=#\()" + valid_name, Name.Variable),
# highlight the builtins
(words(_builtins, prefix=r'(?<=\()', suffix=r'\b'), Name.Builtin),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# the famous parentheses!
(r'(\(|\))', Punctuation),
(r'(\[|\])', Punctuation),
],
}
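# Never-invoked sketch of the rules above: keywords such as 'defprotocol' and
# 'defrole' are tagged Keyword, while the '(?<=\()' look-behind rules tag the
# first symbol after an opening paren as a builtin or function. The snippet is
# made up purely for illustration.
def _cpsa_token_sketch():
    code = u'(defprotocol example basic (defrole init (vars (a name))))'
    return [(token, value)
            for _, token, value in CPSALexer().get_tokens_unprocessed(code)]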
class XtlangLexer(RegexLexer):
"""An xtlang lexer for the `Extempore programming environment
<http://extempore.moso.com.au>`_.
This is a mixture of Scheme and xtlang, really. Keyword lists are
taken from the Extempore Emacs mode
(https://github.com/extemporelang/extempore-emacs-mode)
.. versionadded:: 2.2
"""
name = 'xtlang'
aliases = ['extempore']
filenames = ['*.xtm']
mimetypes = []
common_keywords = (
'lambda', 'define', 'if', 'else', 'cond', 'and',
'or', 'let', 'begin', 'set!', 'map', 'for-each',
)
scheme_keywords = (
'do', 'delay', 'quasiquote', 'unquote', 'unquote-splicing', 'eval',
'case', 'let*', 'letrec', 'quote',
)
xtlang_bind_keywords = (
'bind-func', 'bind-val', 'bind-lib', 'bind-type', 'bind-alias',
'bind-poly', 'bind-dylib', 'bind-lib-func', 'bind-lib-val',
)
xtlang_keywords = (
'letz', 'memzone', 'cast', 'convert', 'dotimes', 'doloop',
)
common_functions = (
'*', '+', '-', '/', '<', '<=', '=', '>', '>=', '%', 'abs', 'acos',
'angle', 'append', 'apply', 'asin', 'assoc', 'assq', 'assv',
'atan', 'boolean?', 'caaaar', 'caaadr', 'caaar', 'caadar',
'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr', 'cadar',
'caddar', 'cadddr', 'caddr', 'cadr', 'car', 'cdaaar',
'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr',
'cddr', 'cdr', 'ceiling', 'cons', 'cos', 'floor', 'length',
'list', 'log', 'max', 'member', 'min', 'modulo', 'not',
'reverse', 'round', 'sin', 'sqrt', 'substring', 'tan',
'println', 'random', 'null?', 'callback', 'now',
)
scheme_functions = (
'call-with-current-continuation', 'call-with-input-file',
'call-with-output-file', 'call-with-values', 'call/cc',
'char->integer', 'char-alphabetic?', 'char-ci<=?', 'char-ci<?',
'char-ci=?', 'char-ci>=?', 'char-ci>?', 'char-downcase',
'char-lower-case?', 'char-numeric?', 'char-ready?',
'char-upcase', 'char-upper-case?', 'char-whitespace?',
'char<=?', 'char<?', 'char=?', 'char>=?', 'char>?', 'char?',
'close-input-port', 'close-output-port', 'complex?',
'current-input-port', 'current-output-port', 'denominator',
'display', 'dynamic-wind', 'eof-object?', 'eq?', 'equal?',
'eqv?', 'even?', 'exact->inexact', 'exact?', 'exp', 'expt',
'force', 'gcd', 'imag-part', 'inexact->exact', 'inexact?',
'input-port?', 'integer->char', 'integer?',
'interaction-environment', 'lcm', 'list->string',
'list->vector', 'list-ref', 'list-tail', 'list?', 'load',
'magnitude', 'make-polar', 'make-rectangular', 'make-string',
'make-vector', 'memq', 'memv', 'negative?', 'newline',
'null-environment', 'number->string', 'number?',
'numerator', 'odd?', 'open-input-file', 'open-output-file',
'output-port?', 'pair?', 'peek-char', 'port?', 'positive?',
'procedure?', 'quotient', 'rational?', 'rationalize', 'read',
'read-char', 'real-part', 'real?',
'remainder', 'scheme-report-environment', 'set-car!', 'set-cdr!',
'string', 'string->list', 'string->number', 'string->symbol',
'string-append', 'string-ci<=?', 'string-ci<?', 'string-ci=?',
'string-ci>=?', 'string-ci>?', 'string-copy', 'string-fill!',
'string-length', 'string-ref', 'string-set!', 'string<=?',
'string<?', 'string=?', 'string>=?', 'string>?', 'string?',
'symbol->string', 'symbol?', 'transcript-off', 'transcript-on',
'truncate', 'values', 'vector', 'vector->list', 'vector-fill!',
'vector-length', 'vector?',
'with-input-from-file', 'with-output-to-file', 'write',
'write-char', 'zero?',
)
xtlang_functions = (
'toString', 'afill!', 'pfill!', 'tfill!', 'tbind', 'vfill!',
'array-fill!', 'pointer-fill!', 'tuple-fill!', 'vector-fill!', 'free',
'array', 'tuple', 'list', '~', 'cset!', 'cref', '&', 'bor',
'ang-names', '<<', '>>', 'nil', 'printf', 'sprintf', 'null', 'now',
'pset!', 'pref-ptr', 'vset!', 'vref', 'aset!', 'aref', 'aref-ptr',
'tset!', 'tref', 'tref-ptr', 'salloc', 'halloc', 'zalloc', 'alloc',
'schedule', 'exp', 'log', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan',
'sqrt', 'expt', 'floor', 'ceiling', 'truncate', 'round',
'llvm_printf', 'push_zone', 'pop_zone', 'memzone', 'callback',
'llvm_sprintf', 'make-array', 'array-set!', 'array-ref',
'array-ref-ptr', 'pointer-set!', 'pointer-ref', 'pointer-ref-ptr',
'stack-alloc', 'heap-alloc', 'zone-alloc', 'make-tuple', 'tuple-set!',
'tuple-ref', 'tuple-ref-ptr', 'closure-set!', 'closure-ref', 'pref',
'pdref', 'impc_null', 'bitcast', 'void', 'ifret', 'ret->', 'clrun->',
'make-env-zone', 'make-env', '<>', 'dtof', 'ftod', 'i1tof',
'i1tod', 'i1toi8', 'i1toi32', 'i1toi64', 'i8tof', 'i8tod',
'i8toi1', 'i8toi32', 'i8toi64', 'i32tof', 'i32tod', 'i32toi1',
'i32toi8', 'i32toi64', 'i64tof', 'i64tod', 'i64toi1',
'i64toi8', 'i64toi32',
)
# valid names for Scheme identifiers (names cannot consist fully
# of numbers, but this should be good enough for now)
valid_scheme_name = r'[\w!$%&*+,/:<=>?@^~|-]+'
# valid characters in xtlang names & types
valid_xtlang_name = r'[\w.!-]+'
valid_xtlang_type = r'[]{}[\w<>,*/|!-]+'
tokens = {
# keep track of when we're exiting the xtlang form
'xtlang': [
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop'),
(r'(?<=bind-func\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-val\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-type\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-alias\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-poly\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-lib\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-dylib\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-lib-func\s)' + valid_xtlang_name, Name.Function),
(r'(?<=bind-lib-val\s)' + valid_xtlang_name, Name.Function),
# type annotations
(r':' + valid_xtlang_type, Keyword.Type),
# types
(r'(<' + valid_xtlang_type + r'>|\|' + valid_xtlang_type + r'\||/' +
valid_xtlang_type + r'/|' + valid_xtlang_type + r'\*)\**',
Keyword.Type),
# keywords
(words(xtlang_keywords, prefix=r'(?<=\()'), Keyword),
# builtins
(words(xtlang_functions, prefix=r'(?<=\()'), Name.Function),
include('common'),
# variables
(valid_xtlang_name, Name.Variable),
],
'scheme': [
# quoted symbols
(r"'" + valid_scheme_name, String.Symbol),
# char literals
(r"#\\([()/'\"._!§$%& ?=+-]|[a-zA-Z0-9]+)", String.Char),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# keywords
(words(scheme_keywords, prefix=r'(?<=\()'), Keyword),
# builtins
(words(scheme_functions, prefix=r'(?<=\()'), Name.Function),
include('common'),
# variables
(valid_scheme_name, Name.Variable),
],
# common to both xtlang and Scheme
'common': [
# comments
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
# binary/oct/hex literals
(r'(#b|#o|#x)[\d.]+', Number),
# strings
(r'"(\\\\|\\"|[^"])*"', String),
# true/false constants
(r'(#t|#f)', Name.Constant),
# keywords
(words(common_keywords, prefix=r'(?<=\()'), Keyword),
# builtins
(words(common_functions, prefix=r'(?<=\()'), Name.Function),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
'root': [
# go into xtlang mode
(words(xtlang_bind_keywords, prefix=r'(?<=\()', suffix=r'\b'),
Keyword, 'xtlang'),
include('scheme')
],
}
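# Never-invoked sketch of the mode switch wired up in 'root' above: a
# (bind-func ...) form pushes the 'xtlang' state, so the ':i64' annotation is
# tagged Keyword.Type while ordinary Scheme code stays in the 'scheme' state.
# The snippet is made up purely for illustration.
def _xtlang_mode_switch_sketch():
    code = u'(bind-func add_one (lambda (x:i64) (+ x 1)))'
    return [(token, value)
            for _, token, value in XtlangLexer().get_tokens_unprocessed(code)]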
class FennelLexer(RegexLexer):
"""A lexer for the `Fennel programming language <https://fennel-lang.org>`_.
Fennel compiles to Lua, so all the Lua builtins are recognized as well
as the special forms that are particular to the Fennel compiler.
.. versionadded:: 2.3
"""
name = 'Fennel'
aliases = ['fennel', 'fnl']
filenames = ['*.fnl']
# these two lists are taken from fennel-mode.el:
# https://gitlab.com/technomancy/fennel-mode
# this list is current as of Fennel version 0.1.0.
special_forms = (
u'require-macros', u'eval-compiler',
u'do', u'values', u'if', u'when', u'each', u'for', u'fn', u'lambda',
u'λ', u'set', u'global', u'var', u'local', u'let', u'tset', u'doto',
u'set-forcibly!', u'defn', u'partial', u'while', u'or', u'and', u'true',
u'false', u'nil', u'.', u'+', u'..', u'^', u'-', u'*', u'%', u'/', u'>',
u'<', u'>=', u'<=', u'=', u'~=', u'#', u'...', u':', u'->', u'->>',
)
# Might be nicer to use the list from _lua_builtins.py but it's unclear how?
builtins = (
u'_G', u'_VERSION', u'arg', u'assert', u'bit32', u'collectgarbage',
u'coroutine', u'debug', u'dofile', u'error', u'getfenv',
u'getmetatable', u'io', u'ipairs', u'load', u'loadfile', u'loadstring',
u'math', u'next', u'os', u'package', u'pairs', u'pcall', u'print',
u'rawequal', u'rawget', u'rawlen', u'rawset', u'require', u'select',
u'setfenv', u'setmetatable', u'string', u'table', u'tonumber',
u'tostring', u'type', u'unpack', u'xpcall'
)
# based on the scheme definition, but disallowing leading digits and commas
valid_name = r'[a-zA-Z_!$%&*+/:<=>?@^~|-][\w!$%&*+/:<=>?@^~|\.-]*'
tokens = {
'root': [
# the only comment form is a semicolon; goes to the end of the line
(r';.*$', Comment.Single),
(r'[,\s]+', Text),
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'(\\\\|\\'|[^'])*'", String),
# these are technically strings, but it's worth visually
# distinguishing them because their intent is different
# from regular strings.
(r':' + valid_name, String.Symbol),
# special forms are keywords
(words(special_forms, suffix=' '), Keyword),
# lua standard library are builtins
(words(builtins, suffix=' '), Name.Builtin),
# special-case the vararg symbol
(r'\.\.\.', Name.Variable),
# regular identifiers
(valid_name, Name.Variable),
# all your normal paired delimiters for your programming enjoyment
(r'(\(|\))', Punctuation),
(r'(\[|\])', Punctuation),
(r'(\{|\})', Punctuation),
]
}
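# Never-invoked usage sketch for the lexer above: tokenize a short, made-up
# Fennel definition and return the (token, value) pairs, so the keyword,
# builtin, and string rules can be eyeballed.
def _fennel_token_sketch():
    code = u'(fn greet [name] (print (.. "hello " name)))'
    return [(token, value)
            for _, token, value in FennelLexer().get_tokens_unprocessed(code)]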
| unreal666/outwiker | plugins/source/source/pygments/lexers/lisp.py | Python | gpl-3.0 | 143,585 |
#!/usr/bin/python
# Copyright (C) Vladimir Prus 2006.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Tests for the target id resolution process.
from BoostBuild import Tester, List
# Create a temporary working directory
t = Tester()
# Create the needed files
t.write("Jamroot", """
exe hello : hello.cpp ;
# This should use the 'hello' target, even if there's
# a 'hello' file in the current dir.
install s : hello : <location>. ;
""")
t.write("hello.cpp", """
int main()
{
return 0;
}
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/hello.obj")
t.touch("hello.cpp")
t.run_build_system("s")
# If 'hello' in the 's' target resolved to a file in
# the current dir, nothing will be rebuilt.
t.expect_touch("bin/$toolset/debug/hello.obj")
t.cleanup()
| gorkinovich/DefendersOfMankind | dependencies/luabind/boost-build/test/resolution.py | Python | gpl-3.0 | 876 |
from alias import Alias
from source import Source, SourceUrlCouldNotBeRetrieved, SourceCouldNotParseTimeString
from venue import Venue
from sitting import Sitting
from entry import Entry
| hzj123/56th | pombola/hansard/models/__init__.py | Python | agpl-3.0 | 194 |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the Account Settings page.
"""
from datetime import datetime
from unittest import skip
import six
from bok_choy.page_object import XSS_INJECTION
from pytz import timezone, utc
from common.test.acceptance.pages.common.auto_auth import FULL_NAME, AutoAuthPage
from common.test.acceptance.pages.lms.account_settings import AccountSettingsPage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.tests.helpers import AcceptanceTest, EventsTestMixin
class AccountSettingsTestMixin(EventsTestMixin, AcceptanceTest):
"""
Mixin with helper methods to test the account settings page.
"""
CHANGE_INITIATED_EVENT_NAME = u"edx.user.settings.change_initiated"
USER_SETTINGS_CHANGED_EVENT_NAME = 'edx.user.settings.changed'
ACCOUNT_SETTINGS_REFERER = u"/account/settings"
shard = 23
def visit_account_settings_page(self, gdpr=False):
"""
Visit the account settings page for the current user, and store the page instance
as self.account_settings_page.
"""
self.account_settings_page = AccountSettingsPage(self.browser)
self.account_settings_page.visit()
self.account_settings_page.wait_for_ajax()
# TODO: LEARNER-4422 - delete when we clean up flags
if gdpr:
self.account_settings_page.browser.get(self.browser.current_url + "?course_experience.gdpr=1")
self.account_settings_page.wait_for_page()
def log_in_as_unique_user(self, email=None, full_name=None, password=None):
"""
Create a unique user and return the account's username and id.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(
self.browser,
username=username,
email=email,
full_name=full_name,
password=password
).visit()
user_id = auto_auth_page.get_user_id()
return username, user_id
def settings_changed_event_filter(self, event):
"""Filter out any events that are not "settings changed" events."""
return event['event_type'] == self.USER_SETTINGS_CHANGED_EVENT_NAME
def expected_settings_changed_event(self, setting, old, new, table=None):
"""A dictionary representing the expected fields in a "settings changed" event."""
return {
'username': self.username,
'referer': self.get_settings_page_url(),
'event': {
'user_id': self.user_id,
'setting': setting,
'old': old,
'new': new,
'truncated': [],
'table': table or 'auth_userprofile'
}
}
def settings_change_initiated_event_filter(self, event):
"""Filter out any events that are not "settings change initiated" events."""
return event['event_type'] == self.CHANGE_INITIATED_EVENT_NAME
def expected_settings_change_initiated_event(self, setting, old, new, username=None, user_id=None):
"""A dictionary representing the expected fields in a "settings change initiated" event."""
return {
'username': username or self.username,
'referer': self.get_settings_page_url(),
'event': {
'user_id': user_id or self.user_id,
'setting': setting,
'old': old,
'new': new,
}
}
def get_settings_page_url(self):
"""The absolute URL of the account settings page given the test context."""
return self.relative_path_to_absolute_uri(self.ACCOUNT_SETTINGS_REFERER)
def assert_no_setting_changed_event(self):
"""Assert no setting changed event has been emitted thus far."""
self.assert_no_matching_events_were_emitted({'event_type': self.USER_SETTINGS_CHANGED_EVENT_NAME})
class DashboardMenuTest(AccountSettingsTestMixin, AcceptanceTest):
"""
Tests that the dashboard menu works correctly with the account settings page.
"""
shard = 8
def test_link_on_dashboard_works(self):
"""
Scenario: Verify that the "Account" link works from the dashboard.
Given that I am a registered user
And I visit my dashboard
And I click on "Account" in the top drop down
Then I should see my account settings page
"""
self.log_in_as_unique_user()
dashboard_page = DashboardPage(self.browser)
dashboard_page.visit()
dashboard_page.click_username_dropdown()
self.assertIn('Account', dashboard_page.username_dropdown_link_text)
dashboard_page.click_account_settings_link()
class AccountSettingsPageTest(AccountSettingsTestMixin, AcceptanceTest):
"""
Tests that verify behaviour of the Account Settings page.
"""
SUCCESS_MESSAGE = 'Your changes have been saved.'
shard = 8
def setUp(self):
"""
Initialize account and pages.
"""
super(AccountSettingsPageTest, self).setUp()
self.full_name = FULL_NAME
self.social_link = ''
self.username, self.user_id = self.log_in_as_unique_user(full_name=self.full_name)
self.visit_account_settings_page()
def test_page_view_event(self):
"""
Scenario: An event should be recorded when the "Account Settings"
page is viewed.
Given that I am a registered user
And I visit my account settings page
Then a page view analytics event should be recorded
"""
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.user.settings.viewed'}, number_of_matches=1)
self.assert_events_match(
[
{
'event': {
'user_id': self.user_id,
'page': 'account',
'visibility': None
}
}
],
actual_events
)
def test_all_sections_and_fields_are_present(self):
"""
Scenario: Verify that all sections and fields are present on the page.
"""
expected_sections_structure = [
{
'title': 'Basic Account Information',
'fields': [
'Username',
'Full Name',
'Email Address (Sign In)',
'Password',
'Language',
'Country or Region of Residence',
'Time Zone',
]
},
{
'title': 'Additional Information',
'fields': [
'Education Completed',
'Gender',
'Year of Birth',
'Preferred Language',
]
},
{
'title': 'Social Media Links',
'fields': sorted([
'Twitter Link',
'Facebook Link',
'LinkedIn Link',
])
},
{
'title': 'Delete My Account',
'fields': []
},
]
sections_structure = self.account_settings_page.sections_structure()
sections_structure[2]['fields'] = sorted(sections_structure[2]['fields'])
self.assertEqual(sections_structure, expected_sections_structure)
def _test_readonly_field(self, field_id, title, value):
"""
Test behavior of a readonly field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_readonly_field(field_id), value)
def _test_text_field(
self, field_id, title, initial_value, new_invalid_value, new_valid_values, success_message=SUCCESS_MESSAGE,
assert_after_reload=True
):
"""
Test behaviour of a text field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_text_field(field_id), initial_value)
self.assertEqual(
self.account_settings_page.value_for_text_field(field_id, new_invalid_value), new_invalid_value
)
self.account_settings_page.wait_for_indicator(field_id, 'validation-error')
self.browser.refresh()
self.assertNotEqual(self.account_settings_page.value_for_text_field(field_id), new_invalid_value)
for new_value in new_valid_values:
self.assertEqual(self.account_settings_page.value_for_text_field(field_id, new_value), new_value)
self.account_settings_page.wait_for_message(field_id, success_message)
if assert_after_reload:
self.browser.refresh()
self.assertEqual(self.account_settings_page.value_for_text_field(field_id), new_value)
def _test_dropdown_field(
self,
field_id,
title,
initial_value,
new_values,
success_message=SUCCESS_MESSAGE, # pylint: disable=unused-argument
reloads_on_save=False
):
"""
Test behaviour of a dropdown field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.value_for_dropdown_field(field_id, focus_out=True), initial_value)
for new_value in new_values:
self.assertEqual(
self.account_settings_page.value_for_dropdown_field(field_id, new_value, focus_out=True),
new_value
)
# An XHR request is made when changing the field
self.account_settings_page.wait_for_ajax()
if reloads_on_save:
self.account_settings_page.wait_for_loading_indicator()
else:
self.browser.refresh()
self.account_settings_page.wait_for_page()
self.assertEqual(self.account_settings_page.value_for_dropdown_field(field_id, focus_out=True), new_value)
def _test_link_field(self, field_id, title, link_title, field_type, success_message):
"""
        Test behaviour of a link field.
"""
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.link_title_for_link_field(field_id), link_title)
self.account_settings_page.click_on_link_in_link_field(field_id, field_type=field_type)
self.account_settings_page.wait_for_message(field_id, success_message)
def test_username_field(self):
"""
Test behaviour of "Username" field.
"""
self._test_readonly_field('username', 'Username', self.username)
def test_full_name_field(self):
"""
Test behaviour of "Full Name" field.
"""
self._test_text_field(
u'name',
u'Full Name',
self.full_name,
u' ',
[u'<h1>another name<h1>', u'<script>'],
'Full Name cannot contain the following characters: < >',
False
)
def test_email_field(self):
"""
Test behaviour of "Email" field.
"""
email = u"[email protected]"
username, user_id = self.log_in_as_unique_user(email=email)
self.visit_account_settings_page()
self._test_text_field(
u'email',
u'Email Address (Sign In)',
email,
u'[email protected]' + XSS_INJECTION,
[u'[email protected]', u'[email protected]'],
success_message='Click the link in the message to update your email address.',
assert_after_reload=False
)
actual_events = self.wait_for_events(
event_filter=self.settings_change_initiated_event_filter, number_of_matches=2)
self.assert_events_match(
[
self.expected_settings_change_initiated_event(
'email', email, '[email protected]', username=username, user_id=user_id),
# NOTE the first email change was never confirmed, so old has not changed.
self.expected_settings_change_initiated_event(
'email', email, '[email protected]', username=username, user_id=user_id),
],
actual_events
)
# Email is not saved until user confirms, so no events should have been
# emitted.
self.assert_no_setting_changed_event()
def test_password_field(self):
"""
Test behaviour of "Password" field.
"""
self._test_link_field(
u'password',
u'Password',
u'Reset Your Password',
u'button',
success_message='Click the link in the message to reset your password.',
)
event_filter = self.expected_settings_change_initiated_event('password', None, None)
self.wait_for_events(event_filter=event_filter, number_of_matches=1)
# Like email, since the user has not confirmed their password change,
# the field has not yet changed, so no events will have been emitted.
self.assert_no_setting_changed_event()
@skip(
'On bokchoy test servers, language changes take a few reloads to fully realize '
'which means we can no longer reliably match the strings in the html in other tests.'
)
def test_language_field(self):
"""
Test behaviour of "Language" field.
"""
self._test_dropdown_field(
u'pref-lang',
u'Language',
u'English',
[u'Dummy Language (Esperanto)', u'English'],
reloads_on_save=True,
)
def test_country_field(self):
"""
Test behaviour of "Country or Region" field.
"""
self._test_dropdown_field(
u'country',
u'Country or Region of Residence',
u'',
[u'Pakistan', u'Palau'],
)
def test_time_zone_field(self):
"""
Test behaviour of "Time Zone" field
"""
kiev_abbr, kiev_offset = self._get_time_zone_info('Europe/Kiev')
pacific_abbr, pacific_offset = self._get_time_zone_info('US/Pacific')
self._test_dropdown_field(
u'time_zone',
u'Time Zone',
u'Default (Local Time Zone)',
[
u'Europe/Kiev ({abbr}, UTC{offset})'.format(abbr=kiev_abbr, offset=kiev_offset),
u'US/Pacific ({abbr}, UTC{offset})'.format(abbr=pacific_abbr, offset=pacific_offset),
],
)
def _get_time_zone_info(self, time_zone_str):
"""
Helper that returns current time zone abbreviation and UTC offset
and accounts for daylight savings time
"""
time_zone = datetime.now(utc).astimezone(timezone(time_zone_str))
abbr = time_zone.strftime('%Z')
offset = time_zone.strftime('%z')
return abbr, offset
def test_social_links_field(self):
"""
Test behaviour of one of the social media links field.
"""
first_social_media_link = self.account_settings_page.get_social_first_element()
valid_value = 'https://www.twitter.com/edX'
if 'face' in first_social_media_link.lower():
valid_value = 'https://www.facebook.com/edX'
elif 'linked' in first_social_media_link.lower():
valid_value = 'https://www.linkedin.com/in/edX'
self._test_text_field(
'social_links',
first_social_media_link,
self.social_link,
'www.google.com/invalidlink)',
[valid_value, self.social_link],
)
def test_linked_accounts(self):
"""
Test that fields for third party auth providers exist.
Currently there is no way to test the whole authentication process
because that would require accounts with the providers.
"""
providers = (
['auth-oa2-facebook', 'Facebook', 'Link Your Account'],
['auth-oa2-google-oauth2', 'Google', 'Link Your Account'],
)
# switch to "Linked Accounts" tab
self.account_settings_page.switch_account_settings_tabs('accounts-tab')
for field_id, title, link_title in providers:
self.assertEqual(self.account_settings_page.title_for_field(field_id), title)
self.assertEqual(self.account_settings_page.link_title_for_link_field(field_id), link_title)
def test_order_history(self):
"""
Test that we can see orders on Order History tab.
"""
# switch to "Order History" tab
self.account_settings_page.switch_account_settings_tabs('orders-tab')
# verify that we are on correct tab
self.assertTrue(self.account_settings_page.is_order_history_tab_visible)
expected_order_data_first_row = {
'number': 'Order Number:\nEdx-123',
'date': 'Date Placed:\nApr 21, 2016',
'price': 'Cost:\n$100.00',
}
expected_order_data_second_row = {
'number': 'Product Name:\nTest Course',
'date': 'Date Placed:\nApr 21, 2016',
'price': 'Cost:\n$100.00',
}
for field_name, value in six.iteritems(expected_order_data_first_row):
self.assertEqual(
self.account_settings_page.get_value_of_order_history_row_item('order-Edx-123', field_name)[0], value
)
for field_name, value in six.iteritems(expected_order_data_second_row):
self.assertEqual(
self.account_settings_page.get_value_of_order_history_row_item('order-Edx-123', field_name)[1], value
)
self.assertTrue(self.account_settings_page.order_button_is_visible('order-Edx-123'))
class AccountSettingsDeleteAccountTest(AccountSettingsTestMixin, AcceptanceTest):
"""
Tests for the account deletion workflow.
"""
def setUp(self):
"""
Initialize account and pages.
"""
super(AccountSettingsDeleteAccountTest, self).setUp()
self.full_name = FULL_NAME
self.social_link = ''
self.password = 'password'
self.username, self.user_id = self.log_in_as_unique_user(full_name=self.full_name, password=self.password)
self.visit_account_settings_page(gdpr=True)
def test_button_visible(self):
self.assertTrue(
self.account_settings_page.is_delete_button_visible
)
def test_delete_modal(self):
self.account_settings_page.click_delete_button()
self.assertTrue(
self.account_settings_page.is_delete_modal_visible
)
self.assertFalse(
self.account_settings_page.delete_confirm_button_enabled()
)
self.account_settings_page.fill_in_password_field(self.password)
self.assertTrue(
self.account_settings_page.delete_confirm_button_enabled()
)
class AccountSettingsA11yTest(AccountSettingsTestMixin, AcceptanceTest):
"""
Class to test account settings accessibility.
"""
a11y = True
def test_account_settings_a11y(self):
"""
Test the accessibility of the account settings page.
"""
self.log_in_as_unique_user()
self.visit_account_settings_page()
self.account_settings_page.a11y_audit.config.set_rules({
"ignore": [
'aria-valid-attr', # TODO: LEARNER-6611 & LEARNER-6865
'region', # TODO: AC-932
]
})
self.account_settings_page.a11y_audit.check_for_accessibility_errors()
| edx-solutions/edx-platform | common/test/acceptance/tests/lms/test_account_settings.py | Python | agpl-3.0 | 19,924 |
"""SCons.Tool.aixf77
Tool-specific initialization for IBM Visual Age f77 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os.path
#import SCons.Platform.aix
from . import f77
# It would be good to look for the AIX F77 package the same way we're now
# looking for the C and C++ packages. This should be as easy as supplying
# the correct package names in the following list and uncommenting the
# SCons.Platform.aix.get_xlc() call in the function below.
packages = []
def get_xlf77(env):
xlf77 = env.get('F77', 'xlf77')
xlf77_r = env.get('SHF77', 'xlf77_r')
#return SCons.Platform.aix.get_xlc(env, xlf77, xlf77_r, packages)
return (None, xlf77, xlf77_r, None)
def generate(env):
"""
Add Builders and construction variables for the Visual Age FORTRAN
compiler to an Environment.
"""
path, _f77, _shf77, version = get_xlf77(env)
if path:
_f77 = os.path.join(path, _f77)
_shf77 = os.path.join(path, _shf77)
f77.generate(env)
env['F77'] = _f77
env['SHF77'] = _shf77
def exists(env):
path, _f77, _shf77, version = get_xlf77(env)
if path and _f77:
xlf77 = os.path.join(path, _f77)
if os.path.exists(xlf77):
return xlf77
return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lightmare/mapnik | scons/scons-local-4.1.0/SCons/Tool/aixf77.py | Python | lgpl-2.1 | 2,630 |
#!/usr/bin/env python
#pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import os
import unittest
import MooseDocs
from MooseDocs.common import moose_docs_app_syntax
from MooseDocs.common.nodes import SyntaxNode, MooseObjectNode, ActionNode, MooseObjectActionNode
class TestMooseAppSyntax(unittest.TestCase):
"""
TestCase for MooseAppSyntax class.
"""
@classmethod
def setUpClass(cls):
exe = os.path.join(MooseDocs.ROOT_DIR, 'modules', 'combined')
hide = {'framework': ['/Functions', '/Functions/ParsedFunction',
'/Functions/AddFunctionAction'],
'all': ['/Modules/PhaseField', '/Modules/PhaseField/EulerAngles2RGB']}
cls._syntax = moose_docs_app_syntax(exe, hide)
def testFindall(self):
"""
Test findall method.
"""
nodes = self._syntax.findall('Diffusion')
self.assertIsInstance(nodes, list)
for syntax in [u'/Kernels/MatDiffusion', u'/Kernels/Diffusion', u'/Kernels/Diffusion']:
found = False
for n in nodes:
if n.full_name == syntax:
found = True
break
self.assertTrue(found, 'Failed to locate: {}'.format(syntax))
return nodes
def testFind(self):
"""
Test finding various node types.
"""
# MooseObject
obj = self._syntax.findall('/Kernels/Diffusion')[0]
self.assertIsInstance(obj, MooseObjectNode)
self.assertEqual(obj.full_name, u'/Kernels/Diffusion')
# MooseObjectAction
moa = self._syntax.findall('/Kernels/AddKernelAction')[0]
self.assertIsInstance(moa, MooseObjectActionNode)
self.assertEqual(moa.full_name, u'/Kernels/AddKernelAction')
self.assertIsInstance(moa.parameters, dict)
self.assertIn('isObjectAction', moa.parameters)
# Action
act = self._syntax.findall('/Outputs/CommonOutputAction')[0]
self.assertIsInstance(act, ActionNode)
self.assertEqual(act.full_name, u'/Outputs/CommonOutputAction')
self.assertIsInstance(act.parameters, dict)
self.assertNotIn('isObjectAction', act.parameters)
# Syntax
syntax = self._syntax.findall('/Kernels')[0]
self.assertEqual(syntax.full_name, u'/Kernels')
def testParameters(self):
"""
Test parameters access.
"""
node = self._syntax.findall('/Kernels/Diffusion')[0]
self.assertIsInstance(node.parameters, dict)
self.assertEqual(node.parameters['type']['default'], 'Diffusion')
def testDescription(self):
"""
Test description access.
"""
node = self._syntax.findall('/Kernels/Diffusion')[0]
self.assertIsInstance(node.description, unicode)
self.assertIn('Laplacian', node.description)
def testObjects(self):
"""
Test that MooseObjects can be found for given syntax.
"""
nodes = self._syntax.objects('/Kernels', recursive=True)
self.assertTrue(len(nodes))
for node in nodes:
self.assertNotIsInstance(node, (ActionNode, MooseObjectActionNode))
def testActions(self):
"""
Test that Actions can be located for given syntax.
"""
nodes = self._syntax.actions('/Kernels', recursive=True)
self.assertTrue(len(nodes))
for node in nodes:
self.assertNotIsInstance(node, MooseObjectNode)
def testNotFound(self):
"""
Test that findall exits when bad syntax is given.
"""
nodes = self._syntax.findall('/NOT/VALID/SYNTAX')
self.assertEqual(len(nodes), 0)
def testNamedObjects(self):
"""
Test that named objects are handled.
"""
nodes = self._syntax.findall('/Functions/ParsedFunction')
self.assertTrue(len(nodes) == 1)
self.assertIsInstance(nodes[0], MooseObjectNode)
self.assertEqual(nodes[0].name, 'ParsedFunction')
self.assertEqual(nodes[0].class_name, 'MooseParsedFunction')
def testAppName(self):
"""
Test that the app name is discovered.
"""
node = self._syntax.findall('/Kernels/Diffusion')[0]
self.assertEqual(node.groups, {u'framework':u'Framework'})
node = self._syntax.findall('/Kernels/LevelSetAdvection')[0]
self.assertEqual(node.groups, {'level_set':'Level Set'})
node = self._syntax.findall('/Outputs/CommonOutputAction')[0]
self.assertEqual(node.groups, {'moose':'Moose', 'framework':'Framework'})
node = self._syntax.findall('/Kernels/AddKernelAction')[0]
self.assertEqual(node.groups, {u'framework':u'Framework'})
def testGroups(self):
"""
Test that the group method is working.
"""
node = self._syntax.findall('/Functions')[0]
self.assertIsInstance(node, SyntaxNode)
self.assertEqual(node.groups, {'moose':'Moose', 'framework':'Framework'})
node = self._syntax.findall('/Functions/LevelSetOlssonBubble')[0]
self.assertIsInstance(node, MooseObjectNode)
self.assertEqual(node.groups, {'level_set':'Level Set'})
node = self._syntax.findall('/Functions/ImageFunction')[0]
self.assertIsInstance(node, MooseObjectNode)
self.assertEqual(node.groups, {'framework':'Framework'})
def testHidden(self):
"""
Test the actions, syntax, and objects can be hidden.
"""
node = self._syntax.findall('/Functions')[0]
self.assertTrue(node.hidden)
node = self._syntax.findall('/Functions/ParsedFunction')[0]
self.assertTrue(node.hidden)
node = self._syntax.findall('/Functions/AddFunctionAction')[0]
self.assertTrue(node.hidden)
node = self._syntax.findall('/Adaptivity/Markers')[0]
self.assertFalse(node.hidden)
node = self._syntax.findall('/Adaptivity/Markers/BoxMarker')[0]
self.assertFalse(node.hidden)
node = self._syntax.findall('/Adaptivity/Markers/AddMarkerAction')[0]
self.assertFalse(node.hidden)
node = self._syntax.findall('/Adaptivity/Markers/AddMarkerAction')[0]
self.assertFalse(node.hidden)
node = self._syntax.findall('/Modules/PhaseField')[0]
self.assertTrue(node.hidden)
node = self._syntax.findall('/Modules/PhaseField/EulerAngles2RGB')[0]
self.assertTrue(node.hidden)
def testPostprocessorAndUserObjects(self):
"""
Test that Postprocessors don't show up as UserObjects.
"""
nodes = self._syntax.findall('UserObjects/NumVars')
self.assertNotEqual(nodes, [])
self.assertTrue(nodes[0].hidden)
def testActionGroups(self):
"""
Test that groups are assigned to Actions.
"""
nodes = self._syntax.findall('/AddMarkerAction')
self.assertTrue(nodes[0].groups, ['framework'])
if __name__ == '__main__':
unittest.main(verbosity=2)
| liuwenf/moose | python/MooseDocs/tests/common/moose_app_syntax/test_MooseAppSyntax.py | Python | lgpl-2.1 | 8,374 |
#!/usr/bin/env python
from tempfile import TemporaryFile, SpooledTemporaryFile
import os, sys, re, socket, time, pickle, csv, uuid, subprocess, argparse, decimal, select, platform, signal
class Debugger:
"""
The Debugger class is the entry point to our stack tracing capabilities.
    It determines which debugger implementation to use based on parsed arguments and
platform specs.
"""
def __init__(self, arguments):
if arguments.debugger == 'lldb':
self.debugger = lldbAPI(arguments)
else:
self.debugger = DebugInterpreter(arguments)
def getProcess(self, pid):
return self.debugger.getProcess(pid)
def getStackTrace(self, getProcess_tuple):
return self.debugger.getStackTrace(getProcess_tuple)
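# Never-called usage sketch: the Debugger front end only needs an object with a
# 'debugger' attribute (normally the parsed argparse namespace), after which a
# stack trace of a running PID takes two calls. The PID and debugger name below
# are hypothetical; 'gdb' selects the subprocess-based DebugInterpreter.
def _debugger_usage_sketch(pid='12345', debugger_name='gdb'):
    dbg = Debugger(argparse.Namespace(debugger=debugger_name))
    process = dbg.getProcess(pid)
    return dbg.getStackTrace(process)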
class lldbAPI:
def __init__(self, arguments):
self.debugger = lldb.SBDebugger.Create()
self.debugger.SetAsync(True)
def __del__(self):
lldb.SBDebugger.Destroy(self.debugger)
def getProcess(self, pid):
# Create and attach to the pid and return our debugger as a tuple
target = self.debugger.CreateTargetWithFileAndArch(None, None)
return target, pid
def getStackTrace(self, process_tuple):
target, pid = process_tuple
lldb_results = []
# reuse the process object if available
        if target.process.id != 0:
process = target.Attach(lldb.SBAttachInfo(target.process.id), lldb.SBError())
else:
process = target.Attach(lldb.SBAttachInfo(int(pid)), lldb.SBError())
# test if we succeeded at attaching to PID process
if process:
# grab thread information
lldb_results.append(process.GetThreadAtIndex(0).__str__())
# iterate through all frames and collect back trace information
for i in xrange(process.GetThreadAtIndex(0).GetNumFrames()):
lldb_results.append(process.GetThreadAtIndex(0).GetFrameAtIndex(i).__str__())
# Unfortunately we must detach each time we perform a stack
# trace. This severely limits our sample rate. It _appears_ to
            # be a bug in LLDB's Python API. Otherwise we would be able to:
#
# process.Stop()
# ..collect back trace..
# process.Continue()
#
# instead we have to:
process.Detach()
return '\n'.join(lldb_results)
else:
return ''
class DebugInterpreter:
"""
Currently, interfacing with LLDB via subprocess is impossible. This is due to lldb not printing
    to stdout or stderr when displaying the prompt to the user (informing the user that the debugger
is ready to receive input). However, this class may someday be able to, which is why
the self.debugger variable is present.
"""
def __init__(self, arguments):
self.last_position = 0
self.debugger = arguments.debugger
def _parseStackTrace(self, gibberish):
not_gibberish = re.findall(r'\(' + self.debugger + '\) (#.*)\(' + self.debugger + '\)', gibberish, re.DOTALL)
if len(not_gibberish) != 0:
return not_gibberish[0]
else:
            # Return a blank line, so as not to pollute the log. Gibberish here
# usually indicates a bunch of warnings or information about
# loading symbols
return ''
def _waitForResponse(self, dbg_stdout):
# Allow a maximum of 5 seconds to obtain a debugger prompt position.
# Otherwise we can hang indefinitely
end_queue = time.time() + float(5)
while time.time() < end_queue:
dbg_stdout.seek(self.last_position)
for line in dbg_stdout:
if line == '(' + self.debugger + ') ':
self.last_position = dbg_stdout.tell()
return True
time.sleep(0.01)
return False
def getProcess(self, pid):
# Create a temporary file the debugger can write stdout/err to
dbg_stdout = SpooledTemporaryFile()
        # Create and attach to the running process
process = subprocess.Popen([which(self.debugger)], stdin=subprocess.PIPE, stdout=dbg_stdout, stderr=dbg_stdout)
for command in [ 'attach ' + pid + '\n' ]:
if self._waitForResponse(dbg_stdout):
try:
process.stdin.write(command)
except:
return (False, self.debugger, 'quit unexpectedly')
else:
                return (False, self.debugger, 'could not attach to process in allotted time')
return (process, dbg_stdout)
def getStackTrace(self, process_tuple):
process, dbg_stdout = process_tuple
# Store our current file position so we can return to it and read
# the eventual entire stack trace output
batch_position = dbg_stdout.tell()
# Loop through commands necessary to create a back trace
for command in ['ctrl-c', 'bt\n', 'c\n']:
if command == 'ctrl-c':
process.send_signal(signal.SIGINT)
else:
if self._waitForResponse(dbg_stdout):
process.stdin.write(command)
else:
dbg_stdout.seek(batch_position)
return self.detachProcess(process_tuple)
# Return to previous file position so that we can return the entire
# stack trace
dbg_stdout.seek(batch_position)
return self._parseStackTrace(dbg_stdout.read())
def detachProcess(self, process):
process, dbg_stdout = process
# Offset the position due to ctrl-c not generating a newline event
tmp_position = (dbg_stdout.tell() - 1)
for command in ['ctrl-c', 'quit\n', 'y\n']:
if command == 'ctrl-c':
process.send_signal(signal.SIGINT)
else:
                # When these two variables are not equal, it's a safe assumption the
# debugger is ready to receive input
if tmp_position != dbg_stdout.tell():
tmp_position = dbg_stdout.tell()
try:
process.stdin.write(command)
except:
# Because we are trying to detach and quit the debugger just pass
pass
# Always return True for a detach call. What would we do if it failed anyway?
# Why am I even leaving a comment about this?
return True
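# Never-called sketch of what _parseStackTrace() keeps: only the text captured
# between two debugger prompts survives; everything else (symbol-loading
# chatter, warnings) is dropped. The gdb output below is fabricated.
def _parse_stack_trace_sketch():
    raw = '(gdb) #0  main () at hello.cpp:3\n(gdb) '
    interp = DebugInterpreter(argparse.Namespace(debugger='gdb'))
    return interp._parseStackTrace(raw)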
class Server:
def __init__(self, arguments):
self.arguments = arguments
self.arguments.cwd = os.getcwd()
# Test to see if we are starting as a server
if self.arguments.pbs == True:
if os.getenv('PBS_NODEFILE') != None:
# Initialize an agent, strictly for holding our stdout logs. Give it the UUID of 'server'
self.agent = Agent(self.arguments, 'server')
if self.arguments.recover:
self.logfile = WriteCSV(self.arguments.outfile[0], False)
else:
self.logfile = WriteCSV(self.arguments.outfile[0], True)
self.client_connections = []
self.startServer()
else:
print 'I could not find your PBS_NODEFILE. Is PBS loaded?'
sys.exit(1)
# If we are not a server, start the single client
else:
self.startClient()
def startServer(self):
# Setup the TCP socket
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.bind((socket.gethostname(), 0))
self.server_socket.listen(5)
(self.host, self.port) = self.server_socket.getsockname()
# We will store all connections (sockets objects) made to the server in a list
self.client_connections.append(self.server_socket)
# Launch the actual binary we want to track
self._launchJob()
# Now launch all pbs agents
self._launchClients()
# This is a try so we can handle a keyboard ctrl-c
try:
# Continue to listen and accept active connections from agents
# until all agents report a STOP command.
AGENTS_ACTIVE = True
while AGENTS_ACTIVE:
read_sockets, write_sockets, error_sockets = select.select(self.client_connections,[],[])
for sock in read_sockets:
if sock == self.server_socket:
                        # Accept an incoming connection
self.client_connections.append(self.server_socket.accept()[0])
else:
# Deal with the data being sent to the server by its agents
self.handleAgent()
# Check to see if _all_ agents are telling the server to stop
agent_count = len(self.agent.agent_data.keys())
current_count = 0
for agent in self.agent.agent_data.keys():
if self.agent.agent_data[agent]['STOP']:
current_count += 1
# if All Agents have reported a STOP command, begin to exit
if current_count == agent_count:
AGENTS_ACTIVE = False
# Gotta get out of the for loop somehow...
break
# Sleep a bit before reading additional data
time.sleep(self.arguments.repeat_rate[-1])
# Close the server socket
self.server_socket.close()
# Close the logfile as the server is about to exit
self.logfile.close()
# Cancel server operations if ctrl-c was pressed
except KeyboardInterrupt:
print 'Canceled by user. Wrote log:', self.arguments.outfile[0]
sys.exit(0)
# Normal exiting procedures
print '\n\nAll agents have stopped. Log file saved to:', self.arguments.outfile[0]
sys.exit(0)
def startClient(self):
Client(self.arguments)
def _launchClients(self):
# Read the environment PBS_NODEFILE
self._PBS_NODEFILE = open(os.getenv('PBS_NODEFILE'), 'r')
nodes = set(self._PBS_NODEFILE.read().split())
# Print some useful information about our setup
print 'Memory Logger running on Host:', self.host, 'Port:', self.port, \
'\nNodes:', ', '.join(nodes), \
'\nSample rate (including stdout):', self.arguments.repeat_rate[-1], 's (use --repeat-rate to adjust)', \
'\nRemote agents delaying', self.arguments.pbs_delay[-1], 'second/s before tracking. (use --pbs-delay to adjust)\n'
# Build our command list based on the PBS_NODEFILE
command = []
for node in nodes:
command.append([ 'ssh', node,
'bash --login -c "source /etc/profile && ' \
+ 'sleep ' + str(self.arguments.pbs_delay[-1]) + ' && ' \
+ os.path.abspath(__file__) \
+ ' --call-back-host ' \
+ self.host + ' ' + str(self.port) \
+ '"'])
# remote into each node and execute another copy of memory_logger.py
        # with a call back argument to receive further instructions
for pbs_node in command:
subprocess.Popen(pbs_node, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Launch the binary we intend to track
def _launchJob(self):
subprocess.Popen(self.arguments.run[-1].split(), stdout=self.agent.log, stderr=self.agent.log)
# A connection has been made from client to server
# Capture that data, and determin what to do with it
def handleAgent(self):
# Loop through all client connections, and receive data if any
for agent_socket in self.client_connections:
# Completely ignore the server_socket object
if agent_socket == self.server_socket:
continue
# Assign an AgentConnector for the task of handling data between client and server
reporting_agent = AgentConnector(self.arguments, agent_socket)
# OK... get data from a client and begin
new_data = reporting_agent.readData()
if new_data != None:
                # There should be only one dictionary key (we're reading data from just one client at a time)
agent_uuid = new_data.keys()[0]
# Update our dictionary of an agents data
self.agent.agent_data[agent_uuid] = new_data[agent_uuid]
                # Modify the incoming Agent's timestamp to match the Server's time (because every node is a little bit off)
if self.arguments.recover:
self.agent.agent_data[agent_uuid]['TIMESTAMP'] = GetTime().now - self.agent.delta
else:
self.agent.agent_data[agent_uuid]['TIMESTAMP'] = GetTime().now
# update total usage for all known reporting agents
total_usage = 0
for one_agent in self.agent.agent_data.keys():
total_usage += self.agent.agent_data[one_agent]['MEMORY']
self.agent.agent_data[agent_uuid]['TOTAL'] = int(total_usage)
                # Get any stdout that's happened thus far and apply it to whatever agent just sent us data
self.agent.agent_data[agent_uuid]['STDOUT'] = self.agent._getStdout()
# Write to our logfile
self.logfile.write(self.agent.agent_data[agent_uuid])
# Check for any agents sending a stop command. If we find one,
# set some zeroing values, and close that agent's socket.
if self.agent.agent_data[agent_uuid]['STOP']:
self.agent.agent_data[agent_uuid]['MEMORY'] = 0
agent_socket.close()
if agent_socket != self.server_socket:
self.client_connections.remove(agent_socket)
# Go ahead and set our server agent to STOP as well.
# The server will continue recording samples from agents
self.agent.agent_data['server']['STOP'] = True
# If an Agent has made a request for instructions, handle it here
update_client = False
if new_data[agent_uuid]['REQUEST'] != None:
for request in new_data[agent_uuid]['REQUEST'].iteritems():
if new_data[agent_uuid]['REQUEST'][request[0]] == '':
update_client = True
                            # We only support sending arguments supplied to the server back to the agent
for request_type in dir(self.arguments):
if request[0] == str(request_type):
self.agent.agent_data[agent_uuid]['REQUEST'][request[0]] = getattr(self.arguments, request[0])
# If an Agent needed additional instructions, go ahead and re-send those instructions
if update_client:
reporting_agent.sendData(self.agent.agent_data[agent_uuid])
class Client:
def __init__(self, arguments):
self.arguments = arguments
# Initialize an Agent with a UUID based on our hostname
self.my_agent = Agent(arguments, str(uuid.uuid3(uuid.NAMESPACE_DNS, socket.gethostname())))
# Initialize an AgentConnector
self.remote_server = AgentConnector(self.arguments)
# If client will talk to a server (PBS)
if self.arguments.call_back_host:
            # By initializing an Agent, we know agent_data contains the necessary message asking for further instructions
self.my_agent.agent_data[self.my_agent.my_uuid] = self.remote_server.sendData(self.my_agent.agent_data)
# Apply new instructions received from server (this basically updates our arguments)
for request in self.my_agent.agent_data[self.my_agent.my_uuid]['REQUEST'].iteritems():
for request_type in dir(self.arguments):
if request[0] == str(request_type):
setattr(self.arguments, request[0], request[1])
# Requests have been satisfied, set to None
self.my_agent.agent_data[self.my_agent.my_uuid]['REQUEST'] = None
# Change to the same directory as the server was when initiated (needed for PBS stuff)
os.chdir(self.arguments.cwd)
# Client will not be talking to a server, save data to a file instead
else:
# Deal with --recover
if self.arguments.recover:
# Do not overwrite the file
self.logfile = WriteCSV(self.arguments.outfile[0], False)
else:
# Overwrite the file
self.logfile = WriteCSV(self.arguments.outfile[0], True)
# Lets begin!
self.startProcess()
    # This function handles the starting and stopping of the sampler process.
# We loop until an agent returns a stop command.
def startProcess(self):
AGENTS_ACTIVE = True
# If we know we are the only client, go ahead and start the process we want to track.
if self.arguments.call_back_host == None:
subprocess.Popen(self.arguments.run[-1].split(), stdout=self.my_agent.log, stderr=self.my_agent.log)
# Delay just a bit to keep from recording a possible zero memory usage as the binary starts up
time.sleep(self.arguments.sample_delay[0])
# This is a try so we can handle a keyboard ctrl-c
try:
# Continue to process data until an Agent reports a STOP command
while AGENTS_ACTIVE:
# Take a sample
current_data = self.my_agent.takeSample()
# Handle the data supplied by the Agent.
self._handleData(current_data)
# If an Agent reported a STOP command, go ahead and begin the shutdown phase
if current_data[current_data.keys()[0]]['STOP']:
AGENTS_ACTIVE = False
# Sleep just a bit between samples, as to not saturate the machine
time.sleep(self.arguments.repeat_rate[-1])
# An agent reported a stop command... so let everyone know where the log was saved, and exit!
if self.arguments.call_back_host == None:
print 'Binary has exited and a log file has been written. You can now attempt to view this file by running' \
'\nthe memory_logger with either the --plot or --read arguments:\n\n', sys.argv[0], '--plot', self.arguments.outfile[0], \
'\n\nSee --help for additional viewing options.'
# Cancel server operations if ctrl-c was pressed
except KeyboardInterrupt:
self.logfile.close()
print 'Canceled by user. Wrote log:', self.arguments.outfile[0]
sys.exit(0)
# Everything went smooth.
sys.exit(0)
# Figure out what to do with the sampled data
def _handleData(self, data):
# Sending the sampled data to a server
if self.arguments.call_back_host:
self.remote_server.sendData(data)
# Saving the sampled data to a file
else:
# Compute the TOTAL memory usage to be how much our one agent reported
            # Because we're the only client doing any work
data[self.my_agent.my_uuid]['TOTAL'] = data[self.my_agent.my_uuid]['MEMORY']
self.logfile.write(data[self.my_agent.my_uuid])
# If the agent has been told to stop, close the database file
if self.my_agent.agent_data[self.my_agent.my_uuid]['STOP'] == True:
self.logfile.close()
class AgentConnector:
"""
Functions used to communicate to and from Client and Server.
Both Client and Server classes use this object.
readData()
sendData('message', socket_connection=None)
if sendData's socket_connection is None, it will create a new connection to the server
based on supplied arguments
"""
def __init__(self, arguments, connection=None):
self.arguments = arguments
self.connection = connection
self.CREATED_CONNECTION = False
# If the connection is None, meaning this object was instanced by a client,
# we must create a connection to the server first
if self.connection == None and self.arguments.call_back_host != None:
self.CREATED_CONNECTION = True
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection.connect((self.arguments.call_back_host[0], int(self.arguments.call_back_host[1])))
# read all data sent by an agent
def readData(self):
# Get how much data there is to receive
        # The first eight bytes are our data length
data_width = int(self.connection.recv(8))
tmp_received = ''
        # We need to receive precisely the amount of data the
# client is trying to send us.
while len(tmp_received) < data_width:
if data_width - len(tmp_received) > 1024:
tmp_received += self.connection.recv(1024)
else:
tmp_received += self.connection.recv(data_width - (len(tmp_received)))
# unpickle the received message
return self._unpickleMessage(tmp_received)
# send data to an agent
def sendData(self, message):
# pickle the data up, and send the message
self.connection.sendall(self._pickleMessage(message))
# If we had to create the socket (connection was none), and this client/agent is requesting
# instructions, go ahead and read the data that _better be there_ sent to us by the server.
if self.CREATED_CONNECTION and message[message.keys()[0]]['REQUEST'] != None:
return self.readData()
# The following two functions pickle up the data for easy socket transport
def _pickleMessage(self, message):
t = TemporaryFile()
pickle.dump(message, t)
t.seek(0)
str_msg = t.read()
str_len = len(str_msg)
message = "%-8d" % (str_len,) + str_msg
return message
def _unpickleMessage(self, message):
t = TemporaryFile()
t.write(message)
t.seek(0)
try:
return pickle.load(t)
except KeyError:
print 'Socket data was not pickled data: ', message
except:
raise
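# Never-called sketch of the wire format used by AgentConnector: every message
# is a pickled dictionary prefixed with an 8-character, left-justified decimal
# length field, which is exactly what readData() consumes with its recv(8).
def _framing_sketch(message=None):
    # Mirrors _pickleMessage() for a made-up sample dictionary.
    t = TemporaryFile()
    pickle.dump({'example': True} if message is None else message, t)
    t.seek(0)
    payload = t.read()
    return "%-8d" % (len(payload),) + payload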
class WriteCSV:
def __init__(self, logfile, overwrite):
if overwrite:
self.file_object = open(logfile, 'w', 1)
else:
self.file_object = open(logfile, 'a', 1)
csv.field_size_limit(sys.maxsize)
self.log_file = csv.writer(self.file_object, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
# Close the logfile
def close(self):
self.file_object.close()
# Write a CSV row
def write(self, data):
formatted_string = self._formatString(data)
self.log_file.writerow(formatted_string)
# Format the CSV output
def _formatString(self, data):
        # We will be saving this data in CSV format. Before we do, let's format it a bit here
format_order = ['TIMESTAMP', 'TOTAL', 'STDOUT', 'STACK', 'HOSTNAME', 'MEMORY']
formatted_text = []
for item in format_order:
# We have to handle python's way of formatting floats to strings specially
if item == 'TIMESTAMP':
formatted_text.append('%.6f' % data[item])
else:
formatted_text.append(data[item])
return formatted_text
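# Never-called sketch of a single log row in the column order _formatString()
# produces (TIMESTAMP, TOTAL, STDOUT, STACK, HOSTNAME, MEMORY); the file name
# and sample values below are made up.
def _csv_row_sketch(path='example_memory.log'):
    sample = {'TIMESTAMP': 1400000000.123456, 'TOTAL': 2048, 'STDOUT': '',
              'STACK': '', 'HOSTNAME': 'node001', 'MEMORY': 2048}
    log = WriteCSV(path, True)
    log.write(sample)
    log.close()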
class Agent:
"""
Each agent object contains its own sampled log data. The Agent class is responsible for
collecting and storing data. machine_id is used to identify the agent.
machine_id is supplied by the client class. This allows for multiple agents if desired
"""
def __init__(self, arguments, machine_id):
self.arguments = arguments
self.my_uuid = machine_id
self.track_process = ''
self.process = None
# This log object is for stdout purposes
self.log = TemporaryFile()
self.log_position = 0
# Discover if --recover is being used. If so, we need to obtain the
# timestamp of the last entry in the outfile log... a little bulky
# to do... and not a very good place to do it.
if self.arguments.recover:
if os.path.exists(self.arguments.outfile[-1]):
memory_list = []
history_file = open(self.arguments.outfile[-1], 'r')
csv.field_size_limit(sys.maxsize)
reader = csv.reader(history_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
# Get last item in list. Unfortunately, no way to do this until
# we have read the entire file...? Lucky for us, most memory log
# files are in the single digit megabytes
for row in reader:
memory_list.append(row)
history_file.close()
last_entry = float(memory_list[-1][0]) + self.arguments.repeat_rate[-1]
self.delta = (GetTime().now - last_entry)
else:
print 'Recovery options detected, but I could not find your previous memory log file.'
sys.exit(1)
else:
self.delta = 0
# Create the dictionary to which all sampled data will be stored
# NOTE: REQUEST dictionary items are instructions (arguments) we will
# ask the server to provide (if we are running with --pbs)
# Simply add them here. We _can not_ make the arguments match the
# server exactly, this would cause every agent launched to perform
# like a server... bad stuff
# Example: We added repeat_rate (see dictionary below). Now every
# agent would update their repeat_rate according to what the user
# supplied as an argument (--repeat_rate 0.02)
self.agent_data = { self.my_uuid :
{ 'HOSTNAME' : socket.gethostname(),
'STDOUT' : '',
'STACK' : '',
'MEMORY' : 0,
'TIMESTAMP' : GetTime().now - self.delta,
'REQUEST' : { 'run' : '',
'pstack' : '',
'repeat_rate' : '',
'cwd' : '',
'debugger' : ''},
'STOP' : False,
'TOTAL' : 0,
'DEBUG_LOG' : ''
}
}
# we need to create a place holder for our debugger because when
# memory_logger is run via --pbs, this Agent will not know what
# kind of debugger to use until it has made contact with the server
self.stack_trace = None
# NOTE: This is the only function that should be called in this class
def takeSample(self):
if self.arguments.pstack:
if self.stack_trace is None:
self.stack_trace = Debugger(self.arguments)
self.agent_data[self.my_uuid]['STACK'] = self._getStack()
# Always do the following
self.agent_data[self.my_uuid]['MEMORY'] = self._getMemory()
self.agent_data[self.my_uuid]['STDOUT'] = self._getStdout()
if self.arguments.recover:
self.agent_data[self.my_uuid]['TIMESTAMP'] = GetTime().now - self.delta
else:
self.agent_data[self.my_uuid]['TIMESTAMP'] = GetTime().now
# Return the data to whom ever asked for it
return self.agent_data
def _getStdout(self):
self.log.seek(self.log_position)
output = self.log.read()
self.log_position = self.log.tell()
sys.stdout.write(output)
return output
def _getMemory(self):
tmp_pids = self._getPIDs()
memory_usage = 0
if tmp_pids != {}:
for single_pid in tmp_pids.iteritems():
memory_usage += int(single_pid[1][0])
if memory_usage == 0:
# Memory usage hit zero? Then assume the binary being tracked has exited, so let's begin doing the same.
self.agent_data[self.my_uuid]['DEBUG_LOG'] = 'I found the total memory usage of all my processes hit 0. Stopping'
self.agent_data[self.my_uuid]['STOP'] = True
return 0
return int(memory_usage)
# No binary even detected? Let's assume it exited, so we should begin doing the same.
self.agent_data[self.my_uuid]['STOP'] = True
self.agent_data[self.my_uuid]['DEBUG_LOG'] = 'I found no processes running. Stopping'
return 0
def _getStack(self):
# Create a process object if none already exists. Reuse the old one if it does.
if self.process is None:
tmp_pids = self._getPIDs()
# Check if we actually found any running processes
if tmp_pids != {}:
# Obtain a single process id, any process id will do. This will be the process we attach to and perform stack traces
one_pid = tmp_pids.keys()[0]
self.process = self.stack_trace.getProcess(str(one_pid))
return self.stack_trace.getStackTrace(self.process)
else:
return ''
else:
return self.stack_trace.getStackTrace(self.process)
def _getPIDs(self):
pid_list = {}
# Determine the binary to sample and store it. Doing the findCommand is a little expensive.
if self.track_process == '':
self.track_process = self._findCommand(''.join(self.arguments.run))
# If we are tracking a binary
if self.arguments.run:
command = [which('ps'), '-e', '-o', 'pid,rss,user,args']
tmp_proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
all_pids = tmp_proc.communicate()[0].split('\n')
# Figure out what we are allowed to track (strip away mpiexec, processes not owned by us, etc)
for single_pid in all_pids:
if single_pid.find(self.track_process) != -1 and \
single_pid.find(__file__) == -1 and \
single_pid.find('mpirun') == -1 and \
single_pid.find(os.getenv('USER')) != -1 and \
single_pid.find('mpiexec') == -1:
pid_list[int(single_pid.split()[0])] = []
pid_list[int(single_pid.split()[0])].extend([single_pid.split()[1], single_pid.split()[3]])
return pid_list
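# Illustrative ps line (hypothetical, assuming 'alice' is the current $USER and
# 'moose_test-opt' is the tracked binary):
#   ' 1234 20560 alice /path/moose_test-opt -i input.i'
# passes the filters above and yields pid_list[1234] == ['20560', '/path/moose_test-opt']
# (split()[0] is the pid, [1] the rss in kilobytes, [3] the first token of the command).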
# Determine the command we are going to track
# A few things are happening here; first we strip off any MPI commands
# we then loop through the remaining items until we find a matching path
# e.g.: mpiexec -n 12 ../../../moose_test-opt -i simple_diffusion.i -r 6
# would first strip off mpiexec, check for the presence of -n in our
# current directory, then 12, then ../../../moose_test-opt <- found. It would
# stop and return the base name (moose_test-opt).
def _findCommand(self, command):
if command.find('mpiexec') == 0 or command.find('mpirun') == 0:
for binary in command.split():
if os.path.exists(binary):
return os.path.split(binary)[1]
elif os.path.exists(command.split()[0]):
return os.path.split(command.split()[0])[1]
class GetTime:
"""A simple formatted time object.
"""
def __init__(self, posix_time=None):
import datetime
if posix_time is None:
self.posix_time = datetime.datetime.now()
else:
self.posix_time = datetime.datetime.fromtimestamp(posix_time)
self.now = float(datetime.datetime.now().strftime('%s.%f'))
self.microsecond = self.posix_time.microsecond
self.second = self.posix_time.second
self.minute = self.posix_time.strftime('%M')
self.hour = self.posix_time.strftime('%H')
self.day = self.posix_time.strftime('%d')
self.month = self.posix_time.strftime('%m')
self.year = self.posix_time.year
self.dayname = self.posix_time.strftime('%a')
self.monthname = self.posix_time.strftime('%b')
class MemoryPlotter:
def __init__(self, arguments):
self.arguments = arguments
self.buildGraph()
def buildPlots(self):
plot_dictionary = {}
for log in self.arguments.plot:
memory_list = []
if os.path.exists(log):
log_file = open(log, 'r')
csv.field_size_limit(sys.maxsize)
reader = csv.reader(log_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
for row in reader:
memory_list.append(row)
log_file.close()
plot_dictionary[log.split('/')[-1:][0]] = memory_list
else:
print 'log not found:', log
sys.exit(1)
return plot_dictionary
def buildGraph(self):
try:
import matplotlib.pyplot as plt
except ImportError:
print 'Error importing matplotlib. Matplotlib not available on this system?'
sys.exit(1)
plot_dictionary = self.buildPlots()
fig = plt.figure()
plot_list = []
tmp_plot = []
tmp_legend = []
self.stdout_msgs = {}
self.pstack_msgs = {}
self.multiples = 1
self.memory_label = 'Memory in Bytes'
# Try and calculate memory sizes, so we can move annotations around a bit more accurately
largest_memory = []
for plot_name, value_list in plot_dictionary.iteritems():
for records in value_list:
largest_memory.append(int(records[1]))
largest_memory.sort()
# Determine the scale of the graph
suffixes = ["Terabytes", "Gigabytes", "Megabytes", "Kilobytes", "Bytes"]
multiplier = 1 << 40
index = 0
while largest_memory[-1] < multiplier and multiplier >= 1:
multiplier = multiplier >> 10
index = index + 1
self.multiples = multiplier
self.memory_label = "Memory in " + suffixes[index-1]
# Loop through each log file
for plot_name, value_list in plot_dictionary.iteritems():
plot_list.append(fig.add_subplot(111))
tmp_memory = []
tmp_time = []
tmp_stdout_x = []
tmp_stdout_y = []
tmp_pstack_x = []
tmp_pstack_y = []
stdout_msg = []
pstack_msg = []
# Get the start time, and make this 0
try:
tmp_zero = decimal.Decimal(value_list[0][0])
except:
print 'Could not parse log file:', plot_name, 'is this a valid memory_logger file?'
sys.exit(1)
# Populate the graph
for records in value_list:
tmp_memory.append(decimal.Decimal(records[1]) / self.multiples)
tmp_time.append(str(decimal.Decimal(records[0]) - tmp_zero))
if len(records[2]) > 0 and self.arguments.stdout:
tmp_stdout_x.append(tmp_time[-1])
tmp_stdout_y.append(tmp_memory[-1])
stdout_msg.append(records[2])
if len(records[3]) > 0 and self.arguments.pstack:
tmp_pstack_x.append(tmp_time[-1])
tmp_pstack_y.append(tmp_memory[-1])
pstack_msg.append(records[3])
# Do the actual plotting:
f, = plot_list[-1].plot(tmp_time, tmp_memory)
tmp_plot.append(f)
tmp_legend.append(plot_name)
plot_list[-1].grid(True)
plot_list[-1].set_ylabel(self.memory_label)
plot_list[-1].set_xlabel('Time in Seconds')
# Enable dork mode
if self.arguments.darkmode:
fig.set_facecolor('0.1')
plot_list[-1].set_axis_bgcolor('0.1')
plot_list[-1].spines['bottom'].set_color('white')
plot_list[-1].spines['top'].set_color('white')
plot_list[-1].spines['right'].set_color('white')
plot_list[-1].spines['left'].set_color('white')
plot_list[-1].tick_params(axis='x', colors='white')
plot_list[-1].tick_params(axis='y', colors='white')
plot_list[-1].xaxis.label.set_color('white')
plot_list[-1].yaxis.label.set_color('white')
plot_list[-1].grid(color='0.6')
# Plot annotations
if self.arguments.stdout:
stdout_line, = plot_list[-1].plot(tmp_stdout_x, tmp_stdout_y, 'x', picker=10, color=f.get_color(), markeredgecolor='0.08', markeredgewidth=0.1)
next_index = str(len(plot_list))
stdout_line.set_gid('stdout' + next_index)
self.stdout_msgs[next_index] = stdout_msg
self.buildAnnotation(plot_list[-1], tmp_stdout_x, tmp_stdout_y, stdout_msg, f.get_color())
if self.arguments.pstack:
pstack_line, = plot_list[-1].plot(tmp_pstack_x, tmp_pstack_y, 'o', picker=10, color=f.get_color(), markeredgecolor='0.08', markeredgewidth=0.1)
next_index = str(len(plot_list))
pstack_line.set_gid('pstack' + next_index)
self.pstack_msgs[next_index] = pstack_msg
# Make points clickable
fig.canvas.mpl_connect('pick_event', self)
# Create legend
legend = plt.legend(tmp_plot, tmp_legend, loc = self.arguments.legend)
legend.get_frame().set_alpha(0.7)
# More dork mode settings
if self.arguments.darkmode:
legend.get_frame().set_facecolor('0.2')
for text in legend.get_texts():
text.set_color('0.8')
plt.show()
def __call__(self, event):
color_codes = {'RESET':'\033[0m', 'r':'\033[31m','g':'\033[32m','c':'\033[36m','y':'\033[33m', 'b':'\033[34m', 'm':'\033[35m', 'k':'\033[0m', 'w':'\033[0m' }
line = event.artist
ind = event.ind
name = line.get_gid()[:-1]
index = line.get_gid()[-1]
if self.arguments.stdout and name == 'stdout':
if self.arguments.no_color != False:
print color_codes[line.get_color()]
print "stdout -----------------------------------------------------\n"
for id in ind:
print self.stdout_msgs[index][id]
if self.arguments.no_color != False:
print color_codes['RESET']
if self.arguments.pstack and name == 'pstack':
if self.arguments.no_color != False:
print color_codes[line.get_color()]
print "pstack -----------------------------------------------------\n"
for id in ind:
print self.pstack_msgs[index][id]
if self.arguments.no_color != False:
print color_codes['RESET']
def buildAnnotation(self,fig,x,y,msg,c):
for i in range(len(x)):
fig.annotate(str(msg[i].split('\n')[0][:self.arguments.trim_text[-1]]),
xy=(x[i], y[i]),
rotation=self.arguments.rotate_text[-1],
xytext=(decimal.Decimal(x[i]) + decimal.Decimal(self.arguments.move_text[0]), decimal.Decimal(y[i]) + decimal.Decimal(self.arguments.move_text[1])),
color=c, horizontalalignment='center', verticalalignment='bottom',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=0.5",
color=c
)
)
class ReadLog:
"""Read a memory_logger log file, and display the results to stdout in an easy to read form.
"""
def __init__(self, arguments):
self.arguments = arguments
history_file = open(self.arguments.read[-1], 'r')
reader = csv.reader(history_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
self.memory_list = []
for row in reader:
self.memory_list.append(row)
history_file.close()
self.sorted_list = []
self.mem_list = []
self.use_nodes = False
self.printHistory()
def printHistory(self):
RESET = '\033[0m'
BOLD = '\033[1m'
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
CYAN = '\033[36m'
YELLOW = '\033[33m'
last_memory = 0.0
(terminal_width, terminal_height) = self.getTerminalSize()
for timestamp in self.memory_list:
to = GetTime(float(timestamp[0]))
total_memory = int(timestamp[1])
log = timestamp[2].split('\n')
pstack = timestamp[3].split('\n')
node_name = str(timestamp[4])
node_memory = int(timestamp[5])
self.mem_list.append(total_memory)
self.sorted_list.append([str(to.day) + ' ' + str(to.monthname) + ' ' + str(to.hour) + ':' + str(to.minute) + ':' + '{:02.0f}'.format(to.second) + '.' + '{:06.0f}'.format(to.microsecond), total_memory, log, pstack, node_name, node_memory])
largest_memory = decimal.Decimal(max(self.mem_list))
if len(set([x[4] for x in self.sorted_list])) > 1:
self.use_nodes = True
print 'Date Stamp' + ' '*int(17) + 'Memory Usage | Percent of MAX memory used: ( ' + str('{:0,.0f}'.format(largest_memory)) + ' K )'
for item in self.sorted_list:
tmp_str = ''
if decimal.Decimal(item[1]) == largest_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], RESET, terminal_width)
elif item[1] > last_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], RED, terminal_width)
elif item[1] == last_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], CYAN, terminal_width)
else:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], GREEN, terminal_width)
last_memory = item[1]
sys.stdout.write(tmp_str)
print 'Date Stamp' + ' '*int(17) + 'Memory Usage | Percent of MAX memory used: ( ' + str('{:0,.0f}'.format(largest_memory)) + ' K )'
def formatText(self, largest_memory, date, total_memory, node_memory, log, pstack, reporting_host, color_code, terminal_width):
RESET = '\033[0m'
if decimal.Decimal(total_memory) == largest_memory:
percent = '100'
elif (decimal.Decimal(total_memory) / largest_memory) == 0:
percent = '0'
else:
percent = str(decimal.Decimal(total_memory) / largest_memory)[2:4] + '.' + str(decimal.Decimal(total_memory) / largest_memory)[4:6]
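# The string slicing above formats the Decimal ratio as a two-decimal percentage,
# e.g. a ratio of 0.7564... becomes '75' + '.' + '64' -> '75.64' (this assumes the
# ratio string has enough digits, which Decimal division provides).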
header = len(date) + 18
footer = len(percent) + 6
additional_correction = 0
max_length = decimal.Decimal(terminal_width - header) / largest_memory
total_position = total_memory * decimal.Decimal(max_length)
node_position = node_memory * decimal.Decimal(max_length)
tmp_log = ''
if self.arguments.stdout:
for single_log in log:
if single_log != '':
tmp_log += ' '*(header - len(' stdout |')) + ' stdout | ' + single_log + '\n'
if self.arguments.pstack:
for single_pstack in pstack:
if single_pstack != '':
tmp_log += ' '*(header - len(' pstack |')) + ' pstack | ' + single_pstack + '\n'
if self.arguments.separate and self.use_nodes != False:
message = '< ' + RESET + reporting_host + ' - ' + '{:10,.0f}'.format(node_memory) + ' K' + color_code + ' >'
additional_correction = len(RESET) + len(color_code)
elif self.use_nodes:
message = '< >'
else:
node_position = 0
message = ''
return date + '{:15,.0f}'.format(total_memory) + ' K | ' + color_code + '-'*int(node_position) + message + '-'*(int(total_position) - (int(node_position) + ((len(message) - additional_correction) + footer))) + RESET + '| ' + percent + '%\n' + tmp_log
def getTerminalSize(self):
"""Quicky to get terminal window size"""
env = os.environ
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (env['LINES'], env['COLUMNS'])
except:
cr = (25, 80)
return int(cr[1]), int(cr[0])
# A simple which function to return path to program
def which(program):
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
print 'I could not find the following binary:', program
sys.exit(1)
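# Illustrative usage (the resolved path is system-dependent): which('gdb') might
# return '/usr/bin/gdb'; if the binary cannot be found anywhere on PATH the
# script prints a message and exits.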
def verifyArgs(args):
possible_positions = [ 'center',
'center left',
'center right',
'upper center',
'lower center',
'best',
'right',
'left',
'upper right',
'lower right',
'upper left',
'lower left']
if args.legend not in possible_positions:
print 'Invalid legend position requested. Possible values are:\n\t', '\n\t'.join([x for x in possible_positions])
sys.exit(1)
option_count = 0
if args.read:
option_count += 1
if args.run:
option_count += 1
if args.plot:
option_count += 1
if option_count != 1 and args.pbs != True:
if args.call_back_host == None:
print 'You must use one of the following: run, read, or plot'
sys.exit(1)
args.cwd = os.getcwd()
# Work with --recover (a MOOSE application specific option)
args.recover = False
if args.run:
if args.run[0].find('--recover') != -1:
args.recover = True
if args.run[0].find('~') != -1:
print "You must use absolute paths. Python does not understand the '~' path discriptor.\nYou can use environment vairables (eg: $HOME) so long as they are absolute paths."
sys.exit(1)
if args.outfile == None and args.run:
# Attempt to build the output file based on input file
if re.findall(r'-i (\w+)', args.run[0]) != []:
args.outfile = [os.getcwd() + '/' + re.findall(r'-i (\w+)', args.run[0])[0] + '_memory.log']
else:
args.outfile = [os.getcwd() + '/' + args.run[0].replace('..', '').replace('/', '').replace(' ', '_') + '.log']
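# Illustrative (hypothetical command): --run 'moose_test-opt -i simple_diffusion.i'
# matches '-i (\w+)' as 'simple_diffusion', so the default outfile becomes
# <cwd>/simple_diffusion_memory.log; commands without '-i' fall back to the
# sanitized command name with a '.log' suffix.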
if args.pstack and (args.read is None and args.plot is None):
if args.debugger is not None:
if args.debugger == 'lldb':
if platform.platform().find('Darwin') != -1:
try:
import lldb
except ImportError:
lldbImportError()
sys.exit(1)
else:
results = which('lldb')
elif args.debugger == 'gdb':
results = which('gdb')
else:
print 'Invalid debugger selected. You must choose between gdb and lldb using the --debugger argument'
sys.exit(1)
return args
def parseArguments(args=None):
parser = argparse.ArgumentParser(description='Track and Display memory usage')
rungroup = parser.add_argument_group('Tracking', 'The following options control how the memory logger tracks memory usage')
rungroup.add_argument('--run', nargs=1, metavar='command', help='Run specified command using absolute paths. You must encapsulate the command in quotes.')
rungroup.add_argument('--pbs', dest='pbs', metavar='', action='store_const', const=True, default=False, help='Instruct memory logger to tally all launches on all nodes\n ')
rungroup.add_argument('--pbs-delay', dest='pbs_delay', metavar='float', nargs=1, type=float, default=[1.0], help='For larger jobs, you may need to increase the delay before the memory_logger launches the tracking agents\n ')
rungroup.add_argument('--sample-delay', dest='sample_delay', metavar='float', nargs=1, type=float, default=[0.25], help='The time to delay before taking the first sample (when not using pbs)')
rungroup.add_argument('--repeat-rate', nargs=1, metavar='float', type=float, default=[0.25], help='Indicate the sleep delay in float seconds to check memory usage (default 0.25 seconds)\n ')
rungroup.add_argument('--outfile', nargs=1, metavar='file', help='Save log to specified file. (Defaults based on run command)\n ')
readgroup = parser.add_argument_group('Read / Display', 'Options to manipulate or read log files created by the memory_logger')
readgroup.add_argument('--read', nargs=1, metavar='file', help='Read a specified memory log file to stdout\n ')
readgroup.add_argument('--separate', dest='separate', action='store_const', const=True, default=False, help='Display individual node memory usage (read mode only)\n ')
readgroup.add_argument('--plot', nargs="+", metavar='file', help='Display a graphical representation of memory usage (Requires Matplotlib). Specify a single file or a list of files to plot\n ')
readgroup.add_argument('--legend', metavar='"lower left"', default='lower left', help='Place legend in one of the following locations (default --legend "lower left") "center", "center left", "center right", "upper center", "lower center", "best", "right", "left", "upper right", "lower right", "upper left", "lower left"\n ')
commongroup = parser.add_argument_group('Common Options', 'The following options can be used when displaying the results')
commongroup.add_argument('--pstack', dest='pstack', action='store_const', const=True, default=False, help='Display/Record stack trace information (if available)\n ')
commongroup.add_argument('--stdout', dest='stdout', action='store_const', const=True, default=False, help='Display stdout information\n ')
commongroup.add_argument('--debugger', dest='debugger', metavar='gdb | lldb', nargs='?', help='Specify the debugger to use. Possible values: gdb or lldb\n ')
plotgroup = parser.add_argument_group('Plot Options', 'Additional options when using --plot')
plotgroup.add_argument('--rotate-text', nargs=1, metavar='int', type=int, default=[30], help='Rotate stdout/pstack text by this amount (default 30)\n ')
plotgroup.add_argument('--move-text', nargs=2, metavar='int', default=['0', '0'], help='Move text X and Y by this amount (default 0 0)\n ')
plotgroup.add_argument('--trim-text', nargs=1, metavar='int', type=int, default=[15], help='Display this many characters in stdout/pstack (default 15)\n ')
plotgroup.add_argument('--no-color', dest='no_color', metavar='', action='store_const', const=False, help='When printing output to stdout do not use color codes\n ')
plotgroup.add_argument('--darkmode', dest='darkmode', metavar='', action='store_const', const=True, help='When you want to be cool\n ')
internalgroup = parser.add_argument_group('Internal PBS Options', 'The following options are used to control how memory_logger as a tracking agent connects back to the caller. These are set automatically when using PBS and can be ignored.')
internalgroup.add_argument('--call-back-host', nargs=2, help='Server hostname and port that launched memory_logger\n ')
return verifyArgs(parser.parse_args(args))
def lldbImportError():
print """
Unable to import lldb
The Python lldb API is now supplied by Xcode but not
automatically set in your PYTHONPATH. Please search
the internet for how to do this if you wish to use
--pstack on Mac OS X.
Note: If you installed Xcode to the default location of
/Applications, you should only have to perform the following:
export PYTHONPATH=/Applications/Xcode.app/Contents/SharedFrameworks/LLDB.framework/Resources/Python:$PYTHONPATH
###!! IMPORTANT !!###
It may also be necessary to unload the miniconda module.
If you receive a fatal Python error about PyThreadState
try using your system's version of Python instead.
"""
if __name__ == '__main__':
args = parseArguments()
if args.read:
ReadLog(args)
sys.exit(0)
if args.plot:
MemoryPlotter(args)
sys.exit(0)
Server(args)
| joshua-cogliati-inl/moose | scripts/memory_logger.py | Python | lgpl-2.1 | 49,156 |
from __future__ import unicode_literals
import errno
import os
import socket
import time
import random
import re
from .common import FileDownloader
from ..compat import (
compat_str,
compat_urllib_error,
)
from ..utils import (
ContentTooShortError,
encodeFilename,
int_or_none,
sanitize_open,
sanitized_Request,
write_xattr,
XAttrMetadataError,
XAttrUnavailableError,
)
class HttpFD(FileDownloader):
def real_download(self, filename, info_dict):
url = info_dict['url']
class DownloadContext(dict):
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
ctx = DownloadContext()
ctx.filename = filename
ctx.tmpfilename = self.temp_name(filename)
ctx.stream = None
# Do not include the Accept-Encoding header
headers = {'Youtubedl-no-compression': 'True'}
add_headers = info_dict.get('http_headers')
if add_headers:
headers.update(add_headers)
is_test = self.params.get('test', False)
chunk_size = self._TEST_FILE_SIZE if is_test else (
info_dict.get('downloader_options', {}).get('http_chunk_size') or
self.params.get('http_chunk_size') or 0)
ctx.open_mode = 'wb'
ctx.resume_len = 0
ctx.data_len = None
ctx.block_size = self.params.get('buffersize', 1024)
ctx.start_time = time.time()
ctx.chunk_size = None
if self.params.get('continuedl', True):
# Establish possible resume length
if os.path.isfile(encodeFilename(ctx.tmpfilename)):
ctx.resume_len = os.path.getsize(
encodeFilename(ctx.tmpfilename))
ctx.is_resume = ctx.resume_len > 0
count = 0
retries = self.params.get('retries', 0)
class SucceedDownload(Exception):
pass
class RetryDownload(Exception):
def __init__(self, source_error):
self.source_error = source_error
class NextFragment(Exception):
pass
def set_range(req, start, end):
range_header = 'bytes=%d-' % start
if end:
range_header += compat_str(end)
req.add_header('Range', range_header)
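# For example, set_range(req, 1000, 1049575) adds 'Range: bytes=1000-1049575',
# while set_range(req, 1000, None) adds the open-ended 'Range: bytes=1000-'.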
def establish_connection():
ctx.chunk_size = (random.randint(int(chunk_size * 0.95), chunk_size)
if not is_test and chunk_size else chunk_size)
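# When chunked downloads are enabled the chunk size is jittered to between 95%
# and 100% of the configured value, e.g. a hypothetical 10 MiB http_chunk_size
# yields requests of roughly 9.5-10 MiB each (tests always use the fixed
# _TEST_FILE_SIZE instead).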
if ctx.resume_len > 0:
range_start = ctx.resume_len
if ctx.is_resume:
self.report_resuming_byte(ctx.resume_len)
ctx.open_mode = 'ab'
elif ctx.chunk_size > 0:
range_start = 0
else:
range_start = None
ctx.is_resume = False
range_end = range_start + ctx.chunk_size - 1 if ctx.chunk_size else None
if range_end and ctx.data_len is not None and range_end >= ctx.data_len:
range_end = ctx.data_len - 1
has_range = range_start is not None
ctx.has_range = has_range
request = sanitized_Request(url, None, headers)
if has_range:
set_range(request, range_start, range_end)
# Establish connection
try:
ctx.data = self.ydl.urlopen(request)
# When trying to resume, the Content-Range HTTP header of the response has to be checked
# against the value of the requested Range HTTP header. This is because some webservers
# don't support resuming and serve the whole file with no Content-Range
# set in the response despite the requested Range (see
# https://github.com/ytdl-org/youtube-dl/issues/6057#issuecomment-126129799)
if has_range:
content_range = ctx.data.headers.get('Content-Range')
if content_range:
content_range_m = re.search(r'bytes (\d+)-(\d+)?(?:/(\d+))?', content_range)
# Content-Range is present and matches requested Range, resume is possible
if content_range_m:
if range_start == int(content_range_m.group(1)):
content_range_end = int_or_none(content_range_m.group(2))
content_len = int_or_none(content_range_m.group(3))
accept_content_len = (
# Non-chunked download
not ctx.chunk_size or
# Chunked download and requested piece or
# its part is promised to be served
content_range_end == range_end or
content_len < range_end)
if accept_content_len:
ctx.data_len = content_len
return
# Content-Range is either not present or invalid. Assuming remote webserver is
# trying to send the whole file, resume is not possible, so wiping the local file
# and performing entire redownload
self.report_unable_to_resume()
ctx.resume_len = 0
ctx.open_mode = 'wb'
ctx.data_len = int_or_none(ctx.data.info().get('Content-length', None))
return
except (compat_urllib_error.HTTPError, ) as err:
if err.code == 416:
# Unable to resume (requested range not satisfiable)
try:
# Open the connection again without the range header
ctx.data = self.ydl.urlopen(
sanitized_Request(url, None, headers))
content_length = ctx.data.info()['Content-Length']
except (compat_urllib_error.HTTPError, ) as err:
if err.code < 500 or err.code >= 600:
raise
else:
# Examine the reported length
if (content_length is not None and
(ctx.resume_len - 100 < int(content_length) < ctx.resume_len + 100)):
# The file had already been fully downloaded.
# Explanation to the above condition: in issue #175 it was revealed that
# YouTube sometimes adds or removes a few bytes from the end of the file,
# changing the file size slightly and causing problems for some users. So
# I decided to implement a suggested change and consider the file
# completely downloaded if the file size differs less than 100 bytes from
# the one in the hard drive.
self.report_file_already_downloaded(ctx.filename)
self.try_rename(ctx.tmpfilename, ctx.filename)
self._hook_progress({
'filename': ctx.filename,
'status': 'finished',
'downloaded_bytes': ctx.resume_len,
'total_bytes': ctx.resume_len,
})
raise SucceedDownload()
else:
# The length does not match, we start the download over
self.report_unable_to_resume()
ctx.resume_len = 0
ctx.open_mode = 'wb'
return
elif err.code < 500 or err.code >= 600:
# Unexpected HTTP error
raise
raise RetryDownload(err)
except socket.error as err:
if err.errno != errno.ECONNRESET:
# Connection reset is no problem, just retry
raise
raise RetryDownload(err)
def download():
data_len = ctx.data.info().get('Content-length', None)
# Range HTTP header may be ignored/unsupported by a webserver
# (e.g. extractor/scivee.py, extractor/bambuser.py).
# However, for a test we still would like to download just a piece of a file.
# To achieve this we limit data_len to _TEST_FILE_SIZE and manually control
# block size when downloading a file.
if is_test and (data_len is None or int(data_len) > self._TEST_FILE_SIZE):
data_len = self._TEST_FILE_SIZE
if data_len is not None:
data_len = int(data_len) + ctx.resume_len
min_data_len = self.params.get('min_filesize')
max_data_len = self.params.get('max_filesize')
if min_data_len is not None and data_len < min_data_len:
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
return False
if max_data_len is not None and data_len > max_data_len:
self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
return False
byte_counter = 0 + ctx.resume_len
block_size = ctx.block_size
start = time.time()
# measure time over whole while-loop, so slow_down() and best_block_size() work together properly
now = None # needed for slow_down() in the first loop run
before = start # start measuring
def retry(e):
to_stdout = ctx.tmpfilename == '-'
if not to_stdout:
ctx.stream.close()
ctx.stream = None
ctx.resume_len = byte_counter if to_stdout else os.path.getsize(encodeFilename(ctx.tmpfilename))
raise RetryDownload(e)
while True:
try:
# Download and write
data_block = ctx.data.read(block_size if not is_test else min(block_size, data_len - byte_counter))
# socket.timeout is a subclass of socket.error but may not have
# errno set
except socket.timeout as e:
retry(e)
except socket.error as e:
if e.errno not in (errno.ECONNRESET, errno.ETIMEDOUT):
raise
retry(e)
byte_counter += len(data_block)
# exit loop when download is finished
if len(data_block) == 0:
break
# Open destination file just in time
if ctx.stream is None:
try:
ctx.stream, ctx.tmpfilename = sanitize_open(
ctx.tmpfilename, ctx.open_mode)
assert ctx.stream is not None
ctx.filename = self.undo_temp_name(ctx.tmpfilename)
self.report_destination(ctx.filename)
except (OSError, IOError) as err:
self.report_error('unable to open for writing: %s' % str(err))
return False
if self.params.get('xattr_set_filesize', False) and data_len is not None:
try:
write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode('utf-8'))
except (XAttrUnavailableError, XAttrMetadataError) as err:
self.report_error('unable to set filesize xattr: %s' % str(err))
try:
ctx.stream.write(data_block)
except (IOError, OSError) as err:
self.to_stderr('\n')
self.report_error('unable to write data: %s' % str(err))
return False
# Apply rate limit
self.slow_down(start, now, byte_counter - ctx.resume_len)
# end measuring of one loop run
now = time.time()
after = now
# Adjust block size
if not self.params.get('noresizebuffer', False):
block_size = self.best_block_size(after - before, len(data_block))
before = after
# Progress message
speed = self.calc_speed(start, now, byte_counter - ctx.resume_len)
if ctx.data_len is None:
eta = None
else:
eta = self.calc_eta(start, time.time(), ctx.data_len - ctx.resume_len, byte_counter - ctx.resume_len)
self._hook_progress({
'status': 'downloading',
'downloaded_bytes': byte_counter,
'total_bytes': ctx.data_len,
'tmpfilename': ctx.tmpfilename,
'filename': ctx.filename,
'eta': eta,
'speed': speed,
'elapsed': now - ctx.start_time,
})
if is_test and byte_counter == data_len:
break
if not is_test and ctx.chunk_size and ctx.data_len is not None and byte_counter < ctx.data_len:
ctx.resume_len = byte_counter
# ctx.block_size = block_size
raise NextFragment()
if ctx.stream is None:
self.to_stderr('\n')
self.report_error('Did not get any data blocks')
return False
if ctx.tmpfilename != '-':
ctx.stream.close()
if data_len is not None and byte_counter != data_len:
err = ContentTooShortError(byte_counter, int(data_len))
if count <= retries:
retry(err)
raise err
self.try_rename(ctx.tmpfilename, ctx.filename)
# Update file modification time
if self.params.get('updatetime', True):
info_dict['filetime'] = self.try_utime(ctx.filename, ctx.data.info().get('last-modified', None))
self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': byte_counter,
'filename': ctx.filename,
'status': 'finished',
'elapsed': time.time() - ctx.start_time,
})
return True
while count <= retries:
try:
establish_connection()
return download()
except RetryDownload as e:
count += 1
if count <= retries:
self.report_retry(e.source_error, count, retries)
continue
except NextFragment:
continue
except SucceedDownload:
return True
self.report_error('giving up after %s retries' % retries)
return False
| rrooij/youtube-dl | youtube_dl/downloader/http.py | Python | unlicense | 15,515 |
# Copyright 2013-2016 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra import ConsistencyLevel
from cassandra.cqlengine import operators
from cassandra.cqlengine.named import NamedKeyspace
from cassandra.cqlengine.operators import EqualsOperator, GreaterThanOrEqualOperator
from cassandra.cqlengine.query import ResultObject
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.cqlengine import models
from tests.integration.cqlengine import setup_connection, execute_count
from tests.integration.cqlengine.base import BaseCassEngTestCase
from tests.integration.cqlengine.query.test_queryset import BaseQuerySetUsage
from tests.integration import BasicSharedKeyspaceUnitTestCase, greaterthanorequalcass30
class TestQuerySetOperation(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestQuerySetOperation, cls).setUpClass()
cls.keyspace = NamedKeyspace('cqlengine_test')
cls.table = cls.keyspace.table('test_model')
def test_query_filter_parsing(self):
"""
Tests that the queryset filter method parses its kwargs properly
"""
query1 = self.table.objects(test_id=5)
assert len(query1._where) == 1
op = query1._where[0]
assert isinstance(op.operator, operators.EqualsOperator)
assert op.value == 5
query2 = query1.filter(expected_result__gte=1)
assert len(query2._where) == 2
op = query2._where[1]
assert isinstance(op.operator, operators.GreaterThanOrEqualOperator)
assert op.value == 1
def test_query_expression_parsing(self):
""" Tests that query experessions are evaluated properly """
query1 = self.table.filter(self.table.column('test_id') == 5)
assert len(query1._where) == 1
op = query1._where[0]
assert isinstance(op.operator, operators.EqualsOperator)
assert op.value == 5
query2 = query1.filter(self.table.column('expected_result') >= 1)
assert len(query2._where) == 2
op = query2._where[1]
assert isinstance(op.operator, operators.GreaterThanOrEqualOperator)
assert op.value == 1
def test_filter_method_where_clause_generation(self):
"""
Tests the where clause creation
"""
query1 = self.table.objects(test_id=5)
self.assertEqual(len(query1._where), 1)
where = query1._where[0]
self.assertEqual(where.field, 'test_id')
self.assertEqual(where.value, 5)
query2 = query1.filter(expected_result__gte=1)
self.assertEqual(len(query2._where), 2)
where = query2._where[0]
self.assertEqual(where.field, 'test_id')
self.assertIsInstance(where.operator, EqualsOperator)
self.assertEqual(where.value, 5)
where = query2._where[1]
self.assertEqual(where.field, 'expected_result')
self.assertIsInstance(where.operator, GreaterThanOrEqualOperator)
self.assertEqual(where.value, 1)
def test_query_expression_where_clause_generation(self):
"""
Tests the where clause creation
"""
query1 = self.table.objects(self.table.column('test_id') == 5)
self.assertEqual(len(query1._where), 1)
where = query1._where[0]
self.assertEqual(where.field, 'test_id')
self.assertEqual(where.value, 5)
query2 = query1.filter(self.table.column('expected_result') >= 1)
self.assertEqual(len(query2._where), 2)
where = query2._where[0]
self.assertEqual(where.field, 'test_id')
self.assertIsInstance(where.operator, EqualsOperator)
self.assertEqual(where.value, 5)
where = query2._where[1]
self.assertEqual(where.field, 'expected_result')
self.assertIsInstance(where.operator, GreaterThanOrEqualOperator)
self.assertEqual(where.value, 1)
class TestQuerySetCountSelectionAndIteration(BaseQuerySetUsage):
@classmethod
def setUpClass(cls):
super(TestQuerySetCountSelectionAndIteration, cls).setUpClass()
from tests.integration.cqlengine.query.test_queryset import TestModel
ks, tn = TestModel.column_family_name().split('.')
cls.keyspace = NamedKeyspace(ks)
cls.table = cls.keyspace.table(tn)
@execute_count(2)
def test_count(self):
""" Tests that adding filtering statements affects the count query as expected """
assert self.table.objects.count() == 12
q = self.table.objects(test_id=0)
assert q.count() == 4
@execute_count(2)
def test_query_expression_count(self):
""" Tests that adding query statements affects the count query as expected """
assert self.table.objects.count() == 12
q = self.table.objects(self.table.column('test_id') == 0)
assert q.count() == 4
@execute_count(3)
def test_iteration(self):
""" Tests that iterating over a query set pulls back all of the expected results """
q = self.table.objects(test_id=0)
# tuple of expected attempt_id, expected_result values
compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
for t in q:
val = t.attempt_id, t.expected_result
assert val in compare_set
compare_set.remove(val)
assert len(compare_set) == 0
# test with regular filtering
q = self.table.objects(attempt_id=3).allow_filtering()
assert len(q) == 3
# tuple of expected test_id, expected_result values
compare_set = set([(0, 20), (1, 20), (2, 75)])
for t in q:
val = t.test_id, t.expected_result
assert val in compare_set
compare_set.remove(val)
assert len(compare_set) == 0
# test with query method
q = self.table.objects(self.table.column('attempt_id') == 3).allow_filtering()
assert len(q) == 3
# tuple of expected test_id, expected_result values
compare_set = set([(0, 20), (1, 20), (2, 75)])
for t in q:
val = t.test_id, t.expected_result
assert val in compare_set
compare_set.remove(val)
assert len(compare_set) == 0
@execute_count(2)
def test_multiple_iterations_work_properly(self):
""" Tests that iterating over a query set more than once works """
# test with both the filtering method and the query method
for q in (self.table.objects(test_id=0), self.table.objects(self.table.column('test_id') == 0)):
# tuple of expected attempt_id, expected_result values
compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
for t in q:
val = t.attempt_id, t.expected_result
assert val in compare_set
compare_set.remove(val)
assert len(compare_set) == 0
# try it again
compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
for t in q:
val = t.attempt_id, t.expected_result
assert val in compare_set
compare_set.remove(val)
assert len(compare_set) == 0
@execute_count(2)
def test_multiple_iterators_are_isolated(self):
"""
Tests that the use of one iterator does not affect the behavior of another
"""
for q in (self.table.objects(test_id=0), self.table.objects(self.table.column('test_id') == 0)):
q = q.order_by('attempt_id')
expected_order = [0, 1, 2, 3]
iter1 = iter(q)
iter2 = iter(q)
for attempt_id in expected_order:
assert next(iter1).attempt_id == attempt_id
assert next(iter2).attempt_id == attempt_id
@execute_count(3)
def test_get_success_case(self):
"""
Tests that the .get() method works on new and existing querysets
"""
m = self.table.objects.get(test_id=0, attempt_id=0)
assert isinstance(m, ResultObject)
assert m.test_id == 0
assert m.attempt_id == 0
q = self.table.objects(test_id=0, attempt_id=0)
m = q.get()
assert isinstance(m, ResultObject)
assert m.test_id == 0
assert m.attempt_id == 0
q = self.table.objects(test_id=0)
m = q.get(attempt_id=0)
assert isinstance(m, ResultObject)
assert m.test_id == 0
assert m.attempt_id == 0
@execute_count(3)
def test_query_expression_get_success_case(self):
"""
Tests that the .get() method works on new and existing querysets
"""
m = self.table.get(self.table.column('test_id') == 0, self.table.column('attempt_id') == 0)
assert isinstance(m, ResultObject)
assert m.test_id == 0
assert m.attempt_id == 0
q = self.table.objects(self.table.column('test_id') == 0, self.table.column('attempt_id') == 0)
m = q.get()
assert isinstance(m, ResultObject)
assert m.test_id == 0
assert m.attempt_id == 0
q = self.table.objects(self.table.column('test_id') == 0)
m = q.get(self.table.column('attempt_id') == 0)
assert isinstance(m, ResultObject)
assert m.test_id == 0
assert m.attempt_id == 0
@execute_count(1)
def test_get_doesnotexist_exception(self):
"""
Tests that get calls that don't return a result raise a DoesNotExist error
"""
with self.assertRaises(self.table.DoesNotExist):
self.table.objects.get(test_id=100)
@execute_count(1)
def test_get_multipleobjects_exception(self):
"""
Tests that get calls that return multiple results raise a MultipleObjectsReturned error
"""
with self.assertRaises(self.table.MultipleObjectsReturned):
self.table.objects.get(test_id=1)
class TestNamedWithMV(BasicSharedKeyspaceUnitTestCase):
@classmethod
def setUpClass(cls):
super(TestNamedWithMV, cls).setUpClass()
cls.default_keyspace = models.DEFAULT_KEYSPACE
models.DEFAULT_KEYSPACE = cls.ks_name
@classmethod
def tearDownClass(cls):
models.DEFAULT_KEYSPACE = cls.default_keyspace
setup_connection(models.DEFAULT_KEYSPACE)
super(TestNamedWithMV, cls).tearDownClass()
@greaterthanorequalcass30
@execute_count(5)
def test_named_table_with_mv(self):
"""
Test NamedTable access to materialized views
Creates some materialized views using traditional CQL, then ensures we can access those materialized views using
the NamedKeyspace and NamedTable interfaces. Tests basic filtering as well.
@since 3.0.0
@jira_ticket PYTHON-406
@expected_result Named Tables should have access to materialized views
@test_category materialized_view
"""
ks = models.DEFAULT_KEYSPACE
self.session.execute("DROP MATERIALIZED VIEW IF EXISTS {0}.alltimehigh".format(ks))
self.session.execute("DROP MATERIALIZED VIEW IF EXISTS {0}.monthlyhigh".format(ks))
self.session.execute("DROP TABLE IF EXISTS {0}.scores".format(ks))
create_table = """CREATE TABLE {0}.scores(
user TEXT,
game TEXT,
year INT,
month INT,
day INT,
score INT,
PRIMARY KEY (user, game, year, month, day)
)""".format(ks)
self.session.execute(create_table)
create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS
SELECT game, year, month, score, user, day FROM {0}.scores
WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL
PRIMARY KEY ((game, year, month), score, user, day)
WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(ks)
self.session.execute(create_mv)
create_mv_alltime = """CREATE MATERIALIZED VIEW {0}.alltimehigh AS
SELECT * FROM {0}.scores
WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL
PRIMARY KEY (game, score, user, year, month, day)
WITH CLUSTERING ORDER BY (score DESC)""".format(ks)
self.session.execute(create_mv_alltime)
# Populate the base table with data
prepared_insert = self.session.prepare("""INSERT INTO {0}.scores (user, game, year, month, day, score) VALUES (?, ?, ? ,? ,?, ?)""".format(ks))
parameters = (('pcmanus', 'Coup', 2015, 5, 1, 4000),
('jbellis', 'Coup', 2015, 5, 3, 1750),
('yukim', 'Coup', 2015, 5, 3, 2250),
('tjake', 'Coup', 2015, 5, 3, 500),
('iamaleksey', 'Coup', 2015, 6, 1, 2500),
('tjake', 'Coup', 2015, 6, 2, 1000),
('pcmanus', 'Coup', 2015, 6, 2, 2000),
('jmckenzie', 'Coup', 2015, 6, 9, 2700),
('jbellis', 'Coup', 2015, 6, 20, 3500),
('jbellis', 'Checkers', 2015, 6, 20, 1200),
('jbellis', 'Chess', 2015, 6, 21, 3500),
('pcmanus', 'Chess', 2015, 1, 25, 3200))
prepared_insert.consistency_level = ConsistencyLevel.ALL
execute_concurrent_with_args(self.session, prepared_insert, parameters)
# Attempt to query the data using Named Table interface
# Also test filtering on mv's
key_space = NamedKeyspace(ks)
mv_monthly = key_space.table("monthlyhigh")
mv_all_time = key_space.table("alltimehigh")
self.assertTrue(self.check_table_size("scores", key_space, len(parameters)))
self.assertTrue(self.check_table_size("monthlyhigh", key_space, len(parameters)))
self.assertTrue(self.check_table_size("alltimehigh", key_space, len(parameters)))
filtered_mv_monthly_objects = mv_monthly.objects.filter(game='Chess', year=2015, month=6)
self.assertEqual(len(filtered_mv_monthly_objects), 1)
self.assertEqual(filtered_mv_monthly_objects[0]['score'], 3500)
self.assertEqual(filtered_mv_monthly_objects[0]['user'], 'jbellis')
filtered_mv_alltime_objects = mv_all_time.objects.filter(game='Chess')
self.assertEqual(len(filtered_mv_alltime_objects), 2)
self.assertEqual(filtered_mv_alltime_objects[0]['score'], 3500)
def check_table_size(self, table_name, key_space, expected_size):
table = key_space.table(table_name)
attempts = 0
while attempts < 10:
attempts += 1
table_size = len(table.objects.all())
if table_size != expected_size:
print("Table {0} size was {1} and was expected to be {2}".format(table_name, table_size, expected_size))
else:
return True
return False
| Richard-Mathie/cassandra_benchmark | vendor/github.com/datastax/python-driver/tests/integration/cqlengine/query/test_named.py | Python | apache-2.0 | 15,782 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to Apache.
# usage: ./apache-pr-merge.py (see config env vars below)
#
# Lightly modified from version of this script in incubator-parquet-format
from __future__ import print_function
from requests.auth import HTTPBasicAuth
import requests
import os
import six
import subprocess
import sys
import textwrap
IMPYLA_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
PROJECT_NAME = 'impyla'
print("IMPYLA_HOME = " + IMPYLA_HOME)
# Remote name with the PR
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "upstream")
# Remote name where results pushed
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "upstream")
GITHUB_BASE = "https://github.com/cloudera/" + PROJECT_NAME + "/pull"
GITHUB_API_BASE = "https://api.github.com/repos/cloudera/" + PROJECT_NAME
# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"
os.chdir(IMPYLA_HOME)
auth_required = False
if auth_required:
GITHUB_USERNAME = os.environ['GITHUB_USER']
import getpass
GITHUB_PASSWORD = getpass.getpass('Enter github.com password for %s:'
% GITHUB_USERNAME)
def get_json_auth(url):
auth = HTTPBasicAuth(GITHUB_USERNAME, GITHUB_PASSWORD)
req = requests.get(url, auth=auth)
return req.json()
get_json = get_json_auth
else:
def get_json_no_auth(url):
req = requests.get(url)
return req.json()
get_json = get_json_no_auth
def fail(msg):
print(msg)
clean_up()
sys.exit(-1)
def run_cmd(cmd):
# py2.6 does not have subprocess.check_output
if isinstance(cmd, six.string_types):
cmd = cmd.split(' ')
popenargs = [cmd]
kwargs = {}
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def continue_maybe(prompt):
result = raw_input("\n%s (y/n): " % prompt)
if result.lower() != "y":
fail("Okay, exiting")
original_head = run_cmd("git rev-parse HEAD")[:8]
def clean_up():
print("Restoring head pointer to %s" % original_head)
run_cmd("git checkout %s" % original_head)
branches = run_cmd("git branch").replace(" ", "").split("\n")
for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
print("Deleting local branch %s" % branch)
run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref):
pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num, target_ref.upper())
run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num, pr_branch_name))
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref, target_branch_name))
run_cmd("git checkout %s" % target_branch_name)
had_conflicts = False
try:
run_cmd(['git', 'merge', pr_branch_name, '--squash'])
except Exception as e:
msg = "Error merging: %s\nWould you like to manually fix-up this merge?" % e
continue_maybe(msg)
msg = "Okay, please fix any conflicts and 'git add' conflicting files... Finished?"
continue_maybe(msg)
had_conflicts = True
commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%an <%ae>']).split("\n")
distinct_authors = sorted(set(commit_authors),
key=lambda x: commit_authors.count(x), reverse=True)
primary_author = distinct_authors[0]
commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
'--pretty=format:%h [%an] %s']).split("\n\n")
merge_message_flags = []
merge_message_flags += ["-m", title]
if body is not None:
merge_message_flags += ["-m", '\n'.join(textwrap.wrap(body))]
authors = "\n".join(["Author: %s" % a for a in distinct_authors])
merge_message_flags += ["-m", authors]
if had_conflicts:
committer_name = run_cmd("git config --get user.name").strip()
committer_email = run_cmd("git config --get user.email").strip()
message = "This patch had conflicts when merged, resolved by\nCommitter: %s <%s>" % (
committer_name, committer_email)
merge_message_flags += ["-m", message]
# The string "Closes #%s" string is required for GitHub to correctly close the PR
merge_message_flags += [
"-m",
"Closes #%s from %s and squashes the following commits:" % (pr_num, pr_repo_desc)]
for c in commits:
merge_message_flags += ["-m", c]
run_cmd(['git', 'commit', '--author="%s"' % primary_author] +
merge_message_flags)
continue_maybe("Merge complete (local ref %s). Push to %s?" % (
target_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name,
target_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
clean_up()
print("Pull request #%s merged!" % pr_num)
print("Merge hash: %s" % merge_hash)
return merge_hash
def cherry_pick(pr_num, merge_hash, default_branch):
pick_ref = raw_input("Enter a branch name [%s]: " % default_branch)
if pick_ref == "":
pick_ref = default_branch
pick_branch_name = "%s_PICK_PR_%s_%s" % (BRANCH_PREFIX, pr_num,
pick_ref.upper())
run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, pick_ref,
pick_branch_name))
run_cmd("git checkout %s" % pick_branch_name)
run_cmd("git cherry-pick -sx %s" % merge_hash)
continue_maybe("Pick complete (local ref %s). Push to %s?" % (
pick_branch_name, PUSH_REMOTE_NAME))
try:
run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, pick_branch_name,
pick_ref))
except Exception as e:
clean_up()
fail("Exception while pushing: %s" % e)
pick_hash = run_cmd("git rev-parse %s" % pick_branch_name)[:8]
clean_up()
print("Pull request #%s picked into %s!" % (pr_num, pick_ref))
print("Pick hash: %s" % pick_hash)
return pick_ref
def fix_version_from_branch(branch, versions):
# Note: Assumes this is a sorted (newest->oldest) list of un-released
# versions
if branch == "master":
return versions[0]
else:
branch_ver = branch.replace("branch-", "")
return filter(lambda x: x.name.startswith(branch_ver), versions)[-1]
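# Illustrative (hypothetical version objects): for branch 'branch-0.13' this returns
# the oldest entry in `versions` whose name starts with '0.13'; for 'master' it
# returns versions[0], the newest unreleased version (per the sorting note above).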
branches = get_json("%s/branches" % GITHUB_API_BASE)
branch_names = filter(lambda x: x.startswith("branch-"),
[x['name'] for x in branches])
# Assumes branch names can be sorted lexicographically; fall back to 'master'
# when the repository has no 'branch-*' branches yet
latest_branch = sorted(branch_names, reverse=True)[0] if branch_names else 'master'
pr_num = raw_input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))
url = pr["url"]
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)
if pr["merged"] is True:
print("Pull request {0} has already been merged, assuming "
"you want to backport".format(pr_num))
merge_commit_desc = run_cmd([
'git', 'log', '--merges', '--first-parent',
'--grep=pull request #%s' % pr_num, '--oneline']).split("\n")[0]
if merge_commit_desc == "":
fail("Couldn't find any merge commit for #{0}"
", you may need to update HEAD.".format(pr_num))
merge_hash = merge_commit_desc[:7]
message = merge_commit_desc[8:]
print("Found: %s" % message)
cherry_pick(pr_num, merge_hash, latest_branch)
sys.exit(0)
if not bool(pr["mergeable"]):
msg = ("Pull request {0} is not mergeable in its current form.\n"
"Continue? (experts only!)".format(pr_num))
continue_maybe(msg)
print ("\n=== Pull Request #%s ===" % pr_num)
print ("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)
merged_refs = [target_ref]
merge_hash = merge_pr(pr_num, target_ref)
pick_prompt = "Would you like to pick %s into another branch?" % merge_hash
while raw_input("\n%s (y/n): " % pick_prompt).lower() == "y":
merged_refs = merged_refs + [cherry_pick(pr_num, merge_hash,
latest_branch)]
| schaffino/impyla | dev/merge-pr.py | Python | apache-2.0 | 9,688 |
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates the notes of a single order specified by ID.
To determine which orders exist, run get_all_orders.py."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
ORDER_ID = 'INSERT_ORDER_ID_HERE'
def main(client, order_id):
# Initialize appropriate service.
order_service = client.GetService('OrderService', version='v201411')
# Create statement object to select a single order by an ID.
values = [{
'key': 'orderId',
'value': {
'xsi_type': 'NumberValue',
'value': order_id
}
}]
query = 'WHERE id = :orderId'
statement = dfp.FilterStatement(query, values)
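# Illustrative: the statement above selects a single order via 'WHERE id = :orderId',
# with orderId bound to the NumberValue defined in `values`; the FilterStatement
# helper is also meant to handle paging by appending a LIMIT/OFFSET clause when
# ToStatement() is called (client-library behavior, not shown here).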
# Get orders by statement.
response = order_service.getOrdersByStatement(statement.ToStatement())
if 'results' in response:
# Update each local order object by changing its notes.
updated_orders = []
for order in response['results']:
# Archived orders cannot be updated.
if not order['isArchived']:
order['notes'] = 'Spoke to advertiser. All is well.'
updated_orders.append(order)
# Update orders remotely.
orders = order_service.updateOrders(updated_orders)
# Display results.
if orders:
for order in orders:
print ('Order with id \'%s\', name \'%s\', advertiser id \'%s\', and '
'notes \'%s\' was updated.'
% (order['id'], order['name'], order['advertiserId'],
order['notes']))
else:
print 'No orders were updated.'
else:
print 'No orders found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, ORDER_ID)
| coxmediagroup/googleads-python-lib | examples/dfp/v201411/order_service/update_orders.py | Python | apache-2.0 | 2,386 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import eventlet
eventlet.monkey_patch(os=False)
import copy
import gettext
import logging
import os
import shutil
import sys
import uuid
import fixtures
from oslo.config import cfg
from oslo.messaging import conffixture as messaging_conffixture
import testtools
from nova import context
from nova import db
from nova.db import migration
from nova.db.sqlalchemy import api as session
from nova.network import manager as network_manager
from nova import objects
from nova.objects import base as objects_base
from nova.openstack.common.fixture import logging as log_fixture
from nova.openstack.common.fixture import moxstubout
from nova.openstack.common import log as nova_logging
from nova.openstack.common import timeutils
from nova import paths
from nova import rpc
from nova import service
from nova.tests import conf_fixture
from nova.tests import policy_fixture
from nova import utils
test_opts = [
cfg.StrOpt('sqlite_clean_db',
default='clean.sqlite',
help='File name of clean sqlite db'),
]
CONF = cfg.CONF
CONF.register_opts(test_opts)
CONF.import_opt('connection',
'nova.openstack.common.db.options',
group='database')
CONF.import_opt('sqlite_db', 'nova.openstack.common.db.options',
group='database')
CONF.import_opt('enabled', 'nova.api.openstack', group='osapi_v3')
CONF.set_override('use_stderr', False)
nova_logging.setup('nova')
# NOTE(comstud): Make sure we have all of the objects loaded. We do this
# at module import time, because we may be using mock decorators in our
# tests that run at import time.
objects.register_all()
_DB_CACHE = None
_TRUE_VALUES = ('True', 'true', '1', 'yes')
class Database(fixtures.Fixture):
def __init__(self, db_session, db_migrate, sql_connection,
sqlite_db, sqlite_clean_db):
self.sql_connection = sql_connection
self.sqlite_db = sqlite_db
self.sqlite_clean_db = sqlite_clean_db
self.engine = db_session.get_engine()
self.engine.dispose()
conn = self.engine.connect()
if sql_connection == "sqlite://":
if db_migrate.db_version() > db_migrate.db_initial_version():
return
else:
testdb = paths.state_path_rel(sqlite_db)
if os.path.exists(testdb):
return
db_migrate.db_sync()
if sql_connection == "sqlite://":
conn = self.engine.connect()
self._DB = "".join(line for line in conn.connection.iterdump())
self.engine.dispose()
else:
cleandb = paths.state_path_rel(sqlite_clean_db)
shutil.copyfile(testdb, cleandb)
def setUp(self):
super(Database, self).setUp()
if self.sql_connection == "sqlite://":
conn = self.engine.connect()
conn.connection.executescript(self._DB)
self.addCleanup(self.engine.dispose)
else:
shutil.copyfile(paths.state_path_rel(self.sqlite_clean_db),
paths.state_path_rel(self.sqlite_db))
class SampleNetworks(fixtures.Fixture):
"""Create sample networks in the database."""
def __init__(self, host=None):
self.host = host
def setUp(self):
super(SampleNetworks, self).setUp()
ctxt = context.get_admin_context()
network = network_manager.VlanManager(host=self.host)
bridge_interface = CONF.flat_interface or CONF.vlan_interface
network.create_networks(ctxt,
label='test',
cidr='10.0.0.0/8',
multi_host=CONF.multi_host,
num_networks=CONF.num_networks,
network_size=CONF.network_size,
cidr_v6=CONF.fixed_range_v6,
gateway=CONF.gateway,
gateway_v6=CONF.gateway_v6,
bridge=CONF.flat_network_bridge,
bridge_interface=bridge_interface,
vpn_start=CONF.vpn_start,
vlan_start=CONF.vlan_start,
dns1=CONF.flat_network_dns)
for net in db.network_get_all(ctxt):
network.set_network_host(ctxt, net)
class ReplaceModule(fixtures.Fixture):
"""Replace a module with a fake module."""
def __init__(self, name, new_value):
self.name = name
self.new_value = new_value
def _restore(self, old_value):
sys.modules[self.name] = old_value
def setUp(self):
super(ReplaceModule, self).setUp()
old_value = sys.modules.get(self.name)
sys.modules[self.name] = self.new_value
self.addCleanup(self._restore, old_value)
class ServiceFixture(fixtures.Fixture):
"""Run a service as a test fixture."""
def __init__(self, name, host=None, **kwargs):
        host = host or uuid.uuid4().hex
kwargs.setdefault('host', host)
kwargs.setdefault('binary', 'nova-%s' % name)
self.kwargs = kwargs
def setUp(self):
super(ServiceFixture, self).setUp()
self.service = service.Service.create(**self.kwargs)
self.service.start()
self.addCleanup(self.service.kill)
class TranslationFixture(fixtures.Fixture):
"""Use gettext NullTranslation objects in tests."""
def setUp(self):
super(TranslationFixture, self).setUp()
nulltrans = gettext.NullTranslations()
gettext_fixture = fixtures.MonkeyPatch('gettext.translation',
lambda *x, **y: nulltrans)
self.gettext_patcher = self.useFixture(gettext_fixture)
class TestingException(Exception):
pass
class NullHandler(logging.Handler):
"""custom default NullHandler to attempt to format the record.
Used in conjunction with
log_fixture.get_logging_handle_error_fixture to detect formatting errors in
debug level logs without saving the logs.
"""
def handle(self, record):
self.format(record)
def emit(self, record):
pass
def createLock(self):
self.lock = None
class TestCase(testtools.TestCase):
"""Test case base class for all unit tests.
Due to the slowness of DB access, please consider deriving from
`NoDBTestCase` first.
"""
USES_DB = True
# NOTE(rpodolyaka): this attribute can be overridden in subclasses in order
# to scale the global test timeout value set for each
# test case separately. Use 0 value to disable timeout.
TIMEOUT_SCALING_FACTOR = 1
def setUp(self):
"""Run before each test method to initialize test environment."""
super(TestCase, self).setUp()
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if self.TIMEOUT_SCALING_FACTOR >= 0:
test_timeout *= self.TIMEOUT_SCALING_FACTOR
else:
raise ValueError('TIMEOUT_SCALING_FACTOR value must be >= 0')
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.useFixture(TranslationFixture())
self.useFixture(log_fixture.get_logging_handle_error_fixture())
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
rpc.add_extra_exmods('nova.test')
self.addCleanup(rpc.clear_extra_exmods)
self.addCleanup(rpc.cleanup)
# set root logger to debug
root = logging.getLogger()
root.setLevel(logging.DEBUG)
# supports collecting debug level for local runs
if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
level = logging.DEBUG
else:
level = logging.INFO
# Collect logs
fs = '%(levelname)s [%(name)s] %(message)s'
self.useFixture(fixtures.FakeLogger(format=fs, level=None))
root.handlers[0].setLevel(level)
if level > logging.DEBUG:
# Just attempt to format debug level logs, but don't save them
handler = NullHandler()
self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
handler.setLevel(logging.DEBUG)
self.useFixture(conf_fixture.ConfFixture(CONF))
self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
self.messaging_conf.transport_driver = 'fake'
self.useFixture(self.messaging_conf)
rpc.init(CONF)
if self.USES_DB:
global _DB_CACHE
if not _DB_CACHE:
_DB_CACHE = Database(session, migration,
sql_connection=CONF.database.connection,
sqlite_db=CONF.database.sqlite_db,
sqlite_clean_db=CONF.sqlite_clean_db)
self.useFixture(_DB_CACHE)
# NOTE(danms): Make sure to reset us back to non-remote objects
# for each test to avoid interactions. Also, backup the object
# registry.
objects_base.NovaObject.indirection_api = None
self._base_test_obj_backup = copy.copy(
objects_base.NovaObject._obj_classes)
self.addCleanup(self._restore_obj_registry)
# NOTE(mnaser): All calls to utils.is_neutron() are cached in
# nova.utils._IS_NEUTRON. We set it to None to avoid any
# caching of that value.
utils._IS_NEUTRON = None
mox_fixture = self.useFixture(moxstubout.MoxStubout())
self.mox = mox_fixture.mox
self.stubs = mox_fixture.stubs
self.addCleanup(self._clear_attrs)
self.useFixture(fixtures.EnvironmentVariable('http_proxy'))
self.policy = self.useFixture(policy_fixture.PolicyFixture())
CONF.set_override('fatal_exception_format_errors', True)
CONF.set_override('enabled', True, 'osapi_v3')
CONF.set_override('force_dhcp_release', False)
CONF.set_override('periodic_enable', False)
def _restore_obj_registry(self):
objects_base.NovaObject._obj_classes = self._base_test_obj_backup
def _clear_attrs(self):
# Delete attributes that don't start with _ so they don't pin
# memory around unnecessarily for the duration of the test
# suite
for key in [k for k in self.__dict__.keys() if k[0] != '_']:
del self.__dict__[key]
def flags(self, **kw):
"""Override flag variables for a test."""
group = kw.pop('group', None)
for k, v in kw.iteritems():
CONF.set_override(k, v, group)
def start_service(self, name, host=None, **kwargs):
svc = self.useFixture(ServiceFixture(name, host, **kwargs))
return svc.service
class APICoverage(object):
cover_api = None
def test_api_methods(self):
self.assertTrue(self.cover_api is not None)
api_methods = [x for x in dir(self.cover_api)
if not x.startswith('_')]
test_methods = [x[5:] for x in dir(self)
if x.startswith('test_')]
self.assertThat(
test_methods,
testtools.matchers.ContainsAll(api_methods))
class TimeOverride(fixtures.Fixture):
"""Fixture to start and remove time override."""
def setUp(self):
super(TimeOverride, self).setUp()
timeutils.set_time_override()
self.addCleanup(timeutils.clear_time_override)
class NoDBTestCase(TestCase):
"""`NoDBTestCase` differs from TestCase in that DB access is not supported.
This makes tests run significantly faster. If possible, all new tests
should derive from this class.
"""
USES_DB = False
class BaseHookTestCase(NoDBTestCase):
def assert_has_hook(self, expected_name, func):
self.assertTrue(hasattr(func, '__hook_name__'))
self.assertEqual(expected_name, func.__hook_name__)
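# Illustrative sketch (not part of the original module): a typical unit test
# built on these helpers derives from NoDBTestCase, overrides config options
# with flags(), and can freeze time with TimeOverride. The test class below is
# hypothetical.
#
# class ExampleTimeTestCase(NoDBTestCase):
#
#     def test_frozen_time(self):
#         self.flags(periodic_enable=False)
#         self.useFixture(TimeOverride())
#         first = timeutils.utcnow()
#         second = timeutils.utcnow()
#         self.assertEqual(first, second)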
| srajag/nova | nova/test.py | Python | apache-2.0 | 13,462 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sqlite3
import sys
if len(sys.argv) < 3:
print "Usage: %s [init|insert|list] db" % sys.argv[0]
else:
conn = sqlite3.connect(sys.argv[2])
with conn:
if sys.argv[1] == "init":
conn.execute("DROP TABLE IF EXISTS records")
conn.execute("CREATE TABLE records(id INTEGER PRIMARY KEY AUTOINCREMENT, description TEXT)")
conn.commit()
elif sys.argv[1] == "list":
cursor = conn.cursor()
cursor.execute("SELECT * FROM records")
rows = cursor.fetchall()
for r in rows:
print r
elif sys.argv[1] == "insert":
while True:
l = sys.stdin.readline()
if not l: break
conn.execute("INSERT INTO records(description) VALUES (?)", (l.rstrip(),))
conn.commit()
else:
print "Unrecognised command: %s" % sys.argv[1]
| dcristoloveanu/qpid-proton | examples/python/db_ctrl.py | Python | apache-2.0 | 1,739 |
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import itemgetter
import re
import numpy as np
import pandas as pd
get_unit_and_periods = itemgetter('unit', 'periods')
def parse_treasury_csv_column(column):
"""
Parse a treasury CSV column into a more human-readable format.
Columns start with 'RIFLGFC', followed by Y or M (year or month), followed
by a two-digit number signifying number of years/months, followed by _N.B.
We only care about the middle two entries, which we turn into a string like
3month or 30year.
"""
column_re = re.compile(
r"^(?P<prefix>RIFLGFC)"
"(?P<unit>[YM])"
"(?P<periods>[0-9]{2})"
"(?P<suffix>_N.B)$"
)
match = column_re.match(column)
if match is None:
raise ValueError("Couldn't parse CSV column %r." % column)
unit, periods = get_unit_and_periods(match.groupdict())
# Roundtrip through int to coerce '06' into '6'.
return str(int(periods)) + ('year' if unit == 'Y' else 'month')
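# Illustrative examples (column names constructed to match the format described
# in the docstring, not taken from a real download):
#
#   parse_treasury_csv_column('RIFLGFCM03_N.B')  # -> '3month'
#   parse_treasury_csv_column('RIFLGFCY30_N.B')  # -> '30year'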
def earliest_possible_date():
"""
The earliest date for which we can load data from this module.
"""
# The US Treasury actually has data going back further than this, but it's
# pretty rare to find pricing data going back that far, and there's no
# reason to make people download benchmarks back to 1950 that they'll never
# be able to use.
return pd.Timestamp('1980', tz='UTC')
def get_treasury_data(start_date, end_date):
return pd.read_csv(
"https://www.federalreserve.gov/datadownload/Output.aspx"
"?rel=H15"
"&series=bf17364827e38702b42a58cf8eaa3f78"
"&lastObs="
"&from=" # An unbounded query is ~2x faster than specifying dates.
"&to="
"&filetype=csv"
"&label=include"
"&layout=seriescolumn"
"&type=package",
skiprows=5, # First 5 rows are useless headers.
parse_dates=['Time Period'],
na_values=['ND'], # Presumably this stands for "No Data".
index_col=0,
).loc[
start_date:end_date
].dropna(
how='all'
).rename(
columns=parse_treasury_csv_column
).tz_localize('UTC') * 0.01 # Convert from 2.57% to 0.0257.
def dataconverter(s):
try:
return float(s) / 100
    except (TypeError, ValueError):
        # Values such as 'ND' ("no data") cannot be parsed as floats.
        return np.nan
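# Illustrative examples: dataconverter('2.57') returns 0.0257, while
# unparseable values such as 'ND' ("no data") fall through to NaN.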
def get_daily_10yr_treasury_data():
"""Download daily 10 year treasury rates from the Federal Reserve and
return a pandas.Series."""
url = "https://www.federalreserve.gov/datadownload/Output.aspx?rel=H15" \
"&series=bcb44e57fb57efbe90002369321bfb3f&lastObs=&from=&to=" \
"&filetype=csv&label=include&layout=seriescolumn"
return pd.read_csv(url, header=5, index_col=0, names=['DATE', 'BC_10YEAR'],
parse_dates=True, converters={1: dataconverter},
squeeze=True)
| humdings/zipline | zipline/data/treasuries.py | Python | apache-2.0 | 3,414 |
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple tool for generating a client library.
Relevant links:
https://developers.google.com/discovery/v1/reference/apis#resource
"""
import datetime
from apitools.gen import command_registry
from apitools.gen import message_registry
from apitools.gen import service_registry
from apitools.gen import util
def _ApitoolsVersion():
"""Returns version of the currently installed google-apitools package."""
try:
import pkg_resources
except ImportError:
return 'X.X.X'
try:
return pkg_resources.get_distribution('google-apitools').version
except pkg_resources.DistributionNotFound:
return 'X.X.X'
def _StandardQueryParametersSchema(discovery_doc):
"""Sets up dict of standard query parameters."""
standard_query_schema = {
'id': 'StandardQueryParameters',
'type': 'object',
'description': 'Query parameters accepted by all methods.',
'properties': discovery_doc.get('parameters', {}),
}
# We add an entry for the trace, since Discovery doesn't.
standard_query_schema['properties']['trace'] = {
'type': 'string',
'description': ('A tracing token of the form "token:<tokenid>" '
'to include in api requests.'),
'location': 'query',
}
return standard_query_schema
class DescriptorGenerator(object):
"""Code generator for a given discovery document."""
def __init__(self, discovery_doc, client_info, names, root_package, outdir,
base_package, protorpc_package, generate_cli=False,
init_wildcards_file=True,
use_proto2=False, unelidable_request_methods=None,
apitools_version=''):
self.__discovery_doc = discovery_doc
self.__client_info = client_info
self.__outdir = outdir
self.__use_proto2 = use_proto2
self.__description = util.CleanDescription(
self.__discovery_doc.get('description', ''))
self.__package = self.__client_info.package
self.__version = self.__client_info.version
self.__revision = discovery_doc.get('revision', '1')
self.__generate_cli = generate_cli
self.__init_wildcards_file = init_wildcards_file
self.__root_package = root_package
self.__base_files_package = base_package
self.__protorpc_package = protorpc_package
self.__names = names
# Order is important here: we need the schemas before we can
# define the services.
self.__message_registry = message_registry.MessageRegistry(
self.__client_info, self.__names, self.__description,
self.__root_package, self.__base_files_package,
self.__protorpc_package)
schemas = self.__discovery_doc.get('schemas', {})
for schema_name, schema in sorted(schemas.items()):
self.__message_registry.AddDescriptorFromSchema(
schema_name, schema)
# We need to add one more message type for the global parameters.
standard_query_schema = _StandardQueryParametersSchema(
self.__discovery_doc)
self.__message_registry.AddDescriptorFromSchema(
standard_query_schema['id'], standard_query_schema)
# Now that we know all the messages, we need to correct some
# fields from MessageFields to EnumFields.
self.__message_registry.FixupMessageFields()
self.__command_registry = command_registry.CommandRegistry(
self.__package, self.__version, self.__client_info,
self.__message_registry, self.__root_package,
self.__base_files_package, self.__protorpc_package,
self.__names)
self.__command_registry.AddGlobalParameters(
self.__message_registry.LookupDescriptorOrDie(
'StandardQueryParameters'))
self.__services_registry = service_registry.ServiceRegistry(
self.__client_info,
self.__message_registry,
self.__command_registry,
self.__names,
self.__root_package,
self.__base_files_package,
unelidable_request_methods or [])
services = self.__discovery_doc.get('resources', {})
for service_name, methods in sorted(services.items()):
self.__services_registry.AddServiceFromResource(
service_name, methods)
# We might also have top-level methods.
api_methods = self.__discovery_doc.get('methods', [])
if api_methods:
self.__services_registry.AddServiceFromResource(
'api', {'methods': api_methods})
# pylint: disable=protected-access
self.__client_info = self.__client_info._replace(
scopes=self.__services_registry.scopes)
# The apitools version that will be used in prerequisites for the
# generated packages.
self.__apitools_version = (
apitools_version if apitools_version else _ApitoolsVersion())
@property
def client_info(self):
return self.__client_info
@property
def discovery_doc(self):
return self.__discovery_doc
@property
def names(self):
return self.__names
@property
def outdir(self):
return self.__outdir
@property
def package(self):
return self.__package
@property
def use_proto2(self):
return self.__use_proto2
@property
def apitools_version(self):
return self.__apitools_version
def _GetPrinter(self, out):
printer = util.SimplePrettyPrinter(out)
return printer
def WriteInit(self, out):
"""Write a simple __init__.py for the generated client."""
printer = self._GetPrinter(out)
if self.__init_wildcards_file:
printer('"""Common imports for generated %s client library."""',
self.__client_info.package)
printer('# pylint:disable=wildcard-import')
else:
printer('"""Package marker file."""')
printer()
printer('import pkgutil')
printer()
if self.__init_wildcards_file:
printer('from %s import *', self.__base_files_package)
if self.__root_package == '.':
import_prefix = ''
else:
import_prefix = '%s.' % self.__root_package
if self.__generate_cli:
printer('from %s%s import *',
import_prefix, self.__client_info.cli_rule_name)
printer('from %s%s import *',
import_prefix, self.__client_info.client_rule_name)
printer('from %s%s import *',
import_prefix, self.__client_info.messages_rule_name)
printer()
printer('__path__ = pkgutil.extend_path(__path__, __name__)')
def WriteIntermediateInit(self, out):
"""Write a simple __init__.py for an intermediate directory."""
printer = self._GetPrinter(out)
printer('#!/usr/bin/env python')
printer('"""Shared __init__.py for apitools."""')
printer()
printer('from pkgutil import extend_path')
printer('__path__ = extend_path(__path__, __name__)')
def WriteSetupPy(self, out):
"""Write a setup.py for upload to PyPI."""
printer = self._GetPrinter(out)
year = datetime.datetime.now().year
printer('# Copyright %s Google Inc. All Rights Reserved.' % year)
printer('#')
        printer('# Licensed under the Apache License, Version 2.0 (the '
                '"License");')
printer('# you may not use this file except in compliance with '
'the License.')
printer('# You may obtain a copy of the License at')
printer('#')
printer('# http://www.apache.org/licenses/LICENSE-2.0')
printer('#')
printer('# Unless required by applicable law or agreed to in writing, '
'software')
printer('# distributed under the License is distributed on an "AS IS" '
'BASIS,')
printer('# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either '
'express or implied.')
printer('# See the License for the specific language governing '
'permissions and')
printer('# limitations under the License.')
printer()
printer('import setuptools')
printer('REQUIREMENTS = [')
with printer.Indent(indent=' '):
parts = self.apitools_version.split('.')
major = parts.pop(0)
minor = parts.pop(0)
printer('"google-apitools>=%s,~=%s.%s",',
self.apitools_version, major, minor)
printer('"httplib2>=0.9",')
printer('"oauth2client>=1.4.12",')
printer(']')
printer('_PACKAGE = "apitools.clients.%s"' % self.__package)
printer()
printer('setuptools.setup(')
# TODO(craigcitro): Allow customization of these options.
with printer.Indent(indent=' '):
printer('name="google-apitools-%s-%s",',
self.__package, self.__version)
printer('version="%s.%s",',
self.apitools_version, self.__revision)
printer('description="Autogenerated apitools library for %s",' % (
self.__package,))
printer('url="https://github.com/google/apitools",')
printer('author="Craig Citro",')
printer('author_email="[email protected]",')
printer('packages=setuptools.find_packages(),')
printer('install_requires=REQUIREMENTS,')
printer('classifiers=[')
with printer.Indent(indent=' '):
printer('"Programming Language :: Python :: 2.7",')
printer('"License :: OSI Approved :: Apache Software '
'License",')
printer('],')
printer('license="Apache 2.0",')
printer('keywords="apitools apitools-%s %s",' % (
self.__package, self.__package))
printer(')')
def WriteMessagesFile(self, out):
self.__message_registry.WriteFile(self._GetPrinter(out))
def WriteMessagesProtoFile(self, out):
self.__message_registry.WriteProtoFile(self._GetPrinter(out))
def WriteServicesProtoFile(self, out):
self.__services_registry.WriteProtoFile(self._GetPrinter(out))
def WriteClientLibrary(self, out):
self.__services_registry.WriteFile(self._GetPrinter(out))
def WriteCli(self, out):
self.__command_registry.WriteFile(self._GetPrinter(out))
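# Illustrative sketch (hypothetical driver, not part of this module): the
# generator is built from a parsed discovery document plus naming/packaging
# options, then asked to write each output file. All argument values below are
# placeholders; the real entry point assembles them from command-line flags.
#
# gen = DescriptorGenerator(discovery_doc, client_info, names,
#                           root_package='.', outdir='generated',
#                           base_package='apitools.base.py',
#                           protorpc_package='protorpc')
# with open('generated/__init__.py', 'w') as out:
#     gen.WriteInit(out)
# with open('generated/messages.py', 'w') as out:
#     gen.WriteMessagesFile(out)
# with open('generated/client.py', 'w') as out:
#     gen.WriteClientLibrary(out)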
| cherba/apitools | apitools/gen/gen_client_lib.py | Python | apache-2.0 | 11,303 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
from tensorflow.contrib.framework.python.ops import add_arg_scope as contrib_add_arg_scope
from tensorflow.contrib.framework.python.ops import gen_variable_ops
from tensorflow.contrib.util import loader
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.platform import resource_loader
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import training_util
__all__ = ['add_model_variable',
'assert_global_step',
'assert_or_get_global_step',
'assign_from_checkpoint',
'assign_from_checkpoint_fn',
'assign_from_values',
'assign_from_values_fn',
'create_global_step',
'filter_variables',
'get_global_step',
'get_or_create_global_step',
'get_local_variables',
'get_model_variables',
'get_trainable_variables',
'get_unique_variable',
'get_variables_by_name',
'get_variables_by_suffix',
'get_variable_full_name',
'get_variables_to_restore',
'get_variables',
'local_variable',
'model_variable',
'variable',
'VariableDeviceChooser',
'zero_initializer']
def zero_initializer(ref, use_locking=True, name="zero_initializer"):
"""Initialize 'ref' with all zeros, ref tensor should be uninitialized.
If already initialized, you will get ValueError. This op is intended to
save memory during initialization.
Args:
ref: ref of the tensor need to be zero initialized.
name: optional name for this operation.
Returns:
ref that initialized.
Raises:
ValueError: If ref tensor is initialized.
"""
loader.load_op_library(
resource_loader.get_path_to_datafile("_variable_ops.so"))
return gen_variable_ops.zero_initializer(ref, name=name)
def assert_global_step(global_step_tensor):
training_util.assert_global_step(global_step_tensor)
def assert_or_get_global_step(graph=None, global_step_tensor=None):
"""Verifies that a global step tensor is valid or gets one if None is given.
If `global_step_tensor` is not None, check that it is a valid global step
tensor (using `assert_global_step`). Otherwise find a global step tensor using
`get_global_step` and return it.
Args:
graph: The graph to find the global step tensor for.
global_step_tensor: The tensor to check for suitability as a global step.
If None is given (the default), find a global step tensor.
Returns:
A tensor suitable as a global step, or `None` if none was provided and none
was found.
"""
if global_step_tensor is None:
# Get the global step tensor the same way the supervisor would.
global_step_tensor = get_global_step(graph)
else:
assert_global_step(global_step_tensor)
return global_step_tensor
def get_global_step(graph=None):
return training_util.get_global_step(graph)
def create_global_step(graph=None):
"""Create global step tensor in graph.
This API is deprecated. Use core framework training version instead.
Args:
graph: The graph in which to create the global step tensor. If missing,
use default graph.
Returns:
Global step tensor.
Raises:
ValueError: if global step tensor is already defined.
"""
return training_util.create_global_step(graph)
def get_or_create_global_step(graph=None):
"""Returns and create (if necessary) the global step tensor.
Args:
graph: The graph in which to create the global step tensor. If missing, use
default graph.
Returns:
The global step tensor.
"""
return training_util.get_or_create_global_step(graph)
def local_variable(initial_value, validate_shape=True, name=None):
"""Create variable and add it to `GraphKeys.LOCAL_VARIABLES` collection.
Args:
initial_value: See variables.Variable.__init__.
validate_shape: See variables.Variable.__init__.
name: See variables.Variable.__init__.
Returns:
New variable.
"""
return variable_scope.variable(
initial_value, trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
validate_shape=validate_shape, name=name)
@contrib_add_arg_scope
def variable(name, shape=None, dtype=None, initializer=None,
regularizer=None, trainable=True, collections=None,
caching_device=None, device=None,
partitioner=None, custom_getter=None, use_resource=None):
"""Gets an existing variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
If None it would default to `tf.GraphKeys.GLOBAL_VARIABLES`.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
    device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
The created or existing variable.
"""
collections = list(collections if collections is not None
else [ops.GraphKeys.GLOBAL_VARIABLES])
# Remove duplicates
collections = set(collections)
getter = variable_scope.get_variable
if custom_getter is not None:
getter = functools.partial(custom_getter,
reuse=variable_scope.get_variable_scope().reuse)
with ops.device(device or ''):
return getter(name, shape=shape, dtype=dtype,
initializer=initializer,
regularizer=regularizer,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
use_resource=use_resource)
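# Illustrative sketch: because `variable` is decorated with
# @contrib_add_arg_scope, shared arguments can be set once for a block of
# variable creations (the device string below is only an example):
#
#   from tensorflow.contrib.framework import arg_scope
#   with arg_scope([variable], device='/cpu:0'):
#       weights = variable('weights', shape=[100, 100])
#       biases = variable('biases', shape=[100])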
@contrib_add_arg_scope
def model_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
regularizer=None, trainable=True, collections=None,
caching_device=None, device=None, partitioner=None,
custom_getter=None, use_resource=None):
"""Gets an existing model variable with these parameters or creates a new one.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the
`GraphKeys.GLOBAL_VARIABLES` and `GraphKeys.MODEL_VARIABLES` collections.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device.
    device: Optional device to place the variable. It can be a string or a
function that is called to get the device for the variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
custom_getter: Callable that allows overwriting the internal
get_variable method and has to have the same signature.
use_resource: If `True` use a ResourceVariable instead of a Variable.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
collections += [ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.MODEL_VARIABLES]
var = variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable, collections=collections,
caching_device=caching_device, device=device,
partitioner=partitioner, custom_getter=custom_getter,
use_resource=use_resource)
return var
def add_model_variable(var):
"""Adds a variable to the `GraphKeys.MODEL_VARIABLES` collection.
Args:
var: a variable.
"""
if var not in ops.get_collection(ops.GraphKeys.MODEL_VARIABLES):
ops.add_to_collection(ops.GraphKeys.MODEL_VARIABLES, var)
def get_variables(scope=None, suffix=None,
collection=ops.GraphKeys.GLOBAL_VARIABLES):
"""Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return. Can be a
variable scope or a string.
suffix: an optional suffix for filtering the variables to return.
    collection: the collection to search in. Defaults to
`GraphKeys.GLOBAL_VARIABLES`.
Returns:
a list of variables in collection with scope and suffix.
"""
if isinstance(scope, variable_scope.VariableScope):
scope = scope.name
if suffix is not None:
if ':' not in suffix:
suffix += ':'
scope = (scope or '') + '.*' + suffix
return ops.get_collection(collection, scope)
def get_model_variables(scope=None, suffix=None):
"""Gets the list of model variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.MODEL_VARIABLES)
def get_local_variables(scope=None, suffix=None):
"""Gets the list of local variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.LOCAL_VARIABLES)
def get_trainable_variables(scope=None, suffix=None):
"""Gets the list of trainable variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a list of variables in the trainable collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.TRAINABLE_VARIABLES)
def get_variables_to_restore(include=None, exclude=None):
"""Gets the list of the variables to restore.
Args:
    include: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to include. If None, all
      variables are included.
    exclude: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to exclude. If None, no
      variables are excluded.
Returns:
a list of variables to restore.
Raises:
TypeError: include or exclude is provided but is not a list or a tuple.
"""
if include is None:
# Include all variables.
vars_to_include = get_variables()
else:
if not isinstance(include, (list, tuple)):
raise TypeError('include is provided but is not a list or a tuple.')
vars_to_include = []
for scope in include:
vars_to_include += get_variables(scope)
vars_to_exclude = set()
if exclude is not None:
if not isinstance(exclude, (list, tuple)):
raise TypeError('exclude is provided but is not a list or a tuple.')
for scope in exclude:
vars_to_exclude |= set(get_variables(scope))
# Exclude the variables in vars_to_exclude
return [v for v in vars_to_include if v not in vars_to_exclude]
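# Illustrative example (scope names are placeholders): when fine-tuning, it is
# common to restore everything except the final layer, e.g.:
#
#   variables_to_restore = get_variables_to_restore(exclude=['logits'])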
def get_variables_by_suffix(suffix, scope=None):
"""Gets the list of variables that end with the given suffix.
Args:
suffix: suffix for filtering the variables to return.
scope: an optional scope for filtering the variables to return.
Returns:
    a copied list of variables with the given suffix, filtered by scope.
"""
return get_variables(scope=scope, suffix=suffix)
def get_variables_by_name(given_name, scope=None):
"""Gets the list of variables that were given that name.
Args:
given_name: name given to the variable without any scope.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and scope.
"""
suffix = '/' + given_name + ':|^' + given_name + ':'
return get_variables(scope=scope, suffix=suffix)
def get_unique_variable(var_op_name):
"""Gets the variable uniquely identified by that var_op_name.
Args:
var_op_name: the full name of the variable op, including the scope.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists.
"""
candidates = get_variables(scope=var_op_name)
if not candidates:
    raise ValueError('Could not find variable %s' % var_op_name)
for candidate in candidates:
if candidate.op.name == var_op_name:
return candidate
raise ValueError('Variable %s does not uniquely identify a variable' %
var_op_name)
def assign_from_values(var_names_to_values):
"""Creates an assignment operation from a given mapping.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
assign_op: An `Operation` that assigns each of the given variables to the
requested values.
feed_dict: The feed dictionary to use when evaluating `assign_op`.
Raises:
ValueError: if any of the given variable names were not found.
"""
feed_dict = {}
assign_ops = []
for var_name in var_names_to_values:
var_value = var_names_to_values[var_name]
var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, var_name)
if not var:
raise ValueError('Variable %s wasn\'t found' % var_name)
elif len(var) > 1:
# tf.get_collection is just a filter on the prefix: find the exact match:
found = False
for v in var:
if v.op.name == var_name:
var = v
found = True
break
if not found:
raise ValueError('Variable %s doesn\'t uniquely identify a variable' %
var_name)
else:
var = var[0]
# TODO(nsilberman): ensure placeholder and assign are on the same device.
# Assign a placeholder to the value that will be filled later.
placeholder_name = 'placeholder/' + var.op.name
placeholder_value = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name=placeholder_name)
assign_ops.append(var.assign(placeholder_value))
feed_dict[placeholder_value] = var_value.reshape(var.get_shape())
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
def assign_from_values_fn(var_names_to_values):
"""Returns a function that assigns specific variables from the given values.
This function provides a mechanism for performing assignment of variables
to values in a way that does not fill the graph with large assignment values.
Args:
var_names_to_values: A map from variable names to values.
Returns:
A function that takes a single argument, a `tf.Session`, that applies the
assignment operation.
Raises:
ValueError: if any of the given variable names were not found.
"""
assign_op, feed_dict = assign_from_values(var_names_to_values)
def callback(session):
return session.run(assign_op, feed_dict)
return callback
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
def get_variable_full_name(var):
"""Returns the full name of a variable.
For normal Variables, this is the same as the var.op.name. For
sliced or PartitionedVariables, this name is the same for all the
slices/partitions. In both cases, this is normally the name used in
a checkpoint file.
Args:
var: A `Variable` object.
Returns:
A string that is the full name.
"""
if var._save_slice_info:
return var._save_slice_info.full_name
else:
return var.op.name
# TODO(nsilberman): add flag to load exponential moving averages instead
#
# TODO(sguada): Update docs in slim/g3doc/index.md to describe
# the new feature where the var_list dictionary can have values that
# are each a list of Variables.
def assign_from_checkpoint(model_path, var_list, ignore_missing_vars=False):
"""Creates an operation to assign specific variables from a checkpoint.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of (possibly partitioned) `Variable` objects
or a dictionary mapping names in the checkpoint to the
corresponding variables or list of variables to initialize
from that checkpoint value. For partitioned Variables, the
name in the checkpoint must be the full variable, not the
      name of the partitioned variable, e.g. "my_var" rather than
"my_var/part_4". If empty, returns no_op(), {}.
ignore_missing_vars: Boolean, if True ignore variables missing in the
checkpoint with a warning instead of failing.
Returns:
the restore_op and the feed_dict that need to be run to restore var_list.
Raises:
ValueError: If `ignore_missing_vars` is False and the checkpoint specified
at `model_path` is missing one of the variables in `var_list`.
"""
# Normalize var_list into a dictionary mapping names in the
# checkpoint to the list of variables to initialize from that
# checkpoint variable. Sliced (including partitioned) variables will
# end up under the same key.
grouped_vars = {}
if isinstance(var_list, (tuple, list)):
for var in var_list:
ckpt_name = get_variable_full_name(var)
if ckpt_name not in grouped_vars:
grouped_vars[ckpt_name] = []
grouped_vars[ckpt_name].append(var)
else:
for ckpt_name, value in var_list.iteritems():
if isinstance(value, (tuple, list)):
grouped_vars[ckpt_name] = value
else:
grouped_vars[ckpt_name] = [value]
# Read each checkpoint entry. Create a placeholder variable and
# add the (possibly sliced) data from the checkpoint to the feed_dict.
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
feed_dict = {}
assign_ops = []
for ckpt_name in grouped_vars:
if not reader.has_tensor(ckpt_name):
log_str = 'Checkpoint is missing variable [%s]' % ckpt_name
if ignore_missing_vars:
logging.warning(log_str)
continue
else:
raise ValueError(log_str)
ckpt_value = reader.get_tensor(ckpt_name)
for var in grouped_vars[ckpt_name]:
placeholder_tensor = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name='placeholder/' + var.op.name)
assign_ops.append(var.assign(placeholder_tensor))
if not var._save_slice_info:
if var.get_shape() != ckpt_value.shape:
raise ValueError(
'Total size of new array must be unchanged for %s '
'lh_shape: [%s], rh_shape: [%s]'
% (ckpt_name, str(ckpt_value.shape), str(var.get_shape())))
feed_dict[placeholder_tensor] = ckpt_value.reshape(ckpt_value.shape)
else:
slice_dims = zip(var._save_slice_info.var_offset,
var._save_slice_info.var_shape)
slice_dims = [(start, start + size) for (start, size) in slice_dims]
slice_dims = [slice(*x) for x in slice_dims]
slice_value = ckpt_value[slice_dims]
slice_value = slice_value.reshape(var._save_slice_info.var_shape)
feed_dict[placeholder_tensor] = slice_value
assign_op = control_flow_ops.group(*assign_ops)
return assign_op, feed_dict
# pylint: enable=protected-access
def assign_from_checkpoint_fn(model_path, var_list, ignore_missing_vars=False,
reshape_variables=False):
"""Returns a function that assigns specific variables from a checkpoint.
If ignore_missing_vars is True and no variables are found in the checkpoint
it returns None.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of `Variable` objects or a dictionary mapping names in the
checkpoint to the corresponding variables to initialize. If empty or
`None`, it would return `no_op(), None`.
ignore_missing_vars: Boolean, if True it would ignore variables missing in
the checkpoint with a warning instead of failing.
reshape_variables: Boolean, if True it would automatically reshape variables
      which are of a different shape than the ones stored in the checkpoint but
which have the same number of elements.
Returns:
A function that takes a single argument, a `tf.Session`, that applies the
assignment operation. If no matching variables were found in the checkpoint
then `None` is returned.
Raises:
ValueError: If var_list is empty.
"""
if not var_list:
raise ValueError('var_list cannot be empty')
if ignore_missing_vars:
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
if isinstance(var_list, dict):
var_dict = var_list
else:
var_dict = {var.op.name: var for var in var_list}
available_vars = {}
for var in var_dict:
if reader.has_tensor(var):
available_vars[var] = var_dict[var]
else:
logging.warning(
'Variable %s missing in checkpoint %s', var, model_path)
var_list = available_vars
if var_list:
saver = tf_saver.Saver(var_list, reshape=reshape_variables)
def callback(session):
saver.restore(session, model_path)
return callback
else:
logging.warning('No Variables to restore')
return None
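# Illustrative sketch (checkpoint path and scope name are placeholders,
# assuming `import tensorflow as tf`): build the init function once, then call
# it after the remaining variables have been initialized.
#
#   variables_to_restore = get_variables_to_restore(exclude=['logits'])
#   init_fn = assign_from_checkpoint_fn('/tmp/model.ckpt', variables_to_restore)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       if init_fn is not None:
#           init_fn(sess)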
class VariableDeviceChooser(object):
"""Device chooser for variables.
When using a parameter server it will assign them in a round-robin fashion.
When not using a parameter server it allows GPU or CPU placement.
"""
def __init__(self,
num_tasks=0,
job_name='ps',
device_type='CPU',
device_index=0):
"""Initialize VariableDeviceChooser.
Usage:
To use with 2 parameter servers:
VariableDeviceChooser(2)
To use without parameter servers:
VariableDeviceChooser()
VariableDeviceChooser(device_type='GPU') # For GPU placement
Args:
num_tasks: number of tasks.
job_name: String, a name for the parameter server job.
device_type: Optional device type string (e.g. "CPU" or "GPU")
device_index: int. Optional device index. If left
unspecified, device represents 'any' device_index.
"""
self._job_name = job_name
self._device_type = device_type
self._device_index = device_index
self._num_tasks = num_tasks
self._next_task_id = 0
def __call__(self, op):
device_spec = tf_device.DeviceSpec(device_type=self._device_type,
device_index=self._device_index)
if self._num_tasks > 0:
task_id = self._next_task_id
self._next_task_id = (self._next_task_id + 1) % self._num_tasks
device_spec.job = self._job_name
device_spec.task = task_id
return device_spec.to_string()
def filter_variables(var_list, include_patterns=None, exclude_patterns=None,
reg_search=True):
"""Filter a list of variables using regular expressions.
First includes variables according to the list of include_patterns.
Afterwards, eliminates variables according to the list of exclude_patterns.
For example, one can obtain a list of variables with the weights of all
convolutional layers (depending on the network definition) by:
```python
variables = tf.contrib.framework.get_model_variables()
conv_weight_variables = tf.contrib.framework.filter_variables(
variables,
include_patterns=['Conv'],
exclude_patterns=['biases', 'Logits'])
```
Args:
var_list: list of variables.
include_patterns: list of regular expressions to include. Defaults to None,
which means all variables are selected according to the include rules.
A variable is included if it matches any of the include_patterns.
exclude_patterns: list of regular expressions to exclude. Defaults to None,
which means all variables are selected according to the exclude rules.
A variable is excluded if it matches any of the exclude_patterns.
reg_search: boolean. If True (default), performs re.search to find matches
(i.e. pattern can match any substring of the variable name). If False,
performs re.match (i.e. regexp should match from the beginning of the
variable name).
Returns:
filtered list of variables.
"""
if reg_search:
reg_exp_func = re.search
else:
reg_exp_func = re.match
# First include variables.
if include_patterns is None:
included_variables = list(var_list)
else:
included_variables = []
for var in var_list:
if any(reg_exp_func(ptrn, var.name) for ptrn in include_patterns):
included_variables.append(var)
# Afterwards, exclude variables.
if exclude_patterns is None:
filtered_variables = included_variables
else:
filtered_variables = []
for var in included_variables:
if not any(reg_exp_func(ptrn, var.name) for ptrn in exclude_patterns):
filtered_variables.append(var)
return filtered_variables
| mixturemodel-flow/tensorflow | tensorflow/contrib/framework/python/ops/variables.py | Python | apache-2.0 | 28,437 |
"""Add User
Revision ID: 5677ef75c712
Revises: 45e1cfacfc7d
Create Date: 2014-01-15 11:06:25.217408
"""
# revision identifiers, used by Alembic.
revision = '5677ef75c712'
down_revision = '45e1cfacfc7d'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'user',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('email', sa.String(length=128), nullable=False),
sa.Column('is_admin', sa.Boolean(), nullable=False),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
def downgrade():
op.drop_table('user')
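# Note (not part of the original migration): upgrade()/downgrade() are not run
# directly; Alembic invokes them from its command line, typically
# `alembic upgrade head` to apply and `alembic downgrade -1` to revert.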
| bowlofstew/changes | migrations/versions/5677ef75c712_add_user.py | Python | apache-2.0 | 679 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses L{PickleSerializer} to serialize objects using Python's
C{cPickle} serializer, which can serialize nearly any Python object.
Other serializers, like L{MarshalSerializer}, support fewer datatypes but can be
faster.
The serializer is chosen when creating L{SparkContext}:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of objects and is also configurable by SparkContext's C{batchSize}
parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
if sys.version < '3':
import cPickle as pickle
protocol = 2
from itertools import izip as zip, imap as map
else:
import pickle
protocol = 3
xrange = range
from pyspark import cloudpickle
from pyspark.util import _exception_message
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
END_OF_STREAM = -4
NULL = -5
START_ARROW_STREAM = -6
class Serializer(object):
def dump_stream(self, iterator, stream):
"""
Serialize an iterator of objects to the output stream.
"""
raise NotImplementedError
def load_stream(self, stream):
"""
Return an iterator of deserialized objects from the input stream.
"""
raise NotImplementedError
def _load_stream_without_unbatching(self, stream):
"""
Return an iterator of deserialized batches (iterable) of objects from the input stream.
If the serializer does not operate on batches the default implementation returns an
iterator of single element lists.
"""
return map(lambda x: [x], self.load_stream(stream))
# Note: our notion of "equality" is that output generated by
# equal serializers can be deserialized using the same serializer.
# This default implementation handles the simple cases;
# subclasses should override __eq__ as appropriate.
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
def __hash__(self):
return hash(str(self))
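# Illustrative sketch (hypothetical subclass, not part of PySpark): a custom
# serializer only needs dump_stream/load_stream. For example, a
# newline-delimited UTF-8 serializer could look like:
#
# class LineSerializer(Serializer):
#
#     def dump_stream(self, iterator, stream):
#         for obj in iterator:
#             stream.write((u"%s\n" % obj).encode("utf-8"))
#
#     def load_stream(self, stream):
#         for line in stream:
#             yield line.decode("utf-8").rstrip(u"\n")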
class FramedSerializer(Serializer):
"""
Serializer that writes objects as a stream of (length, data) pairs,
where C{length} is a 32-bit integer and data is C{length} bytes.
"""
def __init__(self):
# On Python 2.6, we can't write bytearrays to streams, so we need to convert them
# to strings first. Check if the version number is that old.
self._only_write_strings = sys.version_info[0:2] <= (2, 6)
def dump_stream(self, iterator, stream):
for obj in iterator:
self._write_with_length(obj, stream)
def load_stream(self, stream):
while True:
try:
yield self._read_with_length(stream)
except EOFError:
return
def _write_with_length(self, obj, stream):
serialized = self.dumps(obj)
if serialized is None:
raise ValueError("serialized value should not be None")
if len(serialized) > (1 << 31):
raise ValueError("can not serialize object larger than 2G")
write_int(len(serialized), stream)
if self._only_write_strings:
stream.write(str(serialized))
else:
stream.write(serialized)
def _read_with_length(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
obj = stream.read(length)
if len(obj) < length:
raise EOFError
return self.loads(obj)
def dumps(self, obj):
"""
Serialize an object into a byte array.
When batching is used, this will be called with an array of objects.
"""
raise NotImplementedError
def loads(self, obj):
"""
Deserialize an object from a byte array.
"""
raise NotImplementedError
class ArrowStreamSerializer(Serializer):
"""
Serializes Arrow record batches as a stream.
"""
def dump_stream(self, iterator, stream):
import pyarrow as pa
writer = None
try:
for batch in iterator:
if writer is None:
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
import pyarrow as pa
reader = pa.open_stream(stream)
for batch in reader:
yield batch
def __repr__(self):
return "ArrowStreamSerializer"
def _create_batch(series, timezone):
"""
Create an Arrow record batch from the given pandas.Series or list of Series, with optional type.
:param series: A single pandas.Series, list of Series, or list of (series, arrow_type)
:param timezone: A timezone to respect when handling timestamp values
:return: Arrow RecordBatch
"""
import decimal
from distutils.version import LooseVersion
import pyarrow as pa
from pyspark.sql.types import _check_series_convert_timestamps_internal
# Make input conform to [(series1, type1), (series2, type2), ...]
if not isinstance(series, (list, tuple)) or \
(len(series) == 2 and isinstance(series[1], pa.DataType)):
series = [series]
series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
def create_array(s, t):
mask = s.isnull()
# Ensure timestamp series are in expected form for Spark internal representation
# TODO: maybe don't need None check anymore as of Arrow 0.9.1
if t is not None and pa.types.is_timestamp(t):
s = _check_series_convert_timestamps_internal(s.fillna(0), timezone)
# TODO: need cast after Arrow conversion, ns values cause error with pandas 0.19.2
return pa.Array.from_pandas(s, mask=mask).cast(t, safe=False)
elif t is not None and pa.types.is_string(t) and sys.version < '3':
# TODO: need decode before converting to Arrow in Python 2
# TODO: don't need as of Arrow 0.9.1
return pa.Array.from_pandas(s.apply(
lambda v: v.decode("utf-8") if isinstance(v, str) else v), mask=mask, type=t)
elif t is not None and pa.types.is_decimal(t) and \
LooseVersion("0.9.0") <= LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
# TODO: see ARROW-2432. Remove when the minimum PyArrow version becomes 0.10.0.
return pa.Array.from_pandas(s.apply(
lambda v: decimal.Decimal('NaN') if v is None else v), mask=mask, type=t)
return pa.Array.from_pandas(s, mask=mask, type=t)
arrs = [create_array(s, t) for s, t in series]
return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
class ArrowStreamPandasSerializer(Serializer):
"""
Serializes Pandas.Series as Arrow data with Arrow streaming format.
"""
def __init__(self, timezone):
super(ArrowStreamPandasSerializer, self).__init__()
self._timezone = timezone
def arrow_to_pandas(self, arrow_column):
from pyspark.sql.types import from_arrow_type, \
_check_series_convert_date, _check_series_localize_timestamps
s = arrow_column.to_pandas()
s = _check_series_convert_date(s, from_arrow_type(arrow_column.type))
s = _check_series_localize_timestamps(s, self._timezone)
return s
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
import pyarrow as pa
writer = None
try:
for series in iterator:
batch = _create_batch(series, self._timezone)
if writer is None:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
import pyarrow as pa
reader = pa.open_stream(stream)
for batch in reader:
yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
def __repr__(self):
return "ArrowStreamPandasSerializer"
class BatchedSerializer(Serializer):
"""
Serializes a stream of objects in batches by calling its wrapped
Serializer with streams of objects.
"""
UNLIMITED_BATCH_SIZE = -1
UNKNOWN_BATCH_SIZE = 0
def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
self.serializer = serializer
self.batchSize = batchSize
def _batched(self, iterator):
if self.batchSize == self.UNLIMITED_BATCH_SIZE:
yield list(iterator)
elif hasattr(iterator, "__len__") and hasattr(iterator, "__getslice__"):
n = len(iterator)
for i in xrange(0, n, self.batchSize):
yield iterator[i: i + self.batchSize]
else:
items = []
count = 0
for item in iterator:
items.append(item)
count += 1
if count == self.batchSize:
yield items
items = []
count = 0
if items:
yield items
def dump_stream(self, iterator, stream):
self.serializer.dump_stream(self._batched(iterator), stream)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def _load_stream_without_unbatching(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):
"""
    Serializes a stream of (key, values) pairs, splitting any list of values
    that contains more than a certain number of objects so that the resulting
    batches have similar sizes.
"""
def __init__(self, serializer, batchSize=10):
BatchedSerializer.__init__(self, serializer, batchSize)
def _batched(self, iterator):
n = self.batchSize
for key, values in iterator:
for i in range(0, len(values), n):
yield key, values[i:i + n]
def load_stream(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):
"""
    Chooses the batch size automatically based on the serialized size of the objects.
"""
def __init__(self, serializer, bestSize=1 << 16):
BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
self.bestSize = bestSize
def dump_stream(self, iterator, stream):
batch, best = 1, self.bestSize
iterator = iter(iterator)
while True:
vs = list(itertools.islice(iterator, batch))
if not vs:
break
bytes = self.serializer.dumps(vs)
write_int(len(bytes), stream)
stream.write(bytes)
size = len(bytes)
if size < best:
batch *= 2
elif size > best * 10 and batch > 1:
batch //= 2
def __repr__(self):
return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):
"""
Deserializes the JavaRDD cartesian() of two PythonRDDs.
    Due to pyspark batching we cannot simply use the result of the Java RDD cartesian;
we additionally need to do the cartesian within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# for correctness with repeated cartesian/zip this must be returned as one batch
yield product(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "CartesianDeserializer(%s, %s)" % \
(str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):
"""
Deserializes the JavaRDD zip() of two PythonRDDs.
    Due to pyspark batching we cannot simply use the result of the Java RDD zip;
we additionally need to do the zip within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# For double-zipped RDDs, the batches can be iterators from other PairDeserializer,
# instead of lists. We need to convert them to lists if needed.
key_batch = key_batch if hasattr(key_batch, '__len__') else list(key_batch)
val_batch = val_batch if hasattr(val_batch, '__len__') else list(val_batch)
if len(key_batch) != len(val_batch):
raise ValueError("Can not deserialize PairRDD with different number of items"
" in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
# for correctness with repeated cartesian/zip this must be returned as one batch
yield zip(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
def loads(self, obj):
return obj
def dumps(self, obj):
return obj
# Hack namedtuple, make it picklable
__cls = {}
def _restore(name, fields, value):
""" Restore an object of namedtuple"""
k = (name, fields)
cls = __cls.get(k)
if cls is None:
cls = collections.namedtuple(name, fields)
__cls[k] = cls
return cls(*value)
def _hack_namedtuple(cls):
""" Make class generated by namedtuple picklable """
name = cls.__name__
fields = cls._fields
def __reduce__(self):
return (_restore, (name, fields, tuple(self)))
cls.__reduce__ = __reduce__
cls._is_namedtuple_ = True
return cls
def _hijack_namedtuple():
""" Hack namedtuple() to make it picklable """
# hijack only one time
if hasattr(collections.namedtuple, "__hijack"):
return
    global _old_namedtuple  # otherwise it would be captured in the closure
    global _old_namedtuple_kwdefaults  # otherwise it would be captured in the closure too
def _copy_func(f):
return types.FunctionType(f.__code__, f.__globals__, f.__name__,
f.__defaults__, f.__closure__)
def _kwdefaults(f):
# __kwdefaults__ contains the default values of keyword-only arguments which are
# introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple
# are as below:
#
# - Does not exist in Python 2.
# - Returns None in <= Python 3.5.x.
# - Returns a dictionary containing the default values to the keys from Python 3.6.x
# (See https://bugs.python.org/issue25628).
kargs = getattr(f, "__kwdefaults__", None)
if kargs is None:
return {}
else:
return kargs
_old_namedtuple = _copy_func(collections.namedtuple)
_old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple)
def namedtuple(*args, **kwargs):
for k, v in _old_namedtuple_kwdefaults.items():
kwargs[k] = kwargs.get(k, v)
cls = _old_namedtuple(*args, **kwargs)
return _hack_namedtuple(cls)
# replace namedtuple with the new one
collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
collections.namedtuple.__code__ = namedtuple.__code__
collections.namedtuple.__hijack = 1
# hack the cls already generated by namedtuple.
# Those created in other modules can be pickled as normal,
# so only hack those in __main__ module
for n, o in sys.modules["__main__"].__dict__.items():
if (type(o) is type and o.__base__ is tuple
and hasattr(o, "_fields")
and "__reduce__" not in o.__dict__):
_hack_namedtuple(o) # hack inplace
_hijack_namedtuple()
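# Illustrative sketch, not part of PySpark: once _hijack_namedtuple() has run,
# namedtuple classes defined afterwards (for example in __main__ or a REPL)
# carry a __reduce__ and therefore survive a pickle round trip.
def _example_namedtuple_pickle():
    Point = collections.namedtuple("Point", "x y")  # uses the patched namedtuple
    return pickle.loads(pickle.dumps(Point(1, 2))) == Point(1, 2)  # True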
class PickleSerializer(FramedSerializer):
"""
Serializes objects using Python's pickle serializer:
http://docs.python.org/2/library/pickle.html
This serializer supports nearly any Python object, but may
not be as fast as more specialized serializers.
"""
def dumps(self, obj):
return pickle.dumps(obj, protocol)
if sys.version >= '3':
def loads(self, obj, encoding="bytes"):
return pickle.loads(obj, encoding=encoding)
else:
def loads(self, obj, encoding=None):
return pickle.loads(obj)
class CloudPickleSerializer(PickleSerializer):
def dumps(self, obj):
try:
return cloudpickle.dumps(obj, 2)
except pickle.PickleError:
raise
except Exception as e:
emsg = _exception_message(e)
if "'i' format requires" in emsg:
msg = "Object too large to serialize: %s" % emsg
else:
msg = "Could not serialize object: %s: %s" % (e.__class__.__name__, emsg)
cloudpickle.print_exec(sys.stderr)
raise pickle.PicklingError(msg)
class MarshalSerializer(FramedSerializer):
"""
Serializes objects using Python's Marshal serializer:
http://docs.python.org/2/library/marshal.html
This serializer is faster than PickleSerializer but supports fewer datatypes.
"""
def dumps(self, obj):
return marshal.dumps(obj)
def loads(self, obj):
return marshal.loads(obj)
class AutoSerializer(FramedSerializer):
"""
    Chooses marshal or pickle as the serialization protocol automatically.
"""
def __init__(self):
FramedSerializer.__init__(self)
self._type = None
def dumps(self, obj):
if self._type is not None:
return b'P' + pickle.dumps(obj, -1)
try:
return b'M' + marshal.dumps(obj)
except Exception:
self._type = b'P'
return b'P' + pickle.dumps(obj, -1)
def loads(self, obj):
_type = obj[0]
if _type == b'M':
return marshal.loads(obj[1:])
elif _type == b'P':
return pickle.loads(obj[1:])
else:
raise ValueError("invalid serialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):
"""
Compress the serialized data
"""
def __init__(self, serializer):
FramedSerializer.__init__(self)
assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
self.serializer = serializer
def dumps(self, obj):
return zlib.compress(self.serializer.dumps(obj), 1)
def loads(self, obj):
return self.serializer.loads(zlib.decompress(obj))
def __repr__(self):
return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):
"""
Deserializes streams written by String.getBytes.
"""
def __init__(self, use_unicode=True):
self.use_unicode = use_unicode
def loads(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
s = stream.read(length)
return s.decode("utf-8") if self.use_unicode else s
def load_stream(self, stream):
try:
while True:
yield self.loads(stream)
except struct.error:
return
except EOFError:
return
def __repr__(self):
return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
length = stream.read(8)
if not length:
raise EOFError
return struct.unpack("!q", length)[0]
def write_long(value, stream):
stream.write(struct.pack("!q", value))
def pack_long(value):
return struct.pack("!q", value)
def read_int(stream):
length = stream.read(4)
if not length:
raise EOFError
return struct.unpack("!i", length)[0]
def write_int(value, stream):
stream.write(struct.pack("!i", value))
def read_bool(stream):
length = stream.read(1)
if not length:
raise EOFError
return struct.unpack("!?", length)[0]
def write_with_length(obj, stream):
write_int(len(obj), stream)
stream.write(obj)
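# Illustrative sketch, not part of PySpark: the framing helpers write a
# big-endian length prefix followed by the payload bytes.
def _example_framing_helpers():
    import io
    buf = io.BytesIO()
    write_with_length(b"hello", buf)
    buf.seek(0)
    return buf.read(read_int(buf))  # b'hello'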
class ChunkedStream(object):
"""
    This is a file-like object that takes a stream of data, of unknown length, and breaks
    it into fixed-length frames. The intended use case is serializing large data and
    sending it immediately over
a socket -- we do not want to buffer the entire data before sending it, but the receiving end
needs to know whether or not there is more data coming.
It works by buffering the incoming data in some fixed-size chunks. If the buffer is full, it
first sends the buffer size, then the data. This repeats as long as there is more data to send.
When this is closed, it sends the length of whatever data is in the buffer, then that data, and
finally a "length" of -1 to indicate the stream has completed.
"""
def __init__(self, wrapped, buffer_size):
self.buffer_size = buffer_size
self.buffer = bytearray(buffer_size)
self.current_pos = 0
self.wrapped = wrapped
def write(self, bytes):
byte_pos = 0
byte_remaining = len(bytes)
while byte_remaining > 0:
new_pos = byte_remaining + self.current_pos
if new_pos < self.buffer_size:
# just put it in our buffer
self.buffer[self.current_pos:new_pos] = bytes[byte_pos:]
self.current_pos = new_pos
byte_remaining = 0
else:
# fill the buffer, send the length then the contents, and start filling again
space_left = self.buffer_size - self.current_pos
new_byte_pos = byte_pos + space_left
self.buffer[self.current_pos:self.buffer_size] = bytes[byte_pos:new_byte_pos]
write_int(self.buffer_size, self.wrapped)
self.wrapped.write(self.buffer)
byte_remaining -= space_left
byte_pos = new_byte_pos
self.current_pos = 0
def close(self):
# if there is anything left in the buffer, write it out first
if self.current_pos > 0:
write_int(self.current_pos, self.wrapped)
self.wrapped.write(self.buffer[:self.current_pos])
# -1 length indicates to the receiving end that we're done.
write_int(-1, self.wrapped)
self.wrapped.close()
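# Illustrative sketch, not part of PySpark: with a 4-byte buffer, writing ten
# bytes through ChunkedStream emits the frames [4|abcd][4|efgh][2|ij][-1].
def _example_chunked_stream():
    import io
    class _KeepOpen(io.BytesIO):
        def close(self):  # keep the buffer readable after ChunkedStream.close()
            pass
    out = _KeepOpen()
    chunked = ChunkedStream(out, buffer_size=4)
    chunked.write(b"abcdefghij")
    chunked.close()
    return out.getvalue()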
if __name__ == '__main__':
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
sys.exit(-1)
| michalsenkyr/spark | python/pyspark/serializers.py | Python | apache-2.0 | 26,207 |
"""
Decision Tree Classification of photometry
------------------------------------------
Figure 9.13
Decision tree applied to the RR Lyrae data (see caption of figure 9.3 for
details). This example uses tree depths of 7 and 12. With all four colors, this
decision tree achieves a completeness of 0.569 and a contamination of 0.386.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from astroML.datasets import fetch_rrlyrae_combined
from astroML.utils import split_samples
from astroML.utils import completeness_contamination
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#----------------------------------------------------------------------
# get data and split into training & testing sets
X, y = fetch_rrlyrae_combined()
X = X[:, [1, 0, 2, 3]] # rearrange columns for better 1-color results
(X_train, X_test), (y_train, y_test) = split_samples(X, y, [0.75, 0.25],
random_state=0)
N_tot = len(y)
N_st = np.sum(y == 0)
N_rr = N_tot - N_st
N_train = len(y_train)
N_test = len(y_test)
N_plot = 5000 + N_rr
#----------------------------------------------------------------------
# Fit Decision tree
Ncolors = np.arange(1, X.shape[1] + 1)
classifiers = []
predictions = []
Ncolors = np.arange(1, X.shape[1] + 1)
depths = [7, 12]
for depth in depths:
classifiers.append([])
predictions.append([])
for nc in Ncolors:
clf = DecisionTreeClassifier(random_state=0, max_depth=depth,
criterion='entropy')
clf.fit(X_train[:, :nc], y_train)
y_pred = clf.predict(X_test[:, :nc])
classifiers[-1].append(clf)
predictions[-1].append(y_pred)
completeness, contamination = completeness_contamination(predictions, y_test)
print("completeness", completeness)
print("contamination", contamination)
#------------------------------------------------------------
# compute the decision boundary
clf = classifiers[1][1]
xlim = (0.7, 1.35)
ylim = (-0.15, 0.4)
xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 101),
np.linspace(ylim[0], ylim[1], 101))
Z = clf.predict(np.c_[yy.ravel(), xx.ravel()])
Z = Z.reshape(xx.shape)
#----------------------------------------------------------------------
# plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(bottom=0.15, top=0.95, hspace=0.0,
left=0.1, right=0.95, wspace=0.2)
# left plot: data and decision boundary
ax = fig.add_subplot(121)
im = ax.scatter(X[-N_plot:, 1], X[-N_plot:, 0], c=y[-N_plot:],
s=4, lw=0, cmap=plt.cm.binary, zorder=2)
im.set_clim(-0.5, 1)
ax.contour(xx, yy, Z, [0.5], colors='k')
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xlabel('$u-g$')
ax.set_ylabel('$g-r$')
ax.text(0.02, 0.02, "depth = %i" % depths[1],
transform=ax.transAxes)
# plot completeness vs Ncolors
ax = fig.add_subplot(222)
ax.plot(Ncolors, completeness[0], 'o-k', ms=6, label="depth=%i" % depths[0])
ax.plot(Ncolors, completeness[1], '^--k', ms=6, label="depth=%i" % depths[1])
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylabel('completeness')
ax.set_xlim(0.5, 4.5)
ax.set_ylim(-0.1, 1.1)
ax.grid(True)
# plot contamination vs Ncolors
ax = fig.add_subplot(224)
ax.plot(Ncolors, contamination[0], 'o-k', ms=6, label="depth=%i" % depths[0])
ax.plot(Ncolors, contamination[1], '^--k', ms=6, label="depth=%i" % depths[1])
ax.legend(loc='lower right',
bbox_to_anchor=(1.0, 0.79))
ax.xaxis.set_major_locator(plt.MultipleLocator(1))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%i'))
ax.set_xlabel('N colors')
ax.set_ylabel('contamination')
ax.set_xlim(0.5, 4.5)
ax.set_ylim(-0.1, 1.1)
ax.grid(True)
plt.show()
| eramirem/astroML | book_figures/chapter9/fig_rrlyrae_decisiontree.py | Python | bsd-2-clause | 4,685 |
# Copyright (c) 2009, 2012-2013, 2015 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
from m5.params import *
from System import System
class ArmMachineType(Enum):
map = {
'RealViewEB' : 827,
'RealViewPBX' : 1901,
'VExpress_EMM' : 2272,
'VExpress_EMM64' : 2272,
'DTOnly' : -1,
}
class ArmSystem(System):
type = 'ArmSystem'
cxx_header = "arch/arm/system.hh"
load_addr_mask = 0xffffffff
multi_proc = Param.Bool(True, "Multiprocessor system?")
boot_loader = VectorParam.String([],
"File that contains the boot loader code. Zero or more files may be "
"specified. The first boot loader that matches the kernel's "
"architecture will be used.")
    gic_cpu_addr = Param.Addr(0, "Address of the GIC CPU interface")
flags_addr = Param.Addr(0, "Address of the flags register for MP booting")
have_security = Param.Bool(False,
"True if Security Extensions are implemented")
have_virtualization = Param.Bool(False,
"True if Virtualization Extensions are implemented")
have_lpae = Param.Bool(True, "True if LPAE is implemented")
highest_el_is_64 = Param.Bool(False,
"True if the register width of the highest implemented exception level "
"is 64 bits (ARMv8)")
reset_addr_64 = Param.Addr(0x0,
"Reset address if the highest implemented exception level is 64 bits "
"(ARMv8)")
phys_addr_range_64 = Param.UInt8(40,
"Supported physical address range in bits when using AArch64 (ARMv8)")
have_large_asid_64 = Param.Bool(False,
"True if ASID is 16 bits in AArch64 (ARMv8)")
class GenericArmSystem(ArmSystem):
type = 'GenericArmSystem'
cxx_header = "arch/arm/system.hh"
load_addr_mask = 0x0fffffff
machine_type = Param.ArmMachineType('VExpress_EMM',
"Machine id from http://www.arm.linux.org.uk/developer/machines/")
atags_addr = Param.Addr("Address where default atags structure should " \
"be written")
dtb_filename = Param.String("",
"File that contains the Device Tree Blob. Don't use DTB if empty.")
early_kernel_symbols = Param.Bool(False,
"enable early kernel symbol tables before MMU")
enable_context_switch_stats_dump = Param.Bool(False, "enable stats/task info dumping at context switch boundaries")
panic_on_panic = Param.Bool(False, "Trigger a gem5 panic if the " \
"guest kernel panics")
panic_on_oops = Param.Bool(False, "Trigger a gem5 panic if the " \
"guest kernel oopses")
class LinuxArmSystem(GenericArmSystem):
type = 'LinuxArmSystem'
cxx_header = "arch/arm/linux/system.hh"
@classmethod
def export_methods(cls, code):
code('''void dumpDmesg();''')
class FreebsdArmSystem(GenericArmSystem):
type = 'FreebsdArmSystem'
cxx_header = "arch/arm/freebsd/system.hh"
| SanchayanMaity/gem5 | src/arch/arm/ArmSystem.py | Python | bsd-3-clause | 4,979 |
# BuildTarget: images/conceptPerformanceBestPracticesContextsGraphEditor.png
# BuildTarget: images/conceptPerformanceBestPracticesContextsImprovedStats.png
# BuildTarget: images/conceptPerformanceBestPracticesContextsStats.png
# BuildTarget: images/conceptPerformanceBestPracticesContextsViewer.png
# BuildTarget: images/graphEditorGroupFirst.png
# BuildTarget: images/graphEditorGroupSecond.png
import os
import time
import subprocess32 as subprocess
import imath
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
# Delay for x seconds
def __delay( delay ) :
endtime = time.time() + delay
while time.time() < endtime :
GafferUI.EventLoop.waitForIdle( 1 )
mainWindow = GafferUI.ScriptWindow.acquire( script )
viewer = mainWindow.getLayout().editors( GafferUI.Viewer )[0]
graphEditor = mainWindow.getLayout().editors( GafferUI.GraphEditor )[0]
# First sample node graph in Graph Editor
script["fileName"].setValue( os.path.abspath( "scripts/groupFirst.gfr" ) )
script.load()
graphEditor.frame( script.children( Gaffer.Node ) )
GafferUI.WidgetAlgo.grab( widget = graphEditor, imagePath = "images/graphEditorGroupFirst.png" )
# Second sample node graph in Graph Editor
script["fileName"].setValue( os.path.abspath( "scripts/groupSecond.gfr" ) )
script.load()
graphEditor.frame( script.children( Gaffer.Node ) )
GafferUI.WidgetAlgo.grab( widget = graphEditor, imagePath = "images/graphEditorGroupSecond.png" )
# Concept: Context performance in Viewer
script["fileName"].setValue( os.path.abspath( "scripts/conceptPerformanceBestPracticesContexts.gfr" ) )
script.load()
script.selection().add( Gaffer.StandardSet( [ script["CollectScenes"] ] ) )
script.setFocus( script["CollectScenes"] )
__delay( 0.1 )
viewport = viewer.view().viewportGadget()
viewport.frame( viewport.getPrimaryChild().bound() )
viewer.view()["minimumExpansionDepth"].setValue( 100 )
__delay( 0.5 )
# Side-on look at scene
cameraTransform = imath.M44f(
( 1, 0, 0, 0 ),
( 0, 1, 0, 0 ),
( 0, 0, 1, 0 ),
( 60, 5, 200, 1 )
)
viewport.setCameraTransform( cameraTransform )
GafferUI.WidgetAlgo.grab( widget = viewer, imagePath = "images/conceptPerformanceBestPracticesContextsViewer.png" )
# Concept: Context performance network in Graph Editor
graphEditor.frame( script.children( Gaffer.Node ) )
with GafferUI.Window() as window :
graphEditorWindow = GafferUI.GraphEditor( script )
graphEditorWindow.parent().reveal()
graphEditorWindow.parent()._qtWidget().resize( 800, 520 )
__delay( 0.01 )
graphEditorWindow.frame( script.children( Gaffer.Node ) )
__delay( 0.01 )
GafferUI.WidgetAlgo.grab( widget = graphEditorWindow, imagePath = "images/conceptPerformanceBestPracticesContextsGraphEditor.png" )
graphEditorWindow.parent().close()
del graphEditorWindow
# Concept: Context performance network with stats
name = "conceptPerformanceBestPracticesContexts"
nameImproved = name + "Improved"
inputScript = os.path.abspath( "scripts/{name}.gfr".format( name = name ) )
outputScript = os.path.abspath( "scripts/{name}.gfr".format( name = name + "Stats" ) )
improvedScript = os.path.abspath( "scripts/{name}.gfr".format( name = nameImproved ) )
command = "gaffer stats {inputScript} -scene {node} -contextMonitor -annotatedScript {outputScript}".format(
inputScript = inputScript,
node = "CollectScenes",
outputScript = outputScript
)
subprocess.check_call( command, shell=True )
script["fileName"].setValue( outputScript )
script.load()
with GafferUI.Window() as window :
graphEditorWindow = GafferUI.GraphEditor( script )
graphEditorWindow.parent().reveal()
graphEditorWindow.parent()._qtWidget().resize( 800, 520 )
__delay( 0.01 )
GafferUI.WidgetAlgo.grab( widget = graphEditorWindow, imagePath = "images/{imageName}.png".format( imageName = name + "Stats" ) )
graphEditorWindow.parent().close()
del graphEditorWindow
script.addChild( Gaffer.DeleteContextVariables() )
script["DeleteContextVariables"].setup( GafferScene.ScenePlug( "in", ) )
script["DeleteContextVariables"].addChild( Gaffer.V2fPlug( "__uiPosition", defaultValue = imath.V2f( 0, 0 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, ) )
script["DeleteContextVariables"]["variables"].setValue( 'collect:rootName' )
script["DeleteContextVariables"]["in"].setInput( script["Group"]["out"] )
script["Transform"]["in"].setInput( script["DeleteContextVariables"]["out"] )
script["DeleteContextVariables"]["__uiPosition"].setValue( imath.V2f( -3.0, 4.75 ) )
script["fileName"].setValue( improvedScript )
script.save()
# Concept: Context performance network with improved stats
outputScript = os.path.abspath( "scripts/{name}.gfr".format( name = nameImproved + "Stats" ) )
command = "gaffer stats {inputScript} -scene {node} -contextMonitor -annotatedScript {outputScript}".format(
inputScript = improvedScript,
node = "CollectScenes",
outputScript = outputScript
)
subprocess.check_call( command, shell=True )
script["fileName"].setValue( outputScript )
script.load()
with GafferUI.Window() as window :
graphEditorWindow = GafferUI.GraphEditor( script )
graphEditorWindow.parent().reveal()
graphEditorWindow.parent()._qtWidget().resize( 800, 520 )
__delay( 0.01 )
GafferUI.WidgetAlgo.grab( widget = graphEditorWindow, imagePath = "images/{imageName}.png".format( imageName = nameImproved + "Stats" ) )
graphEditorWindow.parent().close()
del graphEditorWindow
| hradec/gaffer | doc/source/WorkingWithTheNodeGraph/PerformanceBestPractices/screengrab.py | Python | bsd-3-clause | 5,338 |
#!/usr/bin/python
from mod_pywebsocket import msgutil
import time
import urllib
def web_socket_do_extra_handshake(request):
msgutil._write(request, 'x')
time.sleep(2)
def web_socket_transfer_data(request):
msgutil._write(request, urllib.unquote(request.ws_location.split('?', 1)[1]).decode("string-escape"))
time.sleep(2)
| operasoftware/presto-testo | wpt/websockets/websock_handlers/sleep_2_then_open_raw_wsh.py | Python | bsd-3-clause | 330 |
from __future__ import unicode_literals
import cgi
import codecs
import logging
import sys
from io import BytesIO
from threading import Lock
import warnings
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
from django.utils import six
# For backwards compatibility -- lots of code uses this in the wild!
from django.http.response import REASON_PHRASES as STATUS_CODE_TEXT # NOQA
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
'''
LimitedStream wraps another stream in order to not allow reading from it
    past a specified number of bytes.
'''
def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
self.stream = stream
self.remaining = limit
self.buffer = b''
self.buf_size = buf_size
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b''
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b''
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b''
return result
def readline(self, size=None):
while b'\n' not in self.buffer and \
(size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
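# Illustrative sketch, not part of Django: LimitedStream never reads past the
# declared limit, even when the wrapped stream holds more data.
def _limited_stream_example():
    stream = LimitedStream(BytesIO(b"spam\neggs\n"), limit=5)
    first = stream.readline()  # b'spam\n'
    rest = stream.read()       # b'' -- the 5-byte limit is already exhausted
    return first, rest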
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = get_script_name(environ)
path_info = get_path_info(environ)
if not path_info:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
path_info = '/'
self.environ = environ
self.path_info = path_info
self.path = '%s/%s' % (script_name.rstrip('/'), path_info.lstrip('/'))
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
_, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
if 'charset' in content_params:
try:
codecs.lookup(content_params['charset'])
except LookupError:
pass
else:
self.encoding = content_params['charset']
self._post_parse_error = False
try:
content_length = int(environ.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
self._read_started = False
self.resolver_match = None
def _get_scheme(self):
return self.environ.get('wsgi.url_scheme')
def _get_request(self):
warnings.warn('`request.REQUEST` is deprecated, use `request.GET` or '
'`request.POST` instead.', PendingDeprecationWarning, 2)
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
@cached_property
def GET(self):
# The WSGI spec says 'QUERY_STRING' may be absent.
raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
return http.QueryDict(raw_query_string, encoding=self._encoding)
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
@cached_property
def COOKIES(self):
raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
return http.parse_cookie(raw_cookie)
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
FILES = property(_get_files)
REQUEST = property(_get_request)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
with self.initLock:
try:
# Check that middleware is still uninitialised.
if self._request_middleware is None:
self.load_middleware()
except:
# Unload whatever middleware we got
self._request_middleware = None
raise
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__)
try:
request = self.request_class(environ)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={
'status_code': 400,
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
response._handler_class = self.__class__
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
start_response(force_str(status), response_headers)
return response
def get_path_info(environ):
"""
Returns the HTTP request's PATH_INFO as a unicode string.
"""
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/')
# It'd be better to implement URI-to-IRI decoding, see #19508.
return path_info.decode(UTF_8)
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
if settings.FORCE_SCRIPT_NAME is not None:
return force_text(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
if not script_url:
script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')
if script_url:
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
script_name = script_url[:-len(path_info)]
else:
script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')
# It'd be better to implement URI-to-IRI decoding, see #19508.
return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as bytes.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. This is wrong for Django websites where UTF-8
# is the default. Re-encode to recover the original bytestring.
return value if six.PY2 else value.encode(ISO_8859_1)
def get_str_from_wsgi(environ, key, default):
"""
    Get a value from the WSGI environ dictionary as str.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Same comment as above
return value if six.PY2 else value.encode(ISO_8859_1).decode(UTF_8)
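# Illustrative sketch, not part of Django: under Python 3 a non-ASCII path
# reaches the WSGI environ latin-1 decoded; re-encoding with ISO-8859-1 and
# decoding as UTF-8 recovers the intended text. The environ below is fabricated.
def _path_info_example():
    environ = {'PATH_INFO': b'/caf\xc3\xa9'.decode(ISO_8859_1)}
    return get_path_info(environ)  # u'/caf\xe9'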
| DrMeers/django | django/core/handlers/wsgi.py | Python | bsd-3-clause | 9,435 |
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.contrib import admin
from django.shortcuts import render
import autocomplete_light
import tower
from mozillians.common.monkeypatches import patch
# Funfactory monkeypatches customized to work with Django 1.7 admin
patch()
autocomplete_light.autodiscover()
# Activate a locale so that jinja2 doesn't choke when running a shell
# or individual tests that need translation and don't involve a web
# request, like when testing emails.
tower.activate('en-US')
def error_page(request, template, status=None):
"""Render error templates, found in the root /templates directory.
    If no status parameter is explicitly passed, this function assumes
your HTTP status code is the same as your template name (i.e. passing
a template=404 will render 404.html with the HTTP status code 404).
"""
return render(request, '%d.html' % template, status=(status or template))
handler404 = lambda r: error_page(r, 404)
handler500 = lambda r: error_page(r, 500)
handler_csrf = lambda r, cb=None: error_page(r, 'csrf_error', status=400)
urlpatterns = patterns(
'',
url(r'', include('django_browserid.urls')),
url(r'^api/', include('mozillians.api.urls')),
url(r'', include('mozillians.groups.urls', 'groups')),
url(r'', include('mozillians.phonebook.urls', 'phonebook')),
# Admin URLs.
url(r'^admin/', include(admin.site.urls)),
url(r'^_autocomplete/', include('autocomplete_light.urls')),
url(r'', include('mozillians.humans.urls', 'humans')),
)
# In DEBUG mode, serve media files through Django, and serve error pages
# via predictable routes. Add in qunit tests.
if settings.DEBUG:
# Remove leading and trailing slashes so the regex matches.
urlpatterns += patterns(
'',
# Add the 404, 500, and csrf pages for testing
url(r'^404/$', handler404),
url(r'^500/$', handler500),
url(r'^csrf/$', handler_csrf),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}))
| chirilo/mozillians | mozillians/urls.py | Python | bsd-3-clause | 2,122 |
"""
.. todo::
WRITEME
"""
import logging
import numpy as np
plt = None
axes = None
from theano.compat.six.moves import xrange
import warnings
try:
import matplotlib.pyplot as plt
import matplotlib.axes
except (RuntimeError, ImportError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
import os
try:
from PIL import Image
except ImportError:
Image = None
from pylearn2.utils import string_utils as string
from pylearn2.utils.exc import reraise_as
from tempfile import mkstemp
from multiprocessing import Process
import subprocess
logger = logging.getLogger(__name__)
def ensure_Image():
"""Makes sure Image has been imported from PIL"""
global Image
if Image is None:
raise RuntimeError("You are trying to use PIL-dependent functionality"
" but don't have PIL installed.")
def imview(*args, **kwargs):
"""
A more sensible matplotlib-based image viewer command,
a wrapper around `matplotlib.pyplot.imshow`.
Parameters are identical to `matplotlib.pyplot.imshow`
but this behaves somewhat differently:
* By default, it creates a new figure (unless a
`figure` keyword argument is supplied.
* It modifies the axes of that figure to use the
full frame, without ticks or tick labels.
* It turns on `nearest` interpolation by default
(i.e., it does not antialias pixel data). This
can be overridden with the `interpolation`
argument as in `imshow`.
All other arguments and keyword arguments are passed
    on to `imshow`.
"""
if 'figure' not in kwargs:
f = plt.figure()
else:
f = kwargs['figure']
new_ax = matplotlib.axes.Axes(f,
[0, 0, 1, 1],
xticks=[],
yticks=[],
frame_on=False)
f.delaxes(f.gca())
f.add_axes(new_ax)
if len(args) < 5 and 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
plt.imshow(*args, **kwargs)
def imview_async(*args, **kwargs):
"""
A version of `imview` that forks a separate process and
immediately shows the image.
Supports the `window_title` keyword argument to cope with
the title always being 'Figure 1'.
Returns the `multiprocessing.Process` handle.
"""
if 'figure' in kwargs:
raise ValueError("passing a figure argument not supported")
def fork_image_viewer():
f = plt.figure()
kwargs['figure'] = f
imview(*args, **kwargs)
if 'window_title' in kwargs:
f.set_window_title(kwargs['window_title'])
plt.show()
p = Process(None, fork_image_viewer)
p.start()
return p
def show(image):
"""
.. todo::
WRITEME
Parameters
----------
image : PIL Image object or ndarray
If ndarray, integer formats are assumed to use 0-255
and float formats are assumed to use 0-1
"""
viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}')
if viewer_command == 'inline':
return imview(image)
if hasattr(image, '__array__'):
#do some shape checking because PIL just raises a tuple indexing error
#that doesn't make it very clear what the problem is
if len(image.shape) < 2 or len(image.shape) > 3:
raise ValueError('image must have either 2 or 3 dimensions but its'
' shape is ' + str(image.shape))
if image.dtype == 'int8':
image = np.cast['uint8'](image)
elif str(image.dtype).startswith('float'):
#don't use *=, we don't want to modify the input array
image = image * 255.
image = np.cast['uint8'](image)
#PIL is too stupid to handle single-channel arrays
if len(image.shape) == 3 and image.shape[2] == 1:
image = image[:,:,0]
try:
ensure_Image()
image = Image.fromarray(image)
except TypeError:
reraise_as(TypeError("PIL issued TypeError on ndarray of shape " +
str(image.shape) + " and dtype " +
str(image.dtype)))
# Create a temporary file with the suffix '.png'.
fd, name = mkstemp(suffix='.png')
os.close(fd)
# Note:
# Although we can use tempfile.NamedTemporaryFile() to create
# a temporary file, the function should be used with care.
#
# In Python earlier than 2.7, a temporary file created by the
# function will be deleted just after the file is closed.
# We can re-use the name of the temporary file, but there is an
# instant where a file with the name does not exist in the file
# system before we re-use the name. This may cause a race
# condition.
#
# In Python 2.7 or later, tempfile.NamedTemporaryFile() has
# the 'delete' argument which can control whether a temporary
# file will be automatically deleted or not. With the argument,
# the above race condition can be avoided.
#
image.save(name)
if os.name == 'nt':
subprocess.Popen(viewer_command + ' ' + name +' && del ' + name,
shell=True)
else:
subprocess.Popen(viewer_command + ' ' + name +' ; rm ' + name,
shell=True)
def pil_from_ndarray(ndarray):
"""
.. todo::
WRITEME
"""
try:
if ndarray.dtype == 'float32' or ndarray.dtype == 'float64':
assert ndarray.min() >= 0.0
assert ndarray.max() <= 1.0
ndarray = np.cast['uint8'](ndarray * 255)
if len(ndarray.shape) == 3 and ndarray.shape[2] == 1:
ndarray = ndarray[:, :, 0]
ensure_Image()
rval = Image.fromarray(ndarray)
return rval
except Exception as e:
logger.exception('original exception: ')
logger.exception(e)
logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype))
logger.exception('ndarray.shape: {0}'.format(ndarray.shape))
raise
assert False
def ndarray_from_pil(pil, dtype='uint8'):
"""
.. todo::
WRITEME
"""
rval = np.asarray(pil)
if dtype != rval.dtype:
rval = np.cast[dtype](rval)
if str(dtype).startswith('float'):
rval /= 255.
if len(rval.shape) == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
return rval
def rescale(image, shape):
"""
Scales image to be no larger than shape. PIL might give you
unexpected results beyond that.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
i = pil_from_ndarray(image)
ensure_Image()
i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS)
rval = ndarray_from_pil(i, dtype=image.dtype)
return rval
resize = rescale
def fit_inside(image, shape):
"""
    Scales image down to fit inside shape, preserving the proportions of the image.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]:
return image.copy()
row_ratio = float(image.shape[0]) / float(shape[0])
col_ratio = float(image.shape[1]) / float(shape[1])
if row_ratio > col_ratio:
target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])]
else:
target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]]
assert target_shape[0] <= shape[0]
assert target_shape[1] <= shape[1]
assert target_shape[0] == shape[0] or target_shape[1] == shape[1]
rval = rescale(image, target_shape)
return rval
def letterbox(image, shape):
"""
Pads image with black letterboxing to bring image.shape up to shape
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
assert image.shape[0] <= shape[0]
assert image.shape[1] <= shape[1]
if image.shape[0] == shape[0] and image.shape[1] == shape[1]:
return image.copy()
rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype)
rstart = (shape[0] - image.shape[0]) / 2
cstart = (shape[1] - image.shape[1]) / 2
rend = rstart + image.shape[0]
cend = cstart + image.shape[1]
rval[rstart:rend, cstart:cend] = image
return rval
def make_letterboxed_thumbnail(image, shape):
"""
    Scales image down to shape, preserving the proportions of the image and introducing
black letterboxing if necessary.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3
assert len(shape) == 2
shrunk = fit_inside(image, shape)
letterboxed = letterbox(shrunk, shape)
return letterboxed
def load(filepath, rescale_image=True, dtype='float64'):
"""
.. todo::
WRITEME
"""
assert type(filepath) == str
if rescale_image == False and dtype == 'uint8':
ensure_Image()
rval = np.asarray(Image.open(filepath))
# print 'image.load: ' + str((rval.min(), rval.max()))
assert rval.dtype == 'uint8'
return rval
s = 1.0
if rescale_image:
s = 255.
try:
ensure_Image()
rval = Image.open(filepath)
except Exception:
reraise_as(Exception("Could not open " + filepath))
numpy_rval = np.array(rval)
if numpy_rval.ndim not in [2,3]:
logger.error(dir(rval))
logger.error(rval)
logger.error(rval.size)
rval.show()
raise AssertionError("Tried to load an image, got an array with " +
str(numpy_rval.ndim)+" dimensions. Expected 2 or 3."
"This may indicate a mildly corrupted image file. Try "
"converting it to a different image format with a different "
"editor like gimp or imagemagic. Sometimes these programs are "
"more robust to minor corruption than PIL and will emit a "
"correctly formatted image in the new format."
)
rval = numpy_rval
rval = np.cast[dtype](rval) / s
if rval.ndim == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
if rval.ndim != 3:
raise AssertionError("Something went wrong opening " +
filepath + '. Resulting shape is ' +
str(rval.shape) +
" (it's meant to have 3 dimensions by now)")
return rval
def save(filepath, ndarray):
"""
.. todo::
WRITEME
"""
pil_from_ndarray(ndarray).save(filepath)
def scale_to_unit_interval(ndar, eps=1e-8):
"""
Scales all values in the ndarray ndar to be between 0 and 1
Parameters
----------
ndar : WRITEME
eps : WRITEME
Returns
-------
WRITEME
"""
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and layed out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
Parameters
----------
    X : numpy.ndarray
        2-d ndarray or 4 tuple of 2-d ndarrays or None for channels,
        in which every row is a flattened image.
    img_shape : 2-tuple of ints
The first component is the height of each image,
the second component is the width.
tile_shape : 2-tuple of ints
The number of images to tile in (row, columns) form.
scale_rows_to_unit_interval : bool
Whether or not the values need to be before being plotted to [0, 1].
output_pixel_vals : bool
        Whether or not the output should be pixel values (uint8) or floats.
Returns
-------
y : 2d-ndarray
The return value has the same dtype as X, and is suitable for
viewing as an image with PIL.Image.fromarray.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \
channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
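def _tile_raster_example():
    """
    Illustrative sketch (not part of the pylearn2 API): tile four flattened
    2x2 grayscale images into a 2x2 grid with 1-pixel spacing, producing a
    5x5 uint8 array.
    """
    X = np.arange(16, dtype='float64').reshape(4, 4) / 15.
    return tile_raster_images(X, img_shape=(2, 2), tile_shape=(2, 2),
                              tile_spacing=(1, 1))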
if __name__ == '__main__':
black = np.zeros((50, 50, 3), dtype='uint8')
red = black.copy()
red[:, :, 0] = 255
green = black.copy()
green[:, :, 1] = 255
show(black)
show(green)
show(red)
| JesseLivezey/plankton | pylearn2/utils/image.py | Python | bsd-3-clause | 16,346 |
import time
import datetime
from flask_admin.babel import lazy_gettext
class BaseFilter(object):
"""
Base filter class.
"""
def __init__(self, name, options=None, data_type=None):
"""
Constructor.
:param name:
Displayed name
:param options:
List of fixed options. If provided, will use drop down instead of textbox.
:param data_type:
Client-side widget type to use.
"""
self.name = name
self.options = options
self.data_type = data_type
def get_options(self, view):
"""
Return list of predefined options.
Override to customize behavior.
:param view:
Associated administrative view class.
"""
options = self.options
if options:
if callable(options):
options = options()
return options
return None
def validate(self, value):
"""
Validate value.
        If the value is valid, returns `True`; otherwise returns `False`.
:param value:
Value to validate
"""
# useful for filters with date conversions, see if conversion in clean() raises ValueError
try:
self.clean(value)
return True
except ValueError:
return False
def clean(self, value):
"""
Parse value into python format. Occurs before .apply()
:param value:
Value to parse
"""
return value
def apply(self, query):
"""
Apply search criteria to the query and return new query.
:param query:
Query
"""
raise NotImplementedError()
def operation(self):
"""
Return readable operation name.
For example: u'equals'
"""
raise NotImplementedError()
def __unicode__(self):
return self.name
# Customized filters
class BaseBooleanFilter(BaseFilter):
"""
Base boolean filter, uses fixed list of options.
"""
def __init__(self, name, options=None, data_type=None):
super(BaseBooleanFilter, self).__init__(name,
(('1', lazy_gettext(u'Yes')),
('0', lazy_gettext(u'No'))),
data_type)
def validate(self, value):
return value in ('0', '1')
class BaseIntFilter(BaseFilter):
"""
Base Int filter. Adds validation and changes value to python int.
Avoid using int(float(value)) to also allow using decimals, because it
causes precision issues with large numbers.
"""
def clean(self, value):
return int(value)
class BaseFloatFilter(BaseFilter):
"""
Base Float filter. Adds validation and changes value to python float.
"""
def clean(self, value):
return float(value)
class BaseIntListFilter(BaseFilter):
"""
Base Integer list filter. Adds validation for int "In List" filter.
Avoid using int(float(value)) to also allow using decimals, because it
causes precision issues with large numbers.
"""
def clean(self, value):
return [int(v.strip()) for v in value.split(',') if v.strip()]
class BaseFloatListFilter(BaseFilter):
"""
Base Float list filter. Adds validation for float "In List" filter.
"""
def clean(self, value):
return [float(v.strip()) for v in value.split(',') if v.strip()]
class BaseDateFilter(BaseFilter):
"""
Base Date filter. Uses client-side date picker control.
"""
def __init__(self, name, options=None, data_type=None):
super(BaseDateFilter, self).__init__(name,
options,
data_type='datepicker')
def clean(self, value):
return datetime.datetime.strptime(value, '%Y-%m-%d').date()
class BaseDateBetweenFilter(BaseFilter):
"""
Base Date Between filter. Consolidates logic for validation and clean.
Apply method is different for each back-end.
"""
def clean(self, value):
return [datetime.datetime.strptime(range, '%Y-%m-%d')
for range in value.split(' to ')]
def operation(self):
return lazy_gettext('between')
def validate(self, value):
try:
value = [datetime.datetime.strptime(range, '%Y-%m-%d')
for range in value.split(' to ')]
# if " to " is missing, fail validation
# sqlalchemy's .between() will not work if end date is before start date
if (len(value) == 2) and (value[0] <= value[1]):
return True
else:
return False
except ValueError:
return False
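# Illustrative example (not part of the original module): BaseDateBetweenFilter
# expects its raw value as two ISO dates joined by ' to ', e.g.
#   clean('2015-01-01 to 2015-12-31')
#   -> [datetime.datetime(2015, 1, 1, 0, 0), datetime.datetime(2015, 12, 31, 0, 0)]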
class BaseDateTimeFilter(BaseFilter):
"""
Base DateTime filter. Uses client-side date time picker control.
"""
def __init__(self, name, options=None, data_type=None):
super(BaseDateTimeFilter, self).__init__(name,
options,
data_type='datetimepicker')
def clean(self, value):
# datetime filters will not work in SQLite + SQLAlchemy if value not converted to datetime
return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
class BaseDateTimeBetweenFilter(BaseFilter):
"""
Base DateTime Between filter. Consolidates logic for validation and clean.
Apply method is different for each back-end.
"""
def clean(self, value):
return [datetime.datetime.strptime(range, '%Y-%m-%d %H:%M:%S')
for range in value.split(' to ')]
def operation(self):
return lazy_gettext('between')
def validate(self, value):
try:
value = [datetime.datetime.strptime(range, '%Y-%m-%d %H:%M:%S')
for range in value.split(' to ')]
if (len(value) == 2) and (value[0] <= value[1]):
return True
else:
return False
except ValueError:
return False
class BaseTimeFilter(BaseFilter):
"""
Base Time filter. Uses client-side time picker control.
"""
def __init__(self, name, options=None, data_type=None):
super(BaseTimeFilter, self).__init__(name,
options,
data_type='timepicker')
def clean(self, value):
# time filters will not work in SQLite + SQLAlchemy if value not converted to time
timetuple = time.strptime(value, '%H:%M:%S')
return datetime.time(timetuple.tm_hour,
timetuple.tm_min,
timetuple.tm_sec)
class BaseTimeBetweenFilter(BaseFilter):
"""
Base Time Between filter. Consolidates logic for validation and clean.
Apply method is different for each back-end.
"""
def clean(self, value):
timetuples = [time.strptime(range, '%H:%M:%S')
for range in value.split(' to ')]
return [datetime.time(timetuple.tm_hour,
timetuple.tm_min,
timetuple.tm_sec)
for timetuple in timetuples]
def operation(self):
return lazy_gettext('between')
def validate(self, value):
try:
timetuples = [time.strptime(range, '%H:%M:%S')
for range in value.split(' to ')]
if (len(timetuples) == 2) and (timetuples[0] <= timetuples[1]):
return True
else:
return False
        except ValueError:
            return False
def convert(*args):
"""
Decorator for field to filter conversion routine.
See :mod:`flask_admin.contrib.sqla.filters` for usage example.
"""
def _inner(func):
func._converter_for = list(map(lambda x: x.lower(), args))
return func
return _inner
class BaseFilterConverter(object):
"""
Base filter converter.
Derive from this class to implement custom field to filter conversion
logic.
"""
def __init__(self):
self.converters = dict()
for p in dir(self):
attr = getattr(self, p)
if hasattr(attr, '_converter_for'):
for p in attr._converter_for:
self.converters[p] = attr
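# Illustrative usage sketch (assumption: not part of the original module).
# It shows how a concrete filter could subclass BaseFilter and how the
# @convert decorator ties a converter method to lowercase type names;
# "EqualFilter", "DemoConverter", "varchar" and "text" are hypothetical
# names used only for demonstration.
if __name__ == '__main__':
    class EqualFilter(BaseFilter):
        def apply(self, query):
            # a real back-end would add a WHERE clause to the query here
            return query
        def operation(self):
            return lazy_gettext('equals')
    class DemoConverter(BaseFilterConverter):
        @convert('varchar', 'text')
        def conv_text(self, name, **kwargs):
            return [EqualFilter(name)]
    converter = DemoConverter()
    # BaseFilterConverter.__init__ collects the decorated methods into a
    # {lowercase type name: bound method} mapping
    print(sorted(converter.converters.keys()))  # ['text', 'varchar']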
| sfpprxy/py-reminder | libs/model/filters.py | Python | bsd-3-clause | 8,753 |
"""
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
from pandas.core.arrays import PeriodArray
from pandas.tests.extension import base
@pytest.fixture
def dtype():
return PeriodDtype(freq="D")
@pytest.fixture
def data(dtype):
return PeriodArray(np.arange(1970, 2070), freq=dtype.freq)
@pytest.fixture
def data_for_twos(dtype):
return PeriodArray(np.ones(100) * 2, freq=dtype.freq)
@pytest.fixture
def data_for_sorting(dtype):
return PeriodArray([2018, 2019, 2017], freq=dtype.freq)
@pytest.fixture
def data_missing(dtype):
return PeriodArray([iNaT, 2017], freq=dtype.freq)
@pytest.fixture
def data_missing_for_sorting(dtype):
return PeriodArray([2018, iNaT, 2017], freq=dtype.freq)
@pytest.fixture
def data_for_grouping(dtype):
B = 2018
NA = iNaT
A = 2017
C = 2019
return PeriodArray([B, B, NA, NA, A, A, B, C], freq=dtype.freq)
@pytest.fixture
def na_value():
return pd.NaT
class BasePeriodTests:
pass
class TestPeriodDtype(BasePeriodTests, base.BaseDtypeTests):
pass
class TestConstructors(BasePeriodTests, base.BaseConstructorsTests):
pass
class TestGetitem(BasePeriodTests, base.BaseGetitemTests):
pass
class TestMethods(BasePeriodTests, base.BaseMethodsTests):
def test_combine_add(self, data_repeated):
# Period + Period is not defined.
pass
class TestInterface(BasePeriodTests, base.BaseInterfaceTests):
pass
class TestArithmeticOps(BasePeriodTests, base.BaseArithmeticOpsTests):
implements = {"__sub__", "__rsub__"}
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
if all_arithmetic_operators in self.implements:
df = pd.DataFrame({"A": data})
self.check_opname(df, all_arithmetic_operators, data[0], exc=None)
else:
# ... but not the rest.
super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
        # we implement subtraction...
if all_arithmetic_operators in self.implements:
s = pd.Series(data)
self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None)
else:
# ... but not the rest.
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
if all_arithmetic_operators in self.implements:
s = pd.Series(data)
self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None)
else:
# ... but not the rest.
            super().test_arith_series_with_array(data, all_arithmetic_operators)
def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
super()._check_divmod_op(s, op, other, exc=TypeError)
def test_add_series_with_extension_array(self, data):
# we don't implement + for Period
s = pd.Series(data)
msg = (
r"unsupported operand type\(s\) for \+: "
r"\'PeriodArray\' and \'PeriodArray\'"
)
with pytest.raises(TypeError, match=msg):
s + data
@pytest.mark.parametrize("box", [pd.Series, pd.DataFrame])
def test_direct_arith_with_ndframe_returns_not_implemented(self, data, box):
# Override to use __sub__ instead of __add__
other = pd.Series(data)
if box is pd.DataFrame:
other = other.to_frame()
result = data.__sub__(other)
assert result is NotImplemented
class TestCasting(BasePeriodTests, base.BaseCastingTests):
pass
class TestComparisonOps(BasePeriodTests, base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
# the base test is not appropriate for us. We raise on comparison
# with (some) integers, depending on the value.
pass
class TestMissing(BasePeriodTests, base.BaseMissingTests):
pass
class TestReshaping(BasePeriodTests, base.BaseReshapingTests):
pass
class TestSetitem(BasePeriodTests, base.BaseSetitemTests):
pass
class TestGroupby(BasePeriodTests, base.BaseGroupbyTests):
pass
class TestPrinting(BasePeriodTests, base.BasePrintingTests):
pass
class TestParsing(BasePeriodTests, base.BaseParsingTests):
@pytest.mark.parametrize("engine", ["c", "python"])
def test_EA_types(self, engine, data):
super().test_EA_types(engine, data)
class Test2DCompat(BasePeriodTests, base.Dim2CompatTests):
pass
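# Illustrative note (assumption: not part of the original module): every class
# above inherits its actual test methods from pandas.tests.extension.base, so
# the suite can be exercised with a plain pytest invocation such as
#   pytest pandas/tests/extension/test_period.py -k "TestArithmeticOps"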
| rs2/pandas | pandas/tests/extension/test_period.py | Python | bsd-3-clause | 5,274 |
# -*- coding: utf-8 -*-
# Natural Language Toolkit: An Incremental Earley Chart Parser
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Peter Ljunglöf <[email protected]>
# Rob Speer <[email protected]>
# Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# Jean Mark Gawron <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Data classes and parser implementations for *incremental* chart
parsers, which use dynamic programming to efficiently parse a text.
A "chart parser" derives parse trees for a text by iteratively adding
\"edges\" to a \"chart\". Each "edge" represents a hypothesis about the tree
structure for a subsequence of the text. The "chart" is a
\"blackboard\" for composing and combining these hypotheses.
A parser is "incremental", if it guarantees that for all i, j where i < j,
all edges ending at i are built before any edges ending at j.
This is appealing for, say, speech recognizer hypothesis filtering.
The main parser class is ``EarleyChartParser``, which is a top-down
algorithm, originally formulated by Jay Earley (1970).
"""
from __future__ import print_function, division
from nltk.compat import xrange
from nltk.parse.chart import (Chart, ChartParser, EdgeI, LeafEdge, LeafInitRule,
BottomUpPredictRule, BottomUpPredictCombineRule,
TopDownInitRule, SingleEdgeFundamentalRule,
EmptyPredictRule,
CachedTopDownPredictRule,
FilteredSingleEdgeFundamentalRule,
FilteredBottomUpPredictCombineRule)
from nltk.parse.featurechart import (FeatureChart, FeatureChartParser,
FeatureTopDownInitRule,
FeatureTopDownPredictRule,
FeatureEmptyPredictRule,
FeatureBottomUpPredictRule,
FeatureBottomUpPredictCombineRule,
FeatureSingleEdgeFundamentalRule)
#////////////////////////////////////////////////////////////
# Incremental Chart
#////////////////////////////////////////////////////////////
class IncrementalChart(Chart):
def initialize(self):
# A sequence of edge lists contained in this chart.
self._edgelists = tuple([] for x in self._positions())
# The set of child pointer lists associated with each edge.
self._edge_to_cpls = {}
# Indexes mapping attribute values to lists of edges
# (used by select()).
self._indexes = {}
def edges(self):
return list(self.iteredges())
def iteredges(self):
return (edge for edgelist in self._edgelists for edge in edgelist)
def select(self, end, **restrictions):
edgelist = self._edgelists[end]
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(edgelist)
# Find the index corresponding to the given restrictions.
restr_keys = sorted(restrictions.keys())
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(restrictions[key] for key in restr_keys)
return iter(self._indexes[restr_keys][end].get(vals, []))
def _add_index(self, restr_keys):
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError('Bad restriction: %s' % key)
# Create the index.
index = self._indexes[restr_keys] = tuple({} for x in self._positions())
# Add all existing edges to the index.
for end, edgelist in enumerate(self._edgelists):
this_index = index[end]
for edge in edgelist:
vals = tuple(getattr(edge, key)() for key in restr_keys)
this_index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
end = edge.end()
for (restr_keys, index) in self._indexes.items():
vals = tuple(getattr(edge, key)() for key in restr_keys)
index[end].setdefault(vals, []).append(edge)
def _append_edge(self, edge):
self._edgelists[edge.end()].append(edge)
def _positions(self):
return xrange(self.num_leaves() + 1)
class FeatureIncrementalChart(IncrementalChart, FeatureChart):
def select(self, end, **restrictions):
edgelist = self._edgelists[end]
# If there are no restrictions, then return all edges.
if restrictions=={}: return iter(edgelist)
# Find the index corresponding to the given restrictions.
restr_keys = sorted(restrictions.keys())
restr_keys = tuple(restr_keys)
# If it doesn't exist, then create it.
if restr_keys not in self._indexes:
self._add_index(restr_keys)
vals = tuple(self._get_type_if_possible(restrictions[key])
for key in restr_keys)
return iter(self._indexes[restr_keys][end].get(vals, []))
def _add_index(self, restr_keys):
# Make sure it's a valid index.
for key in restr_keys:
if not hasattr(EdgeI, key):
raise ValueError('Bad restriction: %s' % key)
# Create the index.
index = self._indexes[restr_keys] = tuple({} for x in self._positions())
# Add all existing edges to the index.
for end, edgelist in enumerate(self._edgelists):
this_index = index[end]
for edge in edgelist:
vals = tuple(self._get_type_if_possible(getattr(edge, key)())
for key in restr_keys)
this_index.setdefault(vals, []).append(edge)
def _register_with_indexes(self, edge):
end = edge.end()
for (restr_keys, index) in self._indexes.items():
vals = tuple(self._get_type_if_possible(getattr(edge, key)())
for key in restr_keys)
index[end].setdefault(vals, []).append(edge)
#////////////////////////////////////////////////////////////
# Incremental CFG Rules
#////////////////////////////////////////////////////////////
class CompleteFundamentalRule(SingleEdgeFundamentalRule):
def _apply_incomplete(self, chart, grammar, left_edge):
end = left_edge.end()
# When the chart is incremental, we only have to look for
# empty complete edges here.
for right_edge in chart.select(start=end, end=end,
is_complete=True,
lhs=left_edge.nextsym()):
new_edge = left_edge.move_dot_forward(right_edge.end())
if chart.insert_with_backpointer(new_edge, left_edge, right_edge):
yield new_edge
class CompleterRule(CompleteFundamentalRule):
_fundamental_rule = CompleteFundamentalRule()
def apply(self, chart, grammar, edge):
if not isinstance(edge, LeafEdge):
for new_edge in self._fundamental_rule.apply(chart, grammar, edge):
yield new_edge
class ScannerRule(CompleteFundamentalRule):
_fundamental_rule = CompleteFundamentalRule()
def apply(self, chart, grammar, edge):
if isinstance(edge, LeafEdge):
for new_edge in self._fundamental_rule.apply(chart, grammar, edge):
yield new_edge
class PredictorRule(CachedTopDownPredictRule):
pass
class FilteredCompleteFundamentalRule(FilteredSingleEdgeFundamentalRule):
def apply(self, chart, grammar, edge):
# Since the Filtered rule only works for grammars without empty productions,
# we only have to bother with complete edges here.
if edge.is_complete():
for new_edge in self._apply_complete(chart, grammar, edge):
yield new_edge
#////////////////////////////////////////////////////////////
# Incremental FCFG Rules
#////////////////////////////////////////////////////////////
class FeatureCompleteFundamentalRule(FeatureSingleEdgeFundamentalRule):
def _apply_incomplete(self, chart, grammar, left_edge):
fr = self._fundamental_rule
end = left_edge.end()
# When the chart is incremental, we only have to look for
# empty complete edges here.
for right_edge in chart.select(start=end, end=end,
is_complete=True,
lhs=left_edge.nextsym()):
for new_edge in fr.apply(chart, grammar, left_edge, right_edge):
yield new_edge
class FeatureCompleterRule(CompleterRule):
_fundamental_rule = FeatureCompleteFundamentalRule()
class FeatureScannerRule(ScannerRule):
_fundamental_rule = FeatureCompleteFundamentalRule()
class FeaturePredictorRule(FeatureTopDownPredictRule):
pass
#////////////////////////////////////////////////////////////
# Incremental CFG Chart Parsers
#////////////////////////////////////////////////////////////
EARLEY_STRATEGY = [LeafInitRule(),
TopDownInitRule(),
CompleterRule(),
ScannerRule(),
PredictorRule()]
TD_INCREMENTAL_STRATEGY = [LeafInitRule(),
TopDownInitRule(),
CachedTopDownPredictRule(),
CompleteFundamentalRule()]
BU_INCREMENTAL_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictRule(),
CompleteFundamentalRule()]
BU_LC_INCREMENTAL_STRATEGY = [LeafInitRule(),
EmptyPredictRule(),
BottomUpPredictCombineRule(),
CompleteFundamentalRule()]
LC_INCREMENTAL_STRATEGY = [LeafInitRule(),
FilteredBottomUpPredictCombineRule(),
FilteredCompleteFundamentalRule()]
class IncrementalChartParser(ChartParser):
"""
An *incremental* chart parser implementing Jay Earley's
parsing algorithm:
| For each index end in [0, 1, ..., N]:
| For each edge such that edge.end = end:
| If edge is incomplete and edge.next is not a part of speech:
| Apply PredictorRule to edge
| If edge is incomplete and edge.next is a part of speech:
| Apply ScannerRule to edge
| If edge is complete:
| Apply CompleterRule to edge
| Return any complete parses in the chart
"""
def __init__(self, grammar, strategy=BU_LC_INCREMENTAL_STRATEGY,
trace=0, trace_chart_width=50,
chart_class=IncrementalChart):
"""
Create a new Earley chart parser, that uses ``grammar`` to
parse texts.
:type grammar: CFG
:param grammar: The grammar used to parse texts.
:type trace: int
:param trace: The level of tracing that should be used when
parsing a text. ``0`` will generate no tracing output;
and higher numbers will produce more verbose tracing
output.
:type trace_chart_width: int
:param trace_chart_width: The default total width reserved for
the chart in trace output. The remainder of each line will
be used to display edges.
:param chart_class: The class that should be used to create
the charts used by this parser.
"""
self._grammar = grammar
self._trace = trace
self._trace_chart_width = trace_chart_width
self._chart_class = chart_class
self._axioms = []
self._inference_rules = []
for rule in strategy:
if rule.NUM_EDGES == 0:
self._axioms.append(rule)
elif rule.NUM_EDGES == 1:
self._inference_rules.append(rule)
else:
raise ValueError("Incremental inference rules must have "
"NUM_EDGES == 0 or 1")
def chart_parse(self, tokens, trace=None):
if trace is None: trace = self._trace
trace_new_edges = self._trace_new_edges
tokens = list(tokens)
self._grammar.check_coverage(tokens)
chart = self._chart_class(tokens)
grammar = self._grammar
# Width, for printing trace edges.
trace_edge_width = self._trace_chart_width // (chart.num_leaves() + 1)
if trace: print(chart.pretty_format_leaves(trace_edge_width))
for axiom in self._axioms:
new_edges = list(axiom.apply(chart, grammar))
trace_new_edges(chart, axiom, new_edges, trace, trace_edge_width)
inference_rules = self._inference_rules
for end in range(chart.num_leaves()+1):
if trace > 1: print("\n* Processing queue:", end, "\n")
agenda = list(chart.select(end=end))
while agenda:
edge = agenda.pop()
for rule in inference_rules:
new_edges = list(rule.apply(chart, grammar, edge))
trace_new_edges(chart, rule, new_edges, trace, trace_edge_width)
for new_edge in new_edges:
if new_edge.end()==end:
agenda.append(new_edge)
return chart
class EarleyChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, EARLEY_STRATEGY, **parser_args)
class IncrementalTopDownChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, TD_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalBottomUpChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, BU_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalBottomUpLeftCornerChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
IncrementalChartParser.__init__(self, grammar, BU_LC_INCREMENTAL_STRATEGY, **parser_args)
class IncrementalLeftCornerChartParser(IncrementalChartParser):
def __init__(self, grammar, **parser_args):
if not grammar.is_nonempty():
raise ValueError("IncrementalLeftCornerParser only works for grammars "
"without empty productions.")
IncrementalChartParser.__init__(self, grammar, LC_INCREMENTAL_STRATEGY, **parser_args)
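# Illustrative usage sketch (assumption: not part of the original module).
# Any of the incremental CFG parsers above can be driven the same way; the
# toy grammar below is hypothetical and only for demonstration.
#
#   from nltk import CFG
#   grammar = CFG.fromstring("""
#       S -> NP VP
#       NP -> 'I' | 'John'
#       VP -> V NP
#       V -> 'saw'
#   """)
#   parser = EarleyChartParser(grammar)
#   for tree in parser.parse('I saw John'.split()):
#       print(tree)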
#////////////////////////////////////////////////////////////
# Incremental FCFG Chart Parsers
#////////////////////////////////////////////////////////////
EARLEY_FEATURE_STRATEGY = [LeafInitRule(),
FeatureTopDownInitRule(),
FeatureCompleterRule(),
FeatureScannerRule(),
FeaturePredictorRule()]
TD_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureTopDownInitRule(),
FeatureTopDownPredictRule(),
FeatureCompleteFundamentalRule()]
BU_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureEmptyPredictRule(),
FeatureBottomUpPredictRule(),
FeatureCompleteFundamentalRule()]
BU_LC_INCREMENTAL_FEATURE_STRATEGY = [LeafInitRule(),
FeatureEmptyPredictRule(),
FeatureBottomUpPredictCombineRule(),
FeatureCompleteFundamentalRule()]
class FeatureIncrementalChartParser(IncrementalChartParser, FeatureChartParser):
def __init__(self, grammar,
strategy=BU_LC_INCREMENTAL_FEATURE_STRATEGY,
trace_chart_width=20,
chart_class=FeatureIncrementalChart,
**parser_args):
IncrementalChartParser.__init__(self, grammar,
strategy=strategy,
trace_chart_width=trace_chart_width,
chart_class=chart_class,
**parser_args)
class FeatureEarleyChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, EARLEY_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalTopDownChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, TD_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalBottomUpChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, BU_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
class FeatureIncrementalBottomUpLeftCornerChartParser(FeatureIncrementalChartParser):
def __init__(self, grammar, **parser_args):
FeatureIncrementalChartParser.__init__(self, grammar, BU_LC_INCREMENTAL_FEATURE_STRATEGY, **parser_args)
#////////////////////////////////////////////////////////////
# Demonstration
#////////////////////////////////////////////////////////////
def demo(print_times=True, print_grammar=False,
print_trees=True, trace=2,
sent='I saw John with a dog with my cookie', numparses=5):
"""
A demonstration of the Earley parsers.
"""
import sys, time
from nltk.parse.chart import demo_grammar
# The grammar for ChartParser and SteppingChartParser:
grammar = demo_grammar()
if print_grammar:
print("* Grammar")
print(grammar)
# Tokenize the sample sentence.
print("* Sentence:")
print(sent)
tokens = sent.split()
print(tokens)
print()
# Do the parsing.
earley = EarleyChartParser(grammar, trace=trace)
t = time.clock()
chart = earley.chart_parse(tokens)
parses = list(chart.parses(grammar.start()))
t = time.clock()-t
# Print results.
if numparses:
assert len(parses)==numparses, 'Not all parses found'
if print_trees:
for tree in parses: print(tree)
else:
print("Nr trees:", len(parses))
if print_times:
print("Time:", t)
if __name__ == '__main__': demo()
| sdoran35/hate-to-hugs | venv/lib/python3.6/site-packages/nltk/parse/earleychart.py | Python | mit | 18,690 |
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
sys.path.insert(0, dirname(abspath(__file__)))
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
'other': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
},
},
INSTALLED_APPS=[
'nashvegas',
'tests',
],
ROOT_URLCONF='',
DEBUG=False,
SITE_ID=1,
TEMPLATE_DEBUG=True,
)
from django_nose import NoseTestSuiteRunner
def runtests(*test_args, **kwargs):
if not test_args:
test_args = ['tests']
kwargs.setdefault('interactive', False)
test_runner = NoseTestSuiteRunner(**kwargs)
failures = test_runner.run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--verbosity', dest='verbosity', action='store', default=1, type=int)
parser.add_options(NoseTestSuiteRunner.options)
(options, args) = parser.parse_args()
runtests(*args, **options.__dict__)
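# Illustrative usage (not part of the original script):
#   python runtests.py --verbosity=2 tests.SomeTestCase
# Positional arguments are passed straight to the nose test runner as test
# labels; 'tests.SomeTestCase' is a hypothetical label.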
| iivvoo/nashvegas | runtests.py | Python | mit | 1,302 |
"""distutils.spawn
Provides the 'spawn()' function, a front-end to various platform-
specific functions for launching another program in a sub-process.
Also provides the 'find_executable()' to search the path for a given
executable name.
"""
# created 1999/07/24, Greg Ward
__revision__ = "$Id: spawn.py,v 1.10 2000/09/26 02:00:51 gward Exp $"
import sys, os, string
from distutils.errors import *
def spawn (cmd,
search_path=1,
verbose=0,
dry_run=0):
"""Run another program, specified as a command list 'cmd', in a new
process. 'cmd' is just the argument list for the new process, ie.
cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
There is no way to run a program with a name different from that of its
executable.
If 'search_path' is true (the default), the system's executable search
path will be used to find the program; otherwise, cmd[0] must be the
exact path to the executable. If 'verbose' is true, a one-line summary
of the command will be printed before it is run. If 'dry_run' is true,
the command will not actually be run.
Raise DistutilsExecError if running the program fails in any way; just
return on success.
"""
if os.name == 'posix':
_spawn_posix(cmd, search_path, verbose, dry_run)
elif os.name == 'nt':
_spawn_nt(cmd, search_path, verbose, dry_run)
else:
raise DistutilsPlatformError, \
"don't know how to spawn programs on platform '%s'" % os.name
# spawn ()
def _nt_quote_args (args):
"""Quote command-line arguments for DOS/Windows conventions: just
wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# XXX this doesn't seem very robust to me -- but if the Windows guys
# say it'll work, I guess I'll have to accept it. (What if an arg
# contains quotes? What other magic characters, other than spaces,
# have to be escaped? Is there an escaping mechanism other than
# quoting?)
for i in range(len(args)):
if string.find(args[i], ' ') != -1:
args[i] = '"%s"' % args[i]
return args
def _spawn_nt (cmd,
search_path=1,
verbose=0,
dry_run=0):
executable = cmd[0]
cmd = _nt_quote_args(cmd)
if search_path:
# either we find one or it stays the same
executable = find_executable(executable) or executable
if verbose:
print string.join([executable] + cmd[1:], ' ')
if not dry_run:
# spawn for NT requires a full path to the .exe
try:
rc = os.spawnv(os.P_WAIT, executable, cmd)
except OSError, exc:
# this seems to happen when the command isn't found
raise DistutilsExecError, \
"command '%s' failed: %s" % (cmd[0], exc[-1])
if rc != 0:
# and this reflects the command running but failing
raise DistutilsExecError, \
"command '%s' failed with exit status %d" % (cmd[0], rc)
def _spawn_posix (cmd,
search_path=1,
verbose=0,
dry_run=0):
if verbose:
print string.join(cmd, ' ')
if dry_run:
return
exec_fn = search_path and os.execvp or os.execv
pid = os.fork()
if pid == 0: # in the child
try:
#print "cmd[0] =", cmd[0]
#print "cmd =", cmd
exec_fn(cmd[0], cmd)
except OSError, e:
sys.stderr.write("unable to execute %s: %s\n" %
(cmd[0], e.strerror))
os._exit(1)
sys.stderr.write("unable to execute %s for unknown reasons" % cmd[0])
os._exit(1)
else: # in the parent
# Loop until the child either exits or is terminated by a signal
# (ie. keep waiting if it's merely stopped)
while 1:
(pid, status) = os.waitpid(pid, 0)
if os.WIFSIGNALED(status):
raise DistutilsExecError, \
"command '%s' terminated by signal %d" % \
(cmd[0], os.WTERMSIG(status))
elif os.WIFEXITED(status):
exit_status = os.WEXITSTATUS(status)
if exit_status == 0:
return # hey, it succeeded!
else:
raise DistutilsExecError, \
"command '%s' failed with exit status %d" % \
(cmd[0], exit_status)
elif os.WIFSTOPPED(status):
continue
else:
raise DistutilsExecError, \
"unknown error executing '%s': termination status %d" % \
(cmd[0], status)
# _spawn_posix ()
def find_executable(executable, path=None):
"""Try to find 'executable' in the directories listed in 'path' (a
string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']). Returns the complete filename or None if not
found.
"""
if path is None:
path = os.environ['PATH']
paths = string.split(path, os.pathsep)
(base, ext) = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable
# find_executable()
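# Illustrative example (not part of the original module): on a POSIX system
# with /bin/ls installed,
#   find_executable('ls')          # -> '/bin/ls' (actual path depends on PATH)
#   spawn(['ls', '-l'], verbose=1)
# prints the command line before running it and raises DistutilsExecError if
# the child exits with a non-zero status.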
| atmark-techno/atmark-dist | user/python/Lib/distutils/spawn.py | Python | gpl-2.0 | 5,773 |
import os
import re
import time
import kodi
import log_utils # @UnusedImport
from salts_lib import utils2
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
__all__ = ['scraper', 'proxy', 'local_scraper', 'pw_scraper', 'watchseries_scraper', 'movie25_scraper', 'nitertv_scraper',
'filmovizjia_scraper', 'icefilms_scraper', 'viooz_scraper', 'mvl_proxy', 'streamdor_scraper', 'goojara_proxy',
'filmikz_scraper', 'vidnow4k_proxy', 'downloadtube_scraper', 'iwatch_scraper', 'ororotv_scraper', 'vidics_scraper',
'losmovies_scraper', 'movie4k_scraper', 'easynews_scraper', 'noobroom_scraper', 'seriesonline_scraper',
'directdl_scraper', 'afdah_scraper', 'dizibox_scraper', 'yesmovies_scraper', 'iomovies_scraper',
'streamtv_scraper', 'wmo_scraper', 'wso_scraper', 'watchfree_scraper', 'streamlord_scraper', 'yify_proxy',
'pftv_scraper', 'flixanity_scraper', 'cmz_scraper', 'movienight_scraper', 'alluc_scraper', 'watchonline_scraper',
'xmovies8_scraper', 'moviexk_scraper', 'mintmovies_scraper', 'pubfilm_scraper', 'rlssource_scraper', 'mehliz_scraper',
'couchtunerv1_scraper', 'ddlvalley_scraper', 'pelispedia_scraper', 'spacemov_scraper', 'putmv_scraper',
'watch8now_scraper', 'dizilab_scraper', 'dizimag_scraper', 'moviehut_scraper', 'serieswatch_scraper', 'dizist_scraper',
'dizigold_scraper', 'onlinemoviespro_scraper', 'emoviespro_scraper', 'one23movies_proxy', 'rlsbb_scraper',
'sezonlukdizi_scraper', 'movietube_scraper', 'putlocker_scraper', 'diziay_scraper', 'scenehdtv_scraper', 'pubfilmto_scraper',
'furk_scraper', 'hevcbluray_scraper', 'ninemovies_proxy', 'miradetodo_scraper', 'dizipas_scraper', 'xmovies8v2_scraper',
'moviesplanet_scraper', 'premiumize_scraper', 'tvonline_scraper', 'watchitvideos_scraper', 'movieblast_scraper',
'ddlseries_scraper', 'fmovie_scraper', 'seriescoco_scraper', 'veocube_scraper', 'piratejunkies_scraper', 'sit2play_scraper',
'watch5s_scraper', 'moviesub_scraper', 'watchepisodes_scraper', 'heydl_scraper', 'vkflix_scraper', 'bestmoviez_scraper',
'm4ufree_scraper', 'moviewatcher_scraper', 'vivoto_scraper', '2ddl_scraper', 'onlinedizi_scraper', 'moviehubs_scraper',
'premiumizev2_scraper', 'cinemamkv_scraper', 'dayt_scraper', 'moviego_scraper', 'treasureen_scraper', 'movieocean_proxy',
'rlsmovies_scraper', 'hdmoviefree_scraper', 'tvrush_scraper', 'snagfilms_scraper', 'scenedown_scraper', 'scenerls_scraper',
'tvshow_scraper', 'quikr_scraper', 'rlshd_scraper', 'tvhd_scraper', 'seehd_scraper', 'myddl_scraper', 'rmz_scraper',
'ol_scraper', 'real_scraper', 'movytvy_scraper', 'vumoo_scraper', 'vebup_scraper', 'mvgee_proxy']
from . import *
logger = log_utils.Logger.get_logger()
class ScraperVideo:
def __init__(self, video_type, title, year, trakt_id, season='', episode='', ep_title='', ep_airdate=''):
assert(video_type in (VIDEO_TYPES.__dict__[k] for k in VIDEO_TYPES.__dict__ if not k.startswith('__')))
self.video_type = video_type
if isinstance(title, unicode): self.title = title.encode('utf-8')
else: self.title = title
self.year = str(year)
self.season = season
self.episode = episode
if isinstance(ep_title, unicode): self.ep_title = ep_title.encode('utf-8')
else: self.ep_title = ep_title
self.trakt_id = trakt_id
self.ep_airdate = utils2.to_datetime(ep_airdate, "%Y-%m-%d").date() if ep_airdate else None
def __str__(self):
return '|%s|%s|%s|%s|%s|%s|%s|' % (self.video_type, self.title, self.year, self.season, self.episode, self.ep_title, self.ep_airdate)
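# Illustrative example (assumption: not part of the original module): a caller
# would typically build a ScraperVideo from Trakt metadata, e.g.
#   video = ScraperVideo(VIDEO_TYPES.EPISODE, 'Show Title', 2015, 'trakt-id',
#                        season=1, episode=2, ep_airdate='2015-06-12')
# VIDEO_TYPES.EPISODE is assumed to be one of the constants in
# salts_lib.constants; the remaining values are placeholders.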
def update_xml(xml, new_settings, cat_count):
new_settings.insert(0, '<category label="Scrapers %s">' % (cat_count))
new_settings.append(' </category>')
new_settings = '\n'.join(new_settings)
match = re.search('(<category label="Scrapers %s">.*?</category>)' % (cat_count), xml, re.DOTALL | re.I)
if match:
old_settings = match.group(1)
if old_settings != new_settings:
xml = xml.replace(old_settings, new_settings)
else:
logger.log('Unable to match category: %s' % (cat_count), log_utils.LOGWARNING)
return xml
def update_settings():
full_path = os.path.join(kodi.get_path(), 'resources', 'settings.xml')
try:
# open for append; skip update if it fails
with open(full_path, 'a') as f:
pass
except Exception as e:
logger.log('Dynamic settings update skipped: %s' % (e), log_utils.LOGWARNING)
else:
with open(full_path, 'r') as f:
xml = f.read()
new_settings = []
cat_count = 1
old_xml = xml
classes = scraper.Scraper.__class__.__subclasses__(scraper.Scraper) # @UndefinedVariable
classes += proxy.Proxy.__class__.__subclasses__(proxy.Proxy) # @UndefinedVariable
for cls in sorted(classes, key=lambda x: x.get_name().upper()):
if not cls.get_name() or cls.has_proxy(): continue
new_settings += cls.get_settings()
if len(new_settings) > 90:
xml = update_xml(xml, new_settings, cat_count)
new_settings = []
cat_count += 1
if new_settings:
xml = update_xml(xml, new_settings, cat_count)
if xml != old_xml:
with open(full_path, 'w') as f:
f.write(xml)
else:
logger.log('No Settings Update Needed', log_utils.LOGDEBUG)
def update_all_scrapers():
try: last_check = int(kodi.get_setting('last_list_check'))
except: last_check = 0
now = int(time.time())
list_url = kodi.get_setting('scraper_url')
scraper_password = kodi.get_setting('scraper_password')
list_path = os.path.join(kodi.translate_path(kodi.get_profile()), 'scraper_list.txt')
exists = os.path.exists(list_path)
if list_url and scraper_password and (not exists or (now - last_check) > 15 * 60):
_etag, scraper_list = utils2.get_and_decrypt(list_url, scraper_password)
if scraper_list:
try:
with open(list_path, 'w') as f:
f.write(scraper_list)
kodi.set_setting('last_list_check', str(now))
kodi.set_setting('scraper_last_update', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now)))
for line in scraper_list.split('\n'):
line = line.replace(' ', '')
if line:
scraper_url, filename = line.split(',')
if scraper_url.startswith('http'):
update_scraper(filename, scraper_url)
except Exception as e:
logger.log('Exception during scraper update: %s' % (e), log_utils.LOGWARNING)
def update_scraper(filename, scraper_url):
try:
if not filename: return
py_path = os.path.join(kodi.get_path(), 'scrapers', filename)
exists = os.path.exists(py_path)
scraper_password = kodi.get_setting('scraper_password')
if scraper_url and scraper_password:
old_lm = None
old_py = ''
if exists:
with open(py_path, 'r') as f:
old_py = f.read()
match = re.search('^#\s+Last-Modified:\s*(.*)', old_py)
if match:
old_lm = match.group(1).strip()
new_lm, new_py = utils2.get_and_decrypt(scraper_url, scraper_password, old_lm)
if new_py:
logger.log('%s path: %s, new_py: %s, match: %s' % (filename, py_path, bool(new_py), new_py == old_py), log_utils.LOGDEBUG)
if old_py != new_py:
with open(py_path, 'w') as f:
f.write('# Last-Modified: %s\n' % (new_lm))
f.write(new_py)
kodi.notify(msg=utils2.i18n('scraper_updated') + filename)
except Exception as e:
logger.log('Failure during %s scraper update: %s' % (filename, e), log_utils.LOGWARNING)
update_settings()
update_all_scrapers()
| mrquim/mrquimrepo | repo/plugin.video.salts/scrapers/__init__.py | Python | gpl-2.0 | 8,416 |
#!/usr/bin/env python
"""
Updates the dependency lists in makefile.deps for all object files produced
from sources in src and subfolders, so that changing a header file always
leads to the recompilation of all the files depending on this header.
"""
import os, re, fnmatch, util2, update_vs, update_vs2008
pjoin = os.path.join
DIRS = [
"src", pjoin("src", "utils"), pjoin("src", "mui"),
pjoin("src", "installer"), pjoin("src", "ifilter"), pjoin("src", "previewer"),
pjoin("src", "memtrace"), pjoin("src", "regress"), pjoin("src", "uia"),
pjoin("ext", "unarr"), pjoin("ext", "unarr", "common"),
] + [pjoin("ext", "unarr", format) for format in ["rar", "tar", "zip", "_7z"]]
INCLUDE_DIRS = DIRS + [
pjoin("mupdf", "include"),
pjoin("ext", "bzip2"),
pjoin("ext", "CHMlib", "src"),
pjoin("ext", "freetype2", "include"),
pjoin("ext", "jbig2dec"),
pjoin("ext", "libdjvu"),
pjoin("ext", "libjpeg-turbo"),
pjoin("ext", "libwebp"),
pjoin("ext", "lzma", "C"),
pjoin("ext", "openjpeg"),
pjoin("ext", "synctex"),
pjoin("ext", "zlib"),
]
OBJECT_DIRS = {
"src\\utils": "$(OU)",
"src\\ifilter": "$(ODLL)", "src\\previewer": "$(ODLL)",
"src\\mui": "$(OMUI)",
"src\\memtrace": "$(OM)",
"src\\uia": "$(OUIA)",
"ext\\unarr": "$(OUN)", "ext\\unarr\\common": "$(OUN)", "ext\\unarr\\rar": "$(OUN)", "ext\\unarr\\tar": "$(OUN)", "ext\\unarr\\zip": "$(OUN)", "ext\\unarr\\_7z": "$(OUN)"
} # default: "$(OS)"
MAKEFILE = "makefile.deps"
DEPENDENCIES_PER_LINE = 3
def prependPath(files, basefile=None):
result = []
include_dirs = INCLUDE_DIRS
if basefile:
include_dirs = [os.path.dirname(basefile)] + include_dirs
for file in files:
if file in ["string.h", "math.h"]:
continue # skip names of system headers which also exist in mupdf/include/mupdf/fitz
for dir in include_dirs:
path = os.path.normpath(pjoin(dir, file))
if os.path.exists(path):
result.append(path)
break
return result
def getObjectPath(file):
file = file.replace("/", "\\")
for (path, odir) in OBJECT_DIRS.items():
if file.startswith(path + "\\"):
return odir
return "$(OS)"
@util2.memoize
def extractIncludes(file):
content = open(file, "r").read()
content = content.replace("\r\n", "\n")
# filter out multi-line comments (could contain #include lines as examples)
content = re.sub(r'(?s)/\*.*?\*/', '/* */', content)
# try to filter out "#if 0 ... #endif" sections (hacky)
content = re.sub(r'(?sm)^#if 0$.*?^#endif$', '', content)
includes = re.findall(r'(?m)^#include ["<]([^">]+)[">]', content)
includes = prependPath(includes, file)
for inc in includes:
includes += extractIncludes(inc)
return util2.uniquify(includes)
def createDependencyList():
dependencies = {}
for dir in DIRS:
all_c_files = fnmatch.filter(os.listdir(dir), "*.c*")
for file in all_c_files:
file = pjoin(dir, file)
dependencies[file] = extractIncludes(file)
return dependencies
def flattenDependencyList(dependencies):
flatlist = []
for file in dependencies.keys():
if dependencies[file]:
opath = getObjectPath(file)
filename = os.path.splitext(os.path.split(file)[1])[0]
# TODO: normalizing paths already in prependPath makes getObjectPath fail under cygwin
deplist = sorted(dependencies[file], key=lambda s: str.lower(s.replace("/", "\\")))
for depgroup in util2.group(deplist, DEPENDENCIES_PER_LINE):
flatlist.append("%s\\%s.obj: $B\\%s" % (opath, filename, " $B\\".join(depgroup)))
return flatlist
def normalizePaths(paths):
return re.sub(r"( |\\)[^.\\\s]+\\..\\", r"\1", paths.replace("/", "\\"))
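# Illustrative example (not part of the original script): normalizePaths
# converts forward slashes and collapses "dir\..\" indirections, e.g.
#   normalizePaths(r"src\utils\..\BaseUtil.h")  ->  r"src\BaseUtil.h"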
def injectDependencyList(flatlist):
flatlist = "\n".join(sorted(flatlist, key=str.lower))
flatlist = normalizePaths(flatlist)
content = "## Header-dependencies for src\* and src\*\*\n"
content += "### the list below is auto-generated by update_dependencies.py\n"
content += "B=$(BASEDIR)\n"
content += flatlist + "\n"
open(MAKEFILE, "wb").write(content.replace("\n", "\r\n"))
def main():
util2.chdir_top()
injectDependencyList(flattenDependencyList(createDependencyList()))
if __name__ == "__main__":
main()
update_vs.main()
update_vs2008.main()
| ibb-zimmers/betsynetpdf | sumatrapdf/scripts/update_dependencies.py | Python | gpl-3.0 | 4,222 |
# coding=utf-8
"""Tests for the keyword wizard."""
import unittest
from safe.definitions.constants import INASAFE_TEST
from safe.test.utilities import get_qgis_app
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app(qsetting=INASAFE_TEST)
import os
import shutil
from datetime import datetime
from safe.common.utilities import temp_dir
from safe.definitions.constants import big_number, no_field
from safe.definitions.exposure import (exposure_land_cover, exposure_place,
exposure_population, exposure_structure)
from safe.definitions.exposure_classifications import (generic_place_classes,
generic_structure_classes)
from safe.definitions.extra_keywords import extra_keyword_earthquake_depth
from safe.definitions.fields import (aggregation_name_field,
exposure_type_field, female_count_field,
hazard_name_field, hazard_value_field,
population_count_field)
from safe.definitions.hazard import (hazard_cyclone, hazard_earthquake,
hazard_flood, hazard_volcano)
from safe.definitions.hazard_category import hazard_category_multiple_event
from safe.definitions.hazard_classifications import (cyclone_au_bom_hazard_classes,
earthquake_mmi_scale,
flood_hazard_classes,
volcano_hazard_classes)
from safe.definitions.layer_geometry import (layer_geometry_point,
layer_geometry_polygon,
layer_geometry_raster)
from safe.definitions.layer_modes import (layer_mode_classified,
layer_mode_continuous)
from safe.definitions.layer_purposes import (layer_purpose_aggregation,
layer_purpose_exposure,
layer_purpose_hazard)
from safe.definitions.units import (count_exposure_unit,
unit_kilometres_per_hour, unit_metres,
unit_mmi)
from safe.definitions.utilities import (default_classification_thresholds,
get_compulsory_fields)
from safe.gui.tools.wizard.wizard_dialog import WizardDialog
from safe.test.utilities import (clone_raster_layer, clone_shp_layer,
dict_values_sorted, load_test_vector_layer,
standard_data_path)
from safe.utilities.unicode import byteify
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "[email protected]"
__revision__ = '$Format:%H$'
# Some default values for testing
source = 'Source'
source_scale = 'Source Scale'
source_url = 'Source Url'
source_date = datetime.strptime('06-12-2015', '%d-%m-%Y')
source_license = 'Source License'
layer_title = 'Layer Title'
# noinspection PyTypeChecker
class TestKeywordWizard(unittest.TestCase):
"""Test the InaSAFE keyword wizard GUI."""
maxDiff = None
def tearDown(self):
"""Run after each test."""
# Remove the mess that we made on each test
try:
shutil.rmtree(temp_dir(sub_dir='test'))
except BaseException:
pass
def check_list(self, expected_list, list_widget):
"""Helper function to check that list_widget is equal to expected_list.
:param expected_list: List of expected values to be found.
:type expected_list: list
:param list_widget: List widget that wants to be checked.
        :type list_widget: QListWidget
"""
real_list = []
for i in range(list_widget.count()):
real_list.append(list_widget.item(i).text())
self.assertEqual(expected_list, real_list)
def check_current_step(self, expected_step):
"""Helper function to check the current step is expected_step.
:param expected_step: The expected current step.
:type expected_step: WizardStep instance
"""
current_step = expected_step.parent.get_current_step()
        message = 'Should be step %s but got %s' % (
expected_step.__class__.__name__, current_step.__class__.__name__)
self.assertEqual(expected_step, current_step, message)
def check_current_text(self, expected_text, list_widget):
"""Check the current text in list widget is expected_text.
        :param expected_text: The expected selected text in the list widget.
:type expected_text: str
:param list_widget: List widget that wants to be checked.
:type list_widget: QListWidget
"""
try:
selected_items = list_widget.selectedItems()
selected_texts = [item.text() for item in selected_items]
if isinstance(expected_text, str):
expected_text = [expected_text]
self.assertListEqual(expected_text, selected_texts)
except AttributeError:
options = [
list_widget.item(i).text()
for i in range(list_widget.count())
]
            message = 'There is no %s in the available options %s' % (
expected_text, options)
self.assertFalse(True, message)
# noinspection PyUnresolvedReferences
@staticmethod
def select_from_list_widget(option, list_widget):
"""Helper function to select option from list_widget.
:param option: Option to be chosen.
:type option: str
:param list_widget: List widget that wants to be checked.
:type list_widget: QListWidget
"""
available_options = []
for i in range(list_widget.count()):
if list_widget.item(i).text() == option:
list_widget.setCurrentRow(i)
return
else:
available_options.append(list_widget.item(i).text())
message = (
'There is no %s in the list widget. The available options are '
'%s' % (option, available_options))
raise Exception(message)
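    # Illustrative note (not part of the original test suite): the tests below
    # combine the helpers above in a fixed pattern, e.g.
    #   self.check_current_step(dialog.step_kw_purpose)
    #   self.select_from_list_widget('Hazard', dialog.step_kw_purpose.lstCategories)
    #   dialog.pbnNext.click()
    # where 'Hazard' stands in for the translated layer-purpose name.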
def test_invalid_keyword_layer(self):
layer = clone_raster_layer(
name='invalid_keyword_xml',
include_keywords=True,
source_directory=standard_data_path('other'),
extension='.tif')
# check the environment first
self.assertIsNotNone(layer.dataProvider())
# Initialize dialog
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
# It shouldn't raise any exception although the xml is invalid
dialog.set_keywords_creation_mode(layer)
def test_hazard_without_inasafe_fields(self):
"""Test keyword wizard for layer without inasafe fields."""
# cloning layer that has no inasafe fields
layer = load_test_vector_layer(
'hazard', 'classified_generic_polygon.shp', clone=True)
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
# check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select hazard
self.select_from_list_widget(
layer_purpose_hazard['name'], dialog.step_kw_purpose.lstCategories)
# Click next to select hazard
dialog.pbnNext.click()
# Check if in select hazard step
self.check_current_step(dialog.step_kw_subcategory)
# select earthquake
self.select_from_list_widget(
hazard_earthquake['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select earthquake
dialog.pbnNext.click()
# Check if in select hazard category step
self.check_current_step(dialog.step_kw_hazard_category)
# select multiple event
self.select_from_list_widget(
hazard_category_multiple_event['name'],
dialog.step_kw_hazard_category.lstHazardCategories)
# Click next to select multiple event
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# select classified mode
self.select_from_list_widget(
layer_mode_classified['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select classified
dialog.pbnNext.click()
# Check if in select field step
self.check_current_step(dialog.step_kw_field)
# select h_zone field
self.select_from_list_widget(
'h_zone',
dialog.step_kw_field.lstFields)
# Click next to select h_zone
dialog.pbnNext.click()
# Check if in multi classification step
self.check_current_step(dialog.step_kw_multi_classifications)
# Click next to finish multi classifications step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
# Fill source form
dialog.step_kw_source.leSource.setText(source)
dialog.step_kw_source.leSource_scale.setText(source_scale)
dialog.step_kw_source.leSource_url.setText(source_url)
dialog.step_kw_source.ckbSource_date.setChecked(True)
dialog.step_kw_source.dtSource_date.setDateTime(source_date)
dialog.step_kw_source.leSource_license.setText(source_license)
# Click next to finish source step and go to extra keywords step
dialog.pbnNext.click()
# Check if in extra keywords step
self.check_current_step(dialog.step_kw_extra_keywords)
self.assertTrue(dialog.step_kw_extra_keywords.widgets_dict)
# Click next to finish extra keywords step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
# Fill title form
dialog.step_kw_title.leTitle.setText(layer_title)
# Click next to finish title step and go to summary step
dialog.pbnNext.click()
# Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
# Checking keyword created
expected_keyword = {
'scale': source_scale,
'hazard_category': hazard_category_multiple_event['key'],
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'hazard': hazard_earthquake['key'],
'inasafe_fields':
{hazard_value_field['key']: 'h_zone'},
'value_maps': layer.keywords['value_maps'],
'date': source_date,
'layer_geometry': layer_geometry_polygon['key'],
'layer_purpose': layer_purpose_hazard['key'],
'layer_mode': layer_mode_classified['key']
}
real_keywords = dialog.get_keywords()
self.assertDictEqual(dict_values_sorted(
real_keywords), dict_values_sorted(expected_keyword))
def test_aggregation_without_inasafe_fields(self):
"""Test keyword wizard for layer without inasafe fields."""
layer = load_test_vector_layer(
'aggregation', 'district_osm_jakarta.geojson', clone=True)
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
# check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Check aggregation
self.check_current_text(
layer_purpose_aggregation['name'],
dialog.step_kw_purpose.lstCategories)
# Click next
dialog.pbnNext.click()
# check if in step field
self.check_current_step(dialog.step_kw_field)
# Check aggregation
self.check_current_text(
layer.keywords['inasafe_fields']['aggregation_name_field'],
dialog.step_kw_field.lstFields)
# Click next
dialog.pbnNext.click()
# Check field mapping steps
self.check_current_step(dialog.step_kw_fields_mapping)
# Click next to continue
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
# Click next
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
# Click next
dialog.pbnNext.click()
# Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click next
dialog.pbnNext.click()
def test_hazard_volcano_polygon_keyword(self):
"""Test keyword wizard for volcano hazard polygon."""
layer = clone_shp_layer(
name='volcano_krb',
include_keywords=False,
source_directory=standard_data_path('hazard'))
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select hazard
self.select_from_list_widget(
layer_purpose_hazard['name'], dialog.step_kw_purpose.lstCategories)
# Click next to select hazard
dialog.pbnNext.click()
# Check if in select hazard step
self.check_current_step(dialog.step_kw_subcategory)
# select volcano
self.select_from_list_widget(
hazard_volcano['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select volcano
dialog.pbnNext.click()
# Check if in select hazard category step
self.check_current_step(dialog.step_kw_hazard_category)
# select multiple_event
self.select_from_list_widget(
hazard_category_multiple_event['name'],
dialog.step_kw_hazard_category.lstHazardCategories)
# Click next to select multiple event
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# select classified mode
self.select_from_list_widget(
layer_mode_classified['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select classified
dialog.pbnNext.click()
# Check if in select field step
self.check_current_step(dialog.step_kw_field)
# select KRB field
self.select_from_list_widget('KRB', dialog.step_kw_field.lstFields)
# Click next to select KRB
dialog.pbnNext.click()
# Check if in multi classification step
self.check_current_step(dialog.step_kw_multi_classifications)
# Change combo box
dialog.step_kw_multi_classifications.exposure_combo_boxes[
0].setCurrentIndex(1)
# Click save
dialog.step_kw_multi_classifications.save_button.click()
# Click next to finish multi classifications step
dialog.pbnNext.click()
# select inasafe fields step
self.check_current_step(dialog.step_kw_inasafe_fields)
# Get the parameter widget for hazard name
hazard_name_parameter_widget = dialog.step_kw_inasafe_fields.\
parameter_container.get_parameter_widget_by_guid(
hazard_name_field['key'])
# Check if it's set to no field at the beginning
self.assertEqual(
no_field, hazard_name_parameter_widget.get_parameter().value)
# Select volcano
hazard_name_parameter_widget.set_choice('volcano')
# Check if it's set to volcano
self.assertEqual(
'volcano', hazard_name_parameter_widget.get_parameter().value)
# Check if in InaSAFE field step
self.check_current_step(dialog.step_kw_inasafe_fields)
# Click next to finish InaSAFE Field step and go to source step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
dialog.step_kw_source.leSource.setText(source)
dialog.step_kw_source.leSource_scale.setText(source_scale)
dialog.step_kw_source.leSource_url.setText(source_url)
dialog.step_kw_source.ckbSource_date.setChecked(True)
dialog.step_kw_source.dtSource_date.setDateTime(source_date)
dialog.step_kw_source.leSource_license.setText(source_license)
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
dialog.step_kw_title.leTitle.setText(layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
# Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
# Checking Keyword Created
expected_keyword = {
'scale': source_scale,
'hazard_category': hazard_category_multiple_event['key'],
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'hazard': hazard_volcano['key'],
'inasafe_fields':
{
hazard_value_field['key']: 'KRB',
hazard_name_field['key']: 'volcano',
},
'value_maps': {
exposure_land_cover['key']: {
volcano_hazard_classes['key']: {
'active': True,
'classes': {
'high': ['Kawasan Rawan Bencana III'],
'low': ['Kawasan Rawan Bencana I'],
'medium': ['Kawasan Rawan Bencana II']
}
}
}
},
'date': source_date,
'layer_geometry': layer_geometry_polygon['key'],
'layer_purpose': layer_purpose_hazard['key'],
'layer_mode': layer_mode_classified['key']
}
real_keywords = dialog.get_keywords()
self.assertDictEqual(dict_values_sorted(
real_keywords), dict_values_sorted(expected_keyword))
def test_hazard_volcano_polygon_existing_keywords(self):
"""Test existing keyword for hazard volcano polygon."""
layer = load_test_vector_layer(
'hazard', 'volcano_krb.shp', clone=True)
default_classes = {
'high': ['Kawasan Rawan Bencana III'],
'low': ['Kawasan Rawan Bencana I'],
'medium': ['Kawasan Rawan Bencana II']
}
keywords = {
'hazard': hazard_volcano['key'],
'hazard_category': hazard_category_multiple_event['key'],
'inasafe_fields': {
hazard_name_field['key']: 'volcano',
hazard_value_field['key']: 'KRB'
},
'layer_geometry': layer_geometry_polygon['key'],
'layer_mode': layer_mode_classified['key'],
'layer_purpose': layer_purpose_hazard['key'],
'title': 'Volcano KRB',
'value_maps': {
exposure_land_cover['key']: {
volcano_hazard_classes['key']: {
'active': True,
'classes': default_classes
}
},
'population': {
'volcano_hazard_classes': {
'active': True,
'classes': default_classes
}
},
'road': {
'volcano_hazard_classes': {
'active': True,
'classes': default_classes
}
},
'structure': {
'volcano_hazard_classes': {
'active': True,
'classes': default_classes
}
}
}
}
layer.keywords = keywords
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer, keywords)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Check if hazard is selected
self.check_current_text(
layer_purpose_hazard['name'], dialog.step_kw_purpose.lstCategories)
# Click next to select hazard
dialog.pbnNext.click()
# Check if in select hazard step
self.check_current_step(dialog.step_kw_subcategory)
# Check if volcano is selected
self.check_current_text(
hazard_volcano['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select volcano
dialog.pbnNext.click()
# Check if in select hazard category step
self.check_current_step(dialog.step_kw_hazard_category)
# Check if multiple event is selected
self.check_current_text(
hazard_category_multiple_event['name'],
dialog.step_kw_hazard_category.lstHazardCategories)
# Click next to select multiple event
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# Check if classified is selected
self.check_current_text(
layer_mode_classified['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select classified
dialog.pbnNext.click()
# Check if in select field step
self.check_current_step(dialog.step_kw_field)
# Check if KRB is selected
self.check_current_text('KRB', dialog.step_kw_field.lstFields)
# Click next to select KRB
dialog.pbnNext.click()
# Check if in select classification step
self.check_current_step(dialog.step_kw_multi_classifications)
# Click next to finish multi classifications step
dialog.pbnNext.click()
        # Check if in additional keywords / InaSAFE fields step
self.check_current_step(dialog.step_kw_inasafe_fields)
# Check inasafe fields
parameters = dialog.step_kw_inasafe_fields. \
parameter_container.get_parameters(True)
# Get layer's inasafe_fields
inasafe_fields = layer.keywords.get('inasafe_fields')
self.assertIsNotNone(inasafe_fields)
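        # Every entry in the layer's inasafe_fields (except the compulsory
        # field, which has its own wizard step) must appear in the parameter
        # container with the same value.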
for key, value in list(inasafe_fields.items()):
            # Skip the check if it's the compulsory hazard field
if key == get_compulsory_fields(
layer_purpose_hazard['key'])['key']:
continue
# Check if existing key in parameters guid
self.assertIn(key, [p.guid for p in parameters])
            # Iterate through all parameters to get the parameter value
for parameter in parameters:
if parameter.guid == key:
# Check the value is the same
self.assertEqual(value, parameter.value)
break
for parameter in parameters:
            # If 'not available' is chosen, inasafe_fields shouldn't have it
if parameter.value == no_field:
self.assertNotIn(parameter.guid, list(inasafe_fields.keys()))
            # If a real field is chosen, inasafe_fields should have it
else:
self.assertIn(parameter.guid, list(inasafe_fields.keys()))
# Click next to finish inasafe fields step and go to source step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
self.assertTrue(dialog.pbnNext.isEnabled())
self.assertEqual(dialog.step_kw_source.leSource.text(), '')
self.assertEqual(dialog.step_kw_source.leSource_url.text(), '')
self.assertFalse(dialog.step_kw_source.ckbSource_date.isChecked())
self.assertEqual(dialog.step_kw_source.leSource_scale.text(), '')
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
self.assertEqual(
'Volcano KRB', dialog.step_kw_title.leTitle.text())
self.assertTrue(dialog.pbnNext.isEnabled())
# Click finish
dialog.pbnNext.click()
self.assertDictEqual(
keywords['value_maps'], dialog.get_keywords()['value_maps'])
self.assertDictEqual(keywords, dialog.get_keywords())
def test_exposure_structure_polygon_keyword(self):
"""Test keyword wizard for exposure structure polygon."""
layer = clone_shp_layer(
name='buildings',
include_keywords=False,
source_directory=standard_data_path('exposure'))
self.assertIsNotNone(layer)
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.qsettings = None
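        # qsettings is cleared so that previously stored defaults cannot
        # leak into this test.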
dialog.set_keywords_creation_mode(layer)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select exposure
self.select_from_list_widget(
layer_purpose_exposure['name'],
dialog.step_kw_purpose.lstCategories)
# Click next to select exposure
dialog.pbnNext.click()
# Check if in select exposure step
self.check_current_step(dialog.step_kw_subcategory)
# select structure
self.select_from_list_widget(
exposure_structure['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select structure
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# select classified mode
self.select_from_list_widget(
layer_mode_classified['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select classified
dialog.pbnNext.click()
# Check if in select field step
self.check_current_step(dialog.step_kw_field)
# select TYPE field
self.select_from_list_widget(
'TYPE', dialog.step_kw_field.lstFields)
# Click next to select TYPE
dialog.pbnNext.click()
# Check if in select classification step
self.check_current_step(dialog.step_kw_classification)
# select generic structure classes classification
self.select_from_list_widget(
generic_structure_classes['name'],
dialog.step_kw_classification.lstClassifications)
# Click next to select the classifications
dialog.pbnNext.click()
# Check if in classify step
self.check_current_step(dialog.step_kw_classify)
default_classes = generic_structure_classes['classes']
        unassigned_values = []  # no need to check, not saved in the file
assigned_values = {
'residential': ['Residential'],
'education': ['School'],
'health': ['Clinic/Doctor'],
'transport': [],
'place of worship': ['Place of Worship - Islam'],
'government': ['Government'],
'commercial': ['Commercial', 'Industrial'],
'recreation': [],
'public facility': [],
'evacuation centre': [],
'other': []
}
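        # Apply the value mapping above to the classify tree directly
        # instead of simulating drag and drop in the GUI.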
dialog.step_kw_classify.populate_classified_values(
unassigned_values, assigned_values, default_classes)
# Click next to finish value mapping
dialog.pbnNext.click()
# Check if in InaSAFE field step
self.check_current_step(dialog.step_kw_inasafe_fields)
# Click next to finish inasafe fields step and go to inasafe default
# field step
dialog.pbnNext.click()
# Check if in InaSAFE Default field step
# This step is disabled until we activate again value/rate fields.
# self.check_current_step(dialog.step_kw_default_inasafe_fields)
# Click next to finish InaSAFE Default Field step and go to source step
# dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
dialog.step_kw_source.leSource.setText(source)
dialog.step_kw_source.leSource_scale.setText(source_scale)
dialog.step_kw_source.leSource_url.setText(source_url)
dialog.step_kw_source.ckbSource_date.setChecked(True)
dialog.step_kw_source.dtSource_date.setDateTime(source_date)
dialog.step_kw_source.leSource_license.setText(source_license)
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
dialog.step_kw_title.leTitle.setText(layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
# Checking Keyword Created
expected_keyword = {
'scale': source_scale,
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'exposure': exposure_structure['key'],
'inasafe_fields': {
exposure_type_field['key']: 'TYPE',
},
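            # Only classes that actually received values should end up in
            # value_map; the 'if v' filter below drops the empty ones.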
            'value_map': dict(
                (k, v) for k, v in list(assigned_values.items()) if v),
'date': source_date,
'classification': generic_structure_classes['key'],
'layer_geometry': layer_geometry_polygon['key'],
'layer_purpose': layer_purpose_exposure['key'],
'layer_mode': layer_mode_classified['key']}
real_keywords = dialog.get_keywords()
self.assertDictEqual(dict_values_sorted(
real_keywords), dict_values_sorted(expected_keyword))
def test_exposure_structure_polygon_existing_keywords(self):
"""Test existing keyword for exposure structure polygon."""
layer = load_test_vector_layer(
'exposure', 'buildings.shp', clone=True)
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
        # Check if exposure is selected
self.check_current_text(
layer_purpose_exposure['name'],
dialog.step_kw_purpose.lstCategories)
# Click next to select exposure
dialog.pbnNext.click()
# Check if in select exposure step
self.check_current_step(dialog.step_kw_subcategory)
# Check if structure is selected
self.check_current_text(
exposure_structure['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select structure
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# Check if classified is selected
self.check_current_text(
layer_mode_classified['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select classified
dialog.pbnNext.click()
# Check if in select field step
self.check_current_step(dialog.step_kw_field)
# Check if TYPE is selected
self.check_current_text('TYPE', dialog.step_kw_field.lstFields)
# Click next to select TYPE
dialog.pbnNext.click()
# Check if in select classification step
self.check_current_step(dialog.step_kw_classification)
# Check if generic structure classes is selected.
self.check_current_text(
generic_structure_classes['name'],
dialog.step_kw_classification.lstClassifications)
# Click next to select the classifications
dialog.pbnNext.click()
# Check if in classify step
self.check_current_step(dialog.step_kw_classify)
# Click next to finish value mapping
dialog.pbnNext.click()
        # Check if in additional keywords / InaSAFE fields step
self.check_current_step(dialog.step_kw_inasafe_fields)
# Check inasafe fields
parameters = dialog.step_kw_inasafe_fields. \
parameter_container.get_parameters(True)
# Get layer's inasafe_fields
inasafe_fields = layer.keywords.get('inasafe_fields')
self.assertIsNotNone(inasafe_fields)
for key, value in list(inasafe_fields.items()):
            # Skip the check if it's the compulsory exposure field
if key == get_compulsory_fields(
layer_purpose_exposure['key'])['key']:
continue
# Check if existing key in parameters guid
self.assertIn(key, [p.guid for p in parameters])
            # Iterate through all parameters to get the parameter value
for parameter in parameters:
if parameter.guid == key:
# Check the value is the same
self.assertEqual(value, parameter.value)
break
for parameter in parameters:
            # If 'not available' is chosen, inasafe_fields shouldn't have it
if parameter.value == no_field:
self.assertNotIn(parameter.guid, list(inasafe_fields.keys()))
            # If a real field is chosen, inasafe_fields should have it
else:
self.assertIn(parameter.guid, list(inasafe_fields.keys()))
# Click next to finish inasafe fields step and go to inasafe default
# field step
dialog.pbnNext.click()
# Check if in InaSAFE Default field step
# This step is disabled until we activate again value/rate fields.
# self.check_current_step(dialog.step_kw_default_inasafe_fields)
# Click next to finish InaSAFE Default Field step and go to source step
# dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
self.assertTrue(dialog.pbnNext.isEnabled())
self.assertEqual(
dialog.step_kw_source.leSource.text(),
layer.keywords.get('source'))
self.assertEqual(dialog.step_kw_source.leSource_url.text(), '')
self.assertFalse(dialog.step_kw_source.ckbSource_date.isChecked())
self.assertEqual(dialog.step_kw_source.leSource_scale.text(), '')
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
self.assertEqual(
'Buildings', dialog.step_kw_title.leTitle.text())
self.assertTrue(dialog.pbnNext.isEnabled())
# Click finish
dialog.pbnNext.click()
self.assertDictEqual(
dict_values_sorted(layer.keywords['value_map']),
dict_values_sorted(dialog.get_keywords()['value_map']))
def test_aggregation_keyword(self):
"""Test Aggregation Keywords."""
layer = load_test_vector_layer(
'gisv4', 'aggregation', 'small_grid.geojson', clone_to_memory=True)
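        # Clear any keywords bundled with the test layer so the wizard
        # starts from scratch.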
layer.keywords = {}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select aggregation
self.select_from_list_widget(
layer_purpose_aggregation['name'],
dialog.step_kw_purpose.lstCategories)
# Click next to select aggregation
dialog.pbnNext.click()
# Check if in select field step
self.check_current_step(dialog.step_kw_field)
# select area_name field
area_name = 'area_name'
self.select_from_list_widget(
area_name, dialog.step_kw_field.lstFields)
# Click next to select area_name
dialog.pbnNext.click()
# Check field mapping steps
self.check_current_step(dialog.step_kw_fields_mapping)
# Click next to continue
dialog.pbnNext.click()
        # Check if in InaSAFE fields step
self.check_current_step(dialog.step_kw_inasafe_fields)
        # Check the radio button behaviour on the default InaSAFE fields step
self.check_radio_button_behaviour(
dialog.step_kw_default_inasafe_fields)
# Click next to finish inasafe fields step and go to source step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
dialog.step_kw_title.leTitle.setText(layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
expected_keyword = {
'inasafe_fields': {aggregation_name_field['key']: area_name},
'layer_geometry': layer_geometry_polygon['key'],
'layer_purpose': layer_purpose_aggregation['key'],
'title': layer_title
}
# Check the keywords
real_keywords = dialog.get_keywords()
self.assertDictEqual(dict_values_sorted(real_keywords),
dict_values_sorted(expected_keyword))
def test_aggregation_existing_keyword(self):
"""Test Keyword wizard for aggregation layer with keywords."""
layer = load_test_vector_layer(
'gisv4', 'aggregation', 'small_grid.geojson', clone_to_memory=True)
area_name = 'area_name'
expected_keyword = {
'inasafe_fields': {aggregation_name_field['key']: area_name},
'layer_geometry': layer_geometry_polygon['key'],
'layer_purpose': layer_purpose_aggregation['key'],
'title': layer_title
}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer, expected_keyword)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
        # Check if aggregation is selected
self.check_current_text(
layer_purpose_aggregation['name'],
dialog.step_kw_purpose.lstCategories)
# Click next to select aggregation
dialog.pbnNext.click()
# Check if in select field step
self.check_current_step(dialog.step_kw_field)
        # Check if area_name field is selected
self.check_current_text(
area_name, dialog.step_kw_field.lstFields)
        # Click next to select area_name
dialog.pbnNext.click()
# Check field mapping steps
self.check_current_step(dialog.step_kw_fields_mapping)
# Click next to continue
dialog.pbnNext.click()
# Check inasafe fields step
self.check_current_step(dialog.step_kw_inasafe_fields)
        # Check the radio button behaviour on the default InaSAFE fields step
self.check_radio_button_behaviour(
dialog.step_kw_default_inasafe_fields)
# Click next to finish inasafe fields step and go to source step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
# Check if the title is already filled
self.assertEqual(dialog.step_kw_title.leTitle.text(), layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
# Check the keywords
real_keywords = dialog.get_keywords()
self.assertDictEqual(dict_values_sorted(real_keywords),
dict_values_sorted(expected_keyword))
def test_exposure_population_polygon_keyword(self):
"""Test exposure population polygon keyword."""
layer = load_test_vector_layer(
'exposure', 'census.geojson', clone_to_memory=True)
layer.keywords = {}
self.assertIsNotNone(layer)
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select exposure
self.select_from_list_widget(
layer_purpose_exposure['name'],
dialog.step_kw_purpose.lstCategories)
# Click next to select exposure
dialog.pbnNext.click()
# Check if in select exposure step
self.check_current_step(dialog.step_kw_subcategory)
# select population
self.select_from_list_widget(
exposure_population['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select population
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# Select continuous
self.select_from_list_widget(
layer_mode_continuous['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select continuous
dialog.pbnNext.click()
# Check if in select unit step
self.check_current_step(dialog.step_kw_unit)
# Select count
self.select_from_list_widget(
count_exposure_unit['name'],
dialog.step_kw_unit.lstUnits)
# Click next to select count
dialog.pbnNext.click()
# Check if in select field step
self.check_current_step(dialog.step_kw_field)
# select population field
population_field = 'population'
self.select_from_list_widget(
population_field, dialog.step_kw_field.lstFields)
# Click next to select population
dialog.pbnNext.click()
# Check field mapping steps
self.check_current_step(dialog.step_kw_fields_mapping)
# Click next to continue
dialog.pbnNext.click()
# Check if in InaSAFE field step
self.check_current_step(dialog.step_kw_inasafe_fields)
# Click next to finish InaSAFE Field step and go to source step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
dialog.step_kw_source.leSource.setText(source)
dialog.step_kw_source.leSource_scale.setText(source_scale)
dialog.step_kw_source.leSource_url.setText(source_url)
dialog.step_kw_source.ckbSource_date.setChecked(True)
dialog.step_kw_source.dtSource_date.setDateTime(source_date)
dialog.step_kw_source.leSource_license.setText(source_license)
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
dialog.step_kw_title.leTitle.setText(layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
# Checking Keyword Created
expected_keyword = {
'scale': source_scale,
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'exposure': exposure_population['key'],
'exposure_unit': count_exposure_unit['key'],
'inasafe_fields':
{
population_count_field['key']: ['population'],
},
'date': source_date,
'layer_geometry': layer_geometry_polygon['key'],
'layer_purpose': layer_purpose_exposure['key'],
'layer_mode': layer_mode_continuous['key']
}
real_keywords = dialog.get_keywords()
self.assertDictEqual(dict_values_sorted(
real_keywords), dict_values_sorted(expected_keyword))
def test_exposure_population_polygon_existing_keyword(self):
"""Test existing exposure population polygon with keyword."""
layer = load_test_vector_layer(
'exposure', 'census.geojson', clone_to_memory=True)
expected_keyword = {
'scale': source_scale,
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'exposure': exposure_population['key'],
'exposure_unit': count_exposure_unit['key'],
'inasafe_fields':
{
                    # Dummy: select more than one field to show we can do it.
population_count_field['key']: ['population', 'id'],
},
'date': source_date,
'layer_geometry': layer_geometry_polygon['key'],
'layer_purpose': layer_purpose_exposure['key'],
'layer_mode': layer_mode_continuous['key']
}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer, expected_keyword)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
        # Select exposure
self.select_from_list_widget(
layer_purpose_exposure['name'],
dialog.step_kw_purpose.lstCategories)
# Click next to select exposure
dialog.pbnNext.click()
# Check if in select exposure step
self.check_current_step(dialog.step_kw_subcategory)
# Check if population is selected
self.check_current_text(
exposure_population['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select population
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# Check if continuous is selected
self.check_current_text(
layer_mode_continuous['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select continuous
dialog.pbnNext.click()
# Check if in select unit step
self.check_current_step(dialog.step_kw_unit)
# Check if count is selected
self.check_current_text(
count_exposure_unit['name'],
dialog.step_kw_unit.lstUnits)
# Click next to select count
dialog.pbnNext.click()
        # Check if in select field step
self.check_current_step(dialog.step_kw_field)
# Check if population is selected
population_field = expected_keyword['inasafe_fields'][
population_count_field['key']]
self.check_current_text(
population_field, dialog.step_kw_field.lstFields)
# Click next to select population
dialog.pbnNext.click()
# Check field mapping steps
self.check_current_step(dialog.step_kw_fields_mapping)
# Click next to continue
dialog.pbnNext.click()
# Check if in InaSAFE field step
self.check_current_step(dialog.step_kw_inasafe_fields)
        # Click next to finish inasafe fields step and go to source step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
self.assertEqual(dialog.step_kw_source.leSource.text(), source)
self.assertEqual(
dialog.step_kw_source.leSource_scale.text(), source_scale)
self.assertEqual(
dialog.step_kw_source.ckbSource_date.isChecked(), True)
self.assertEqual(
dialog.step_kw_source.dtSource_date.dateTime(), source_date)
self.assertEqual(
dialog.step_kw_source.leSource_license.text(), source_license)
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
self.assertEqual(dialog.step_kw_title.leTitle.text(), layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
# Checking Keyword Created
real_keywords = dialog.get_keywords()
self.assertDictEqual(dict_values_sorted(
real_keywords), dict_values_sorted(expected_keyword))
def test_exposure_population_raster(self):
"""Test keyword wizard for population raster."""
path = standard_data_path(
'exposure', 'people_allow_resampling_true.tif')
message = "Path %s is not found" % path
self.assertTrue(os.path.exists(path), message)
layer = clone_raster_layer(
name='people_allow_resampling_true',
extension='.tif',
include_keywords=False,
source_directory=standard_data_path('exposure'))
self.assertIsNotNone(layer)
layer.keywords = {}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select exposure
self.select_from_list_widget(
layer_purpose_exposure['name'],
dialog.step_kw_purpose.lstCategories)
# Click next to select exposure
dialog.pbnNext.click()
# Check if in select exposure step
self.check_current_step(dialog.step_kw_subcategory)
# select population
self.select_from_list_widget(
exposure_population['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select population
dialog.pbnNext.click()
# Check if in select band step
self.check_current_step(dialog.step_kw_band_selector)
# Click next to select Band 1 (default)
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# Check if continuous is selected
self.check_current_text(
layer_mode_continuous['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select continuous
dialog.pbnNext.click()
# Check if in select unit step
self.check_current_step(dialog.step_kw_unit)
# Check if count is selected
self.check_current_text(
count_exposure_unit['name'],
dialog.step_kw_unit.lstUnits)
# Click next to select count
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
real_keywords = dialog.get_keywords()
self.assertEqual(1, real_keywords['active_band'])
def test_clean_keyword_wizard(self):
"""Test for having the clean state when we run keyword wizard."""
layer = load_test_vector_layer(
'gisv4',
'exposure',
'population_multi_fields.geojson',
clone_to_memory=True)
self.assertIsNotNone(layer)
expected_keyword = {
'scale': source_scale,
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'exposure': exposure_population['key'],
'exposure_unit': count_exposure_unit['key'],
'inasafe_fields':
{
                    # Dummy: select more than one field to show we can do it.
population_count_field['key']: [
'right_hand',
'left_hand'
],
female_count_field['key']: [
'F_0_4',
'F_5_9',
'F_9_15',
'F_15_30',
'F_30_60',
'F_60_100'
]
},
'date': source_date,
'layer_geometry': layer_geometry_polygon['key'],
'layer_purpose': layer_purpose_exposure['key'],
'layer_mode': layer_mode_continuous['key']
}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer, expected_keyword)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Click next to select exposure
dialog.pbnNext.click()
# Click next to select population
dialog.pbnNext.click()
# Click next to select continuous
dialog.pbnNext.click()
# Click next to select count
dialog.pbnNext.click()
# Click next to select population
dialog.pbnNext.click()
# Click next to continue
dialog.pbnNext.click()
        # Click next to finish inasafe fields step and go to source step
dialog.pbnNext.click()
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
# Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
# Checking Keyword Created
real_keywords = dialog.get_keywords()
self.assertDictEqual(byteify(real_keywords), byteify(expected_keyword))
# Now we try to use the same dialog object for assigning another layer.
        # This is a replication of issue #4200
hazard_layer = load_test_vector_layer(
'hazard', 'volcano_krb.shp', clone=True)
hazard_expected_keyword = {
'scale': source_scale,
'hazard_category': hazard_category_multiple_event['key'],
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'hazard': hazard_volcano['key'],
'inasafe_fields':
{
hazard_value_field['key']: 'KRB',
hazard_name_field['key']: 'volcano',
},
'value_maps': {
exposure_land_cover['key']: {
volcano_hazard_classes['key']: {
'active': True,
'classes': {
'high': ['Kawasan Rawan Bencana III'],
'low': ['Kawasan Rawan Bencana I'],
'medium': ['Kawasan Rawan Bencana II']
}
}
}
},
'layer_geometry': layer_geometry_polygon['key'],
'layer_purpose': layer_purpose_hazard['key'],
'layer_mode': layer_mode_classified['key']
}
# noinspection PyTypeChecker
dialog.set_keywords_creation_mode(
hazard_layer, hazard_expected_keyword)
# Click next to select hazard
dialog.pbnNext.click()
# Click next to select volcano
dialog.pbnNext.click()
# Click next to select multiple event
dialog.pbnNext.click()
# Click next to select classified
dialog.pbnNext.click()
# Click next to select KRB
dialog.pbnNext.click()
# Click next to finish multi classifications step
dialog.pbnNext.click()
# Click next to finish InaSAFE Field step and go to source step
dialog.pbnNext.click()
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
# Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
real_keywords = dialog.get_keywords()
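        # The second run must not be polluted by state left over from the
        # first layer.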
self.assertDictEqual(hazard_expected_keyword, real_keywords)
def test_exposure_multi_fields_existing_keyword(self):
"""Test for exposure layer with multiple fields."""
layer = load_test_vector_layer(
'gisv4',
'exposure',
'population_multi_fields.geojson',
clone_to_memory=True)
self.assertIsNotNone(layer)
expected_keyword = {
'scale': source_scale,
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'exposure': exposure_population['key'],
'exposure_unit': count_exposure_unit['key'],
'inasafe_fields':
{
                    # Dummy: select more than one field to show we can do it.
population_count_field['key']: [
'right_hand',
'left_hand'
],
female_count_field['key']: [
'F_0_4',
'F_5_9',
'F_9_15',
'F_15_30',
'F_30_60',
'F_60_100'
]
},
'date': source_date,
'layer_geometry': layer_geometry_polygon['key'],
'layer_purpose': layer_purpose_exposure['key'],
'layer_mode': layer_mode_continuous['key']
}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer, expected_keyword)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
        # Select exposure
self.select_from_list_widget(
layer_purpose_exposure['name'],
dialog.step_kw_purpose.lstCategories)
# Click next to select exposure
dialog.pbnNext.click()
# Check if in select exposure step
self.check_current_step(dialog.step_kw_subcategory)
# Check if population is selected
self.check_current_text(
exposure_population['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select population
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# Check if continuous is selected
self.check_current_text(
layer_mode_continuous['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select continuous
dialog.pbnNext.click()
# Check if in select unit step
self.check_current_step(dialog.step_kw_unit)
# Check if count is selected
self.check_current_text(
count_exposure_unit['name'],
dialog.step_kw_unit.lstUnits)
# Click next to select count
dialog.pbnNext.click()
# Check if in select field step
self.check_current_step(dialog.step_kw_field)
# Check if population field is selected
population_field = expected_keyword['inasafe_fields'][
population_count_field['key']]
self.check_current_text(
population_field, dialog.step_kw_field.lstFields)
# Click next to select population field
dialog.pbnNext.click()
# Check field mapping steps
self.check_current_step(dialog.step_kw_fields_mapping)
# Click next to continue
dialog.pbnNext.click()
# Check if in InaSAFE field step
self.check_current_step(dialog.step_kw_inasafe_fields)
        # Click next to finish inasafe fields step and go to source step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
self.assertEqual(dialog.step_kw_source.leSource.text(), source)
self.assertEqual(
dialog.step_kw_source.leSource_scale.text(), source_scale)
self.assertEqual(
dialog.step_kw_source.ckbSource_date.isChecked(), True)
self.assertEqual(
dialog.step_kw_source.dtSource_date.dateTime(), source_date)
self.assertEqual(
dialog.step_kw_source.leSource_license.text(), source_license)
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
self.assertEqual(dialog.step_kw_title.leTitle.text(), layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
# Checking Keyword Created
real_keywords = dialog.get_keywords()
self.assertDictEqual(byteify(real_keywords), byteify(expected_keyword))
def test_exposure_place_population(self):
"""Test for place with population exposure."""
layer = load_test_vector_layer(
'gisv4',
'exposure',
'places.geojson',
clone_to_memory=True)
self.assertIsNotNone(layer)
expected_keyword = {
'scale': source_scale,
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'exposure': exposure_place['key'],
'inasafe_fields':
{
exposure_type_field['key']: 'Type',
population_count_field['key']: 'Population',
},
'date': source_date,
'layer_geometry': layer_geometry_point['key'],
'layer_purpose': layer_purpose_exposure['key'],
'layer_mode': layer_mode_classified['key'],
'classification': generic_place_classes['key'],
}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer, expected_keyword)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
        # Select exposure
self.select_from_list_widget(
layer_purpose_exposure['name'],
dialog.step_kw_purpose.lstCategories)
# Click next to select exposure
dialog.pbnNext.click()
# Check if in select exposure step
self.check_current_step(dialog.step_kw_subcategory)
# Check if place is selected
self.check_current_text(
exposure_place['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select place
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# Check if classified is selected
self.check_current_text(
layer_mode_classified['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select classified
dialog.pbnNext.click()
# Check if in select field step
self.check_current_step(dialog.step_kw_field)
# Check if place type field is selected
place_type_field = expected_keyword['inasafe_fields'][
exposure_type_field['key']]
self.check_current_text(
place_type_field, dialog.step_kw_field.lstFields)
# Click next to select place type field
dialog.pbnNext.click()
# Check if in select classification step
self.check_current_step(dialog.step_kw_classification)
        # Check if generic place classes is selected.
self.check_current_text(
generic_place_classes['name'],
dialog.step_kw_classification.lstClassifications)
# Click next to select the classifications
dialog.pbnNext.click()
# Check if in classify step
self.check_current_step(dialog.step_kw_classify)
# Click next to finish value mapping
dialog.pbnNext.click()
        # Check if in additional keywords / InaSAFE fields step
self.check_current_step(dialog.step_kw_inasafe_fields)
current_inasafe_field = dialog.step_kw_inasafe_fields.\
get_inasafe_fields()
population_field = current_inasafe_field.get(
population_count_field['key'])
expected_population_field = expected_keyword['inasafe_fields'][
population_count_field['key']]
# Check if the population field is set.
self.assertEqual(population_field, expected_population_field)
# Click next to finish set the InaSAFE fields
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
self.assertEqual(dialog.step_kw_title.leTitle.text(), layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
# Checking Keyword Created
real_keywords = dialog.get_keywords()
self.assertDictEqual(byteify(real_keywords), byteify(expected_keyword))
def test_classified_raster_keywords(self):
"""Test keyword wizard for classified raster."""
path = standard_data_path('hazard', 'classified_flood_20_20.asc')
message = "Path %s is not found" % path
self.assertTrue(os.path.exists(path), message)
layer = clone_raster_layer(
name='classified_flood_20_20',
extension='.asc',
include_keywords=False,
source_directory=standard_data_path('hazard'))
self.assertIsNotNone(layer)
layer.keywords = {}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select hazard
self.select_from_list_widget(
layer_purpose_hazard['name'], dialog.step_kw_purpose.lstCategories)
# Click next to select hazard
dialog.pbnNext.click()
# Check if in select hazard step
self.check_current_step(dialog.step_kw_subcategory)
# select flood
self.select_from_list_widget(
hazard_flood['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select flood
dialog.pbnNext.click()
# Check if in select hazard category step
self.check_current_step(dialog.step_kw_hazard_category)
# select multiple_event
self.select_from_list_widget(
hazard_category_multiple_event['name'],
dialog.step_kw_hazard_category.lstHazardCategories)
# Click next to select multiple event
dialog.pbnNext.click()
# Check if in select band step
self.check_current_step(dialog.step_kw_band_selector)
# Click next to select Band 1 (default)
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# select classified mode
self.select_from_list_widget(
layer_mode_classified['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select classified
dialog.pbnNext.click()
# Check if in multi classification step
self.check_current_step(dialog.step_kw_multi_classifications)
        # Change the classification combo box for the first exposure
dialog.step_kw_multi_classifications.exposure_combo_boxes[
0].setCurrentIndex(1)
# Click save
dialog.step_kw_multi_classifications.save_button.click()
# Click next to finish multi classifications step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
dialog.step_kw_source.leSource.setText(source)
dialog.step_kw_source.leSource_scale.setText(source_scale)
dialog.step_kw_source.leSource_url.setText(source_url)
dialog.step_kw_source.ckbSource_date.setChecked(True)
dialog.step_kw_source.dtSource_date.setDateTime(source_date)
dialog.step_kw_source.leSource_license.setText(source_license)
# Click next to finish source step and go to extra keywords step
dialog.pbnNext.click()
self.check_current_step(dialog.step_kw_extra_keywords)
# Click next to finish extra keywords step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
dialog.step_kw_title.leTitle.setText(layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
# Checking Keyword Created
expected_keyword = {
'active_band': 1,
'scale': source_scale,
'hazard_category': hazard_category_multiple_event['key'],
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'hazard': hazard_flood['key'],
'date': source_date,
'layer_geometry': layer_geometry_raster['key'],
'layer_purpose': layer_purpose_hazard['key'],
'layer_mode': layer_mode_classified['key'],
'value_maps': {
exposure_land_cover['key']: {
flood_hazard_classes['key']: {
'active': True,
'classes': {
'wet': [1.0, 2.0, 3.0]
}
}
}
}
}
real_keywords = dialog.get_keywords()
self.assertDictEqual(dict_values_sorted(
real_keywords), dict_values_sorted(expected_keyword))
def test_classified_raster_existing_keywords(self):
"""Test keyword wizard for existing keywords classified raster."""
layer = clone_raster_layer(
name='classified_flood_20_20',
extension='.asc',
include_keywords=False,
source_directory=standard_data_path('hazard'))
self.assertIsNotNone(layer)
expected_keyword = {
'active_band': 1,
'scale': source_scale,
'hazard_category': hazard_category_multiple_event['key'],
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'hazard': hazard_flood['key'],
'value_maps': {},
'date': source_date,
'layer_geometry': layer_geometry_raster['key'],
'layer_purpose': layer_purpose_hazard['key'],
'layer_mode': layer_mode_classified['key']
}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer, expected_keyword)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Check if hazard is selected
self.check_current_text(
layer_purpose_hazard['name'], dialog.step_kw_purpose.lstCategories)
# Click next to select hazard
dialog.pbnNext.click()
# Check if in select hazard step
self.check_current_step(dialog.step_kw_subcategory)
# Check if flood is selected
self.check_current_text(
hazard_flood['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select flood
dialog.pbnNext.click()
# Check if in select hazard category step
self.check_current_step(dialog.step_kw_hazard_category)
# Check if multiple event is selected
self.check_current_text(
hazard_category_multiple_event['name'],
dialog.step_kw_hazard_category.lstHazardCategories)
# Click next to select multiple event
dialog.pbnNext.click()
# Check if in select band step
self.check_current_step(dialog.step_kw_band_selector)
# Click next to select Band 1 (default)
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# Check if classified is selected
self.check_current_text(
layer_mode_classified['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select classified
dialog.pbnNext.click()
        # Check if in multi classification step
self.check_current_step(dialog.step_kw_multi_classifications)
        # Change the classification combo box for the first exposure
dialog.step_kw_multi_classifications.exposure_combo_boxes[
0].setCurrentIndex(1)
# Click save
dialog.step_kw_multi_classifications.save_button.click()
# Click next to finish multi classifications step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
self.assertEqual(dialog.step_kw_source.leSource.text(), source)
self.assertEqual(
dialog.step_kw_source.leSource_scale.text(), source_scale)
self.assertEqual(
dialog.step_kw_source.ckbSource_date.isChecked(), True)
self.assertEqual(
dialog.step_kw_source.dtSource_date.dateTime(), source_date)
self.assertEqual(
dialog.step_kw_source.leSource_license.text(), source_license)
# Click next to finish source step and go to extra keywords step
dialog.pbnNext.click()
self.check_current_step(dialog.step_kw_extra_keywords)
# Click next to finish extra keywords step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
self.assertEqual(dialog.step_kw_title.leTitle.text(), layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
# Checking Keyword Created
real_keywords = dialog.get_keywords()
self.assertDictEqual(dict_values_sorted(
real_keywords), dict_values_sorted(expected_keyword))
def test_continuous_raster_keywords(self):
"""Test keyword wizard for continuous raster."""
path = standard_data_path('hazard', 'continuous_flood_20_20.asc')
message = "Path %s is not found" % path
self.assertTrue(os.path.exists(path), message)
layer = clone_raster_layer(
name='continuous_flood_20_20',
extension='.asc',
include_keywords=False,
source_directory=standard_data_path('hazard'))
self.assertIsNotNone(layer)
layer.keywords = {}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select hazard
self.select_from_list_widget(
layer_purpose_hazard['name'], dialog.step_kw_purpose.lstCategories)
# Click next to select hazard
dialog.pbnNext.click()
# Check if in select hazard step
self.check_current_step(dialog.step_kw_subcategory)
# select flood
self.select_from_list_widget(
hazard_flood['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select flood
dialog.pbnNext.click()
# Check if in select hazard category step
self.check_current_step(dialog.step_kw_hazard_category)
# select multiple_event
self.select_from_list_widget(
hazard_category_multiple_event['name'],
dialog.step_kw_hazard_category.lstHazardCategories)
# Click next to select multiple event
dialog.pbnNext.click()
# Check if in select band step
self.check_current_step(dialog.step_kw_band_selector)
# Click next to select Band 1 (default)
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# select continuous mode
self.select_from_list_widget(
layer_mode_continuous['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select continuous
dialog.pbnNext.click()
# Check if in select unit step
self.check_current_step(dialog.step_kw_unit)
# select unit metres
self.select_from_list_widget(
unit_metres['name'],
dialog.step_kw_unit.lstUnits)
# Click next to select unit metres
dialog.pbnNext.click()
# Check if in select multi classifications step
self.check_current_step(dialog.step_kw_multi_classifications)
        # Change the classification combo box for the first exposure
dialog.step_kw_multi_classifications.exposure_combo_boxes[
0].setCurrentIndex(1)
# Click save
dialog.step_kw_multi_classifications.save_button.click()
# Click next to finish multi classifications step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
dialog.step_kw_source.leSource.setText(source)
dialog.step_kw_source.leSource_scale.setText(source_scale)
dialog.step_kw_source.leSource_url.setText(source_url)
dialog.step_kw_source.ckbSource_date.setChecked(True)
dialog.step_kw_source.dtSource_date.setDateTime(source_date)
dialog.step_kw_source.leSource_license.setText(source_license)
# Click next to finish source step and go to extra keywords step
dialog.pbnNext.click()
self.check_current_step(dialog.step_kw_extra_keywords)
# Click next to finish extra keywords step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
dialog.step_kw_title.leTitle.setText(layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
# Checking Keyword Created
expected_keyword = {
'active_band': 1,
'continuous_hazard_unit': 'metres',
'date': source_date,
'hazard': hazard_flood['key'],
'hazard_category': hazard_category_multiple_event['key'],
'layer_geometry': layer_geometry_raster['key'],
'layer_mode': layer_mode_continuous['key'],
'layer_purpose': layer_purpose_hazard['key'],
'license': source_license,
'scale': source_scale,
'source': source,
'title': layer_title,
'url': source_url,
'thresholds': {
exposure_land_cover['key']: {
flood_hazard_classes['key']: {
'active': True,
'classes': {
'dry': [0.0, 1.0],
'wet': [1.0, big_number]
}
}
}
}
}
real_keywords = dialog.get_keywords()
self.assertDictEqual(byteify(real_keywords), byteify(expected_keyword))
def test_continuous_raster_existing_keywords(self):
"""Test keyword wizard for continuous raster with assigned keyword."""
path = standard_data_path('hazard', 'continuous_flood_20_20.asc')
message = "Path %s is not found" % path
self.assertTrue(os.path.exists(path), message)
layer = clone_raster_layer(
name='continuous_flood_20_20',
extension='.asc',
include_keywords=False,
source_directory=standard_data_path('hazard'))
self.assertIsNotNone(layer)
original_keywords = {
'active_band': 1,
'continuous_hazard_unit': 'metres',
'date': source_date,
'hazard': hazard_flood['key'],
'hazard_category': hazard_category_multiple_event['key'],
'layer_geometry': layer_geometry_raster['key'],
'layer_mode': layer_mode_continuous['key'],
'layer_purpose': layer_purpose_hazard['key'],
'license': source_license,
'scale': source_scale,
'source': source,
'thresholds': {
exposure_land_cover['key']: {
flood_hazard_classes['key']: {
'classes': {
'dry': [0, 1],
'wet': [1, 9999999999]
},
'active': True
}
},
},
'title': layer_title,
'url': source_url,
}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer, original_keywords)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select hazard
self.select_from_list_widget(
layer_purpose_hazard['name'], dialog.step_kw_purpose.lstCategories)
# Click next to select hazard
dialog.pbnNext.click()
# Check if in select hazard step
self.check_current_step(dialog.step_kw_subcategory)
# select flood
self.select_from_list_widget(
hazard_flood['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select flood
dialog.pbnNext.click()
# Check if in select hazard category step
self.check_current_step(dialog.step_kw_hazard_category)
# select multiple_event
self.select_from_list_widget(
hazard_category_multiple_event['name'],
dialog.step_kw_hazard_category.lstHazardCategories)
# Click next to select multiple event
dialog.pbnNext.click()
# Check if in select band step
self.check_current_step(dialog.step_kw_band_selector)
# Click next to select Band 1 (default)
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# select continuous mode
self.select_from_list_widget(
layer_mode_continuous['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select continuous
dialog.pbnNext.click()
# Check if in select unit step
self.check_current_step(dialog.step_kw_unit)
# select unit metres
self.select_from_list_widget(
unit_metres['name'],
dialog.step_kw_unit.lstUnits)
# Click next to select unit metres
dialog.pbnNext.click()
# Check if in select multi classifications step
self.check_current_step(dialog.step_kw_multi_classifications)
# Click next to finish multi classifications step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
dialog.step_kw_source.leSource.setText(source)
dialog.step_kw_source.leSource_scale.setText(source_scale)
dialog.step_kw_source.leSource_url.setText(source_url)
dialog.step_kw_source.ckbSource_date.setChecked(True)
dialog.step_kw_source.dtSource_date.setDateTime(source_date)
dialog.step_kw_source.leSource_license.setText(source_license)
# Click next to finish source step and go to extra keywords step
dialog.pbnNext.click()
self.check_current_step(dialog.step_kw_extra_keywords)
# Click next to finish extra keywords step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
dialog.step_kw_title.leTitle.setText(layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
real_keywords = dialog.get_keywords()
self.assertDictEqual(
byteify(real_keywords), byteify(original_keywords))
def test_continuous_vector(self):
"""Test continuous vector for keyword wizard."""
layer = load_test_vector_layer(
'hazard', 'continuous_vector.geojson', clone_to_memory=True)
layer.keywords = {}
self.assertIsNotNone(layer)
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select hazard
self.select_from_list_widget(
layer_purpose_hazard['name'],
dialog.step_kw_purpose.lstCategories)
# Click next to select hazard
dialog.pbnNext.click()
# Check if in select hazard step
self.check_current_step(dialog.step_kw_subcategory)
# select flood
self.select_from_list_widget(
hazard_flood['name'],
dialog.step_kw_subcategory.lstSubcategories)
        # Click next to select flood
dialog.pbnNext.click()
# Check if in select hazard category step
self.check_current_step(dialog.step_kw_hazard_category)
# select multiple_event
self.select_from_list_widget(
hazard_category_multiple_event['name'],
dialog.step_kw_hazard_category.lstHazardCategories)
# Click next to select multiple event
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# Select continuous
self.select_from_list_widget(
layer_mode_continuous['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select continuous
dialog.pbnNext.click()
# Check if in select unit step
self.check_current_step(dialog.step_kw_unit)
# Select metres
self.select_from_list_widget(
unit_metres['name'],
dialog.step_kw_unit.lstUnits)
# Click next to select metres
dialog.pbnNext.click()
# Check if in select field step
self.check_current_step(dialog.step_kw_field)
        # select depth field
depth_field = 'depth'
self.select_from_list_widget(
depth_field, dialog.step_kw_field.lstFields)
# Click next to select depth
dialog.pbnNext.click()
# Check if in multi classification step
self.check_current_step(dialog.step_kw_multi_classifications)
        # Change the classification combo box for the first exposure
dialog.step_kw_multi_classifications.exposure_combo_boxes[
0].setCurrentIndex(1)
# Click save
dialog.step_kw_multi_classifications.save_button.click()
# Click next to finish multi classifications step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
dialog.step_kw_source.leSource.setText(source)
dialog.step_kw_source.leSource_scale.setText(source_scale)
dialog.step_kw_source.leSource_url.setText(source_url)
dialog.step_kw_source.ckbSource_date.setChecked(True)
dialog.step_kw_source.dtSource_date.setDateTime(source_date)
dialog.step_kw_source.leSource_license.setText(source_license)
# Click next to finish source step and go to extra keywords step
dialog.pbnNext.click()
self.check_current_step(dialog.step_kw_extra_keywords)
# Click next to finish extra keywords step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
dialog.step_kw_title.leTitle.setText(layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
# Checking Keyword Created
expected_keyword = {
'continuous_hazard_unit': unit_metres['key'],
'date': source_date,
'hazard': hazard_flood['key'],
'hazard_category': hazard_category_multiple_event['key'],
'inasafe_fields': {hazard_value_field['key']: depth_field},
'layer_geometry': layer_geometry_polygon['key'],
'layer_mode': layer_mode_continuous['key'],
'layer_purpose': layer_purpose_hazard['key'],
'license': source_license,
'scale': source_scale,
'source': source,
'thresholds': {
exposure_land_cover['key']: {
flood_hazard_classes['key']: {
'classes': {
'dry': [0, 1],
'wet': [1, big_number]
},
'active': True
}
},
},
'title': layer_title,
'url': source_url
}
real_keywords = dialog.get_keywords()
self.assertDictEqual(dict_values_sorted(
real_keywords), dict_values_sorted(expected_keyword))
# @unittest.skip(
    # 'This test has been hanging for an unknown reason for some time.')
def test_auto_select_one_item(self):
"""Test auto select if there is only one item in a list."""
layer = clone_shp_layer(
name='buildings',
include_keywords=True,
source_directory=standard_data_path('exposure'))
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
dialog.pbnNext.click() # choose exposure
self.assertEqual(
dialog.step_kw_subcategory.lstSubcategories.currentRow(), 2)
num_item = dialog.step_kw_subcategory.lstSubcategories.count()
dialog.close()
        self.assertEqual(num_item, 3)
def test_earthquake_raster(self):
"""Test for Earthquake raster keyword wizard."""
path = standard_data_path('hazard', 'earthquake.tif')
message = "Path %s is not found" % path
self.assertTrue(os.path.exists(path), message)
layer = clone_raster_layer(
name='earthquake',
extension='.tif',
include_keywords=False,
source_directory=standard_data_path('hazard'))
self.assertIsNotNone(layer)
layer.keywords = {}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select hazard
self.select_from_list_widget(
layer_purpose_hazard['name'], dialog.step_kw_purpose.lstCategories)
# Click next to select hazard
dialog.pbnNext.click()
# Check if in select hazard step
self.check_current_step(dialog.step_kw_subcategory)
# select EQ
self.select_from_list_widget(
hazard_earthquake['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select EQ
dialog.pbnNext.click()
# Check if in select hazard category step
self.check_current_step(dialog.step_kw_hazard_category)
# select multiple_event
self.select_from_list_widget(
hazard_category_multiple_event['name'],
dialog.step_kw_hazard_category.lstHazardCategories)
# Click next to select multiple event
dialog.pbnNext.click()
# Check if in select band step
self.check_current_step(dialog.step_kw_band_selector)
# Click next to select Band 1 (default)
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# select continuous mode
self.select_from_list_widget(
layer_mode_continuous['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select continuous
dialog.pbnNext.click()
# Check if in unit step
self.check_current_step(dialog.step_kw_unit)
# select MMI
self.select_from_list_widget(
unit_mmi['name'],
dialog.step_kw_unit.lstUnits)
# Click next to select MMI
dialog.pbnNext.click()
# Check if in multi classification step
self.check_current_step(dialog.step_kw_multi_classifications)
# Click next to finish multi classifications step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
dialog.step_kw_source.leSource.setText(source)
dialog.step_kw_source.leSource_scale.setText(source_scale)
dialog.step_kw_source.leSource_url.setText(source_url)
dialog.step_kw_source.ckbSource_date.setChecked(True)
dialog.step_kw_source.dtSource_date.setDateTime(source_date)
dialog.step_kw_source.leSource_license.setText(source_license)
# Click next to finish source step and go to extra keywords step
dialog.pbnNext.click()
# Check if in extra keywords step
self.check_current_step(dialog.step_kw_extra_keywords)
self.assertTrue(dialog.step_kw_extra_keywords.widgets_dict)
# Click next to finish extra keywords step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
dialog.step_kw_title.leTitle.setText(layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
        # Check the created keywords
expected_keyword = {
'active_band': 1,
'continuous_hazard_unit': unit_mmi['key'],
'scale': source_scale,
'hazard_category': hazard_category_multiple_event['key'],
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'hazard': hazard_earthquake['key'],
'date': source_date,
'layer_geometry': layer_geometry_raster['key'],
'layer_purpose': layer_purpose_hazard['key'],
'layer_mode': layer_mode_continuous['key'],
'thresholds': {
exposure_population['key']: {
earthquake_mmi_scale['key']: {
'active': True,
'classes': default_classification_thresholds(
earthquake_mmi_scale)
}
}
}
}
real_keywords = dialog.get_keywords()
self.assertDictEqual(dict_values_sorted(
real_keywords), dict_values_sorted(expected_keyword))
def test_earthquake_raster_dirty_keywords(self):
"""Test for Earthquake raster keyword wizard."""
path = standard_data_path('hazard', 'earthquake.tif')
message = "Path %s is not found" % path
self.assertTrue(os.path.exists(path), message)
layer = clone_raster_layer(
name='earthquake',
extension='.tif',
include_keywords=False,
source_directory=standard_data_path('hazard'))
self.assertIsNotNone(layer)
expected_keyword = {
'continuous_hazard_unit': unit_mmi['key'],
'scale': source_scale,
'hazard_category': hazard_category_multiple_event['key'],
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'hazard': hazard_earthquake['key'],
'date': source_date,
'layer_geometry': layer_geometry_raster['key'],
'layer_purpose': layer_purpose_hazard['key'],
'layer_mode': layer_mode_continuous['key'],
'thresholds': {
exposure_population['key']: {
earthquake_mmi_scale['key']: {
'active': True,
'classes': default_classification_thresholds(
earthquake_mmi_scale)
}
},
exposure_land_cover['key']: {
earthquake_mmi_scale['key']: {
'active': True,
'classes': default_classification_thresholds(
earthquake_mmi_scale)
}
}
},
'extra_keywords': {
extra_keyword_earthquake_depth['key']: 10
}
}
layer.keywords = expected_keyword
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer, expected_keyword)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select hazard
self.select_from_list_widget(
layer_purpose_hazard['name'], dialog.step_kw_purpose.lstCategories)
# Click next to select hazard
dialog.pbnNext.click()
# Check if in select hazard step
self.check_current_step(dialog.step_kw_subcategory)
# select EQ
self.select_from_list_widget(
hazard_earthquake['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select EQ
dialog.pbnNext.click()
# Check if in select hazard category step
self.check_current_step(dialog.step_kw_hazard_category)
# select multiple_event
self.select_from_list_widget(
hazard_category_multiple_event['name'],
dialog.step_kw_hazard_category.lstHazardCategories)
# Click next to select multiple event
dialog.pbnNext.click()
# Check if in select band step
self.check_current_step(dialog.step_kw_band_selector)
# Click next to select Band 1 (default)
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# select continuous mode
self.select_from_list_widget(
layer_mode_continuous['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select continuous
dialog.pbnNext.click()
# Check if in unit step
self.check_current_step(dialog.step_kw_unit)
# select MMI
self.select_from_list_widget(
unit_mmi['name'],
dialog.step_kw_unit.lstUnits)
# Click next to select MMI
dialog.pbnNext.click()
# Check if in multi classification step
self.check_current_step(dialog.step_kw_multi_classifications)
# Click next to finish multi classifications step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
dialog.step_kw_source.leSource.setText(source)
dialog.step_kw_source.leSource_scale.setText(source_scale)
dialog.step_kw_source.leSource_url.setText(source_url)
dialog.step_kw_source.ckbSource_date.setChecked(True)
dialog.step_kw_source.dtSource_date.setDateTime(source_date)
dialog.step_kw_source.leSource_license.setText(source_license)
# Click next to finish source step and go to extra keywords step
dialog.pbnNext.click()
# Check if in extra keywords step
self.check_current_step(dialog.step_kw_extra_keywords)
self.assertTrue(dialog.step_kw_extra_keywords.widgets_dict)
# Click next to finish extra keywords step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
dialog.step_kw_title.leTitle.setText(layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
        # Check the created keywords
real_keywords = dialog.get_keywords()
        # Check that the classification for land cover no longer exists #4214
self.assertNotIn(
exposure_land_cover['key'],
list(real_keywords['thresholds'].keys())
)
# Check if the extra keywords remain
extra_keywords = real_keywords['extra_keywords']
self.assertDictEqual(
extra_keywords, expected_keyword['extra_keywords'])
def test_cyclone_raster(self):
"""Test for cyclone raster keyword wizard when we have many units."""
path = standard_data_path('gisv4', 'hazard', 'cyclone_AUBOM_km_h.asc')
message = "Path %s is not found" % path
self.assertTrue(os.path.exists(path), message)
layer = clone_raster_layer(
name='cyclone_AUBOM_km_h',
extension='.asc',
include_keywords=False,
source_directory=standard_data_path('gisv4', 'hazard'))
self.assertIsNotNone(layer)
layer.keywords = {}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select hazard
self.select_from_list_widget(
layer_purpose_hazard['name'], dialog.step_kw_purpose.lstCategories)
# Click next to select hazard
dialog.pbnNext.click()
# Check if in select hazard step
self.check_current_step(dialog.step_kw_subcategory)
# select cyclone
self.select_from_list_widget(
hazard_cyclone['name'],
dialog.step_kw_subcategory.lstSubcategories)
        # Click next to select cyclone
dialog.pbnNext.click()
# Check if in select hazard category step
self.check_current_step(dialog.step_kw_hazard_category)
# select multiple_event
self.select_from_list_widget(
hazard_category_multiple_event['name'],
dialog.step_kw_hazard_category.lstHazardCategories)
# Click next to select multiple event
dialog.pbnNext.click()
# Check if in select band step
self.check_current_step(dialog.step_kw_band_selector)
# Click next to select Band 1 (default)
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# select continuous mode
self.select_from_list_widget(
layer_mode_continuous['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select continuous
dialog.pbnNext.click()
# Check if in unit step
self.check_current_step(dialog.step_kw_unit)
        # select kilometres per hour
self.select_from_list_widget(
unit_kilometres_per_hour['name'],
dialog.step_kw_unit.lstUnits)
        # Click next to select kilometres per hour
dialog.pbnNext.click()
# Check if in select multi classifications step
self.check_current_step(dialog.step_kw_multi_classifications)
# Change combo box
dialog.step_kw_multi_classifications.exposure_combo_boxes[
0].setCurrentIndex(1)
# Click save
dialog.step_kw_multi_classifications.save_button.click()
# Click next to finish multi classifications step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
dialog.step_kw_source.leSource.setText(source)
dialog.step_kw_source.leSource_scale.setText(source_scale)
dialog.step_kw_source.leSource_url.setText(source_url)
dialog.step_kw_source.ckbSource_date.setChecked(True)
dialog.step_kw_source.dtSource_date.setDateTime(source_date)
dialog.step_kw_source.leSource_license.setText(source_license)
# Click next to finish source step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
dialog.step_kw_title.leTitle.setText(layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
        # Check the created keywords
expected_keyword = {
'active_band': 1,
'continuous_hazard_unit': unit_kilometres_per_hour['key'],
'scale': source_scale,
'hazard_category': hazard_category_multiple_event['key'],
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'hazard': hazard_cyclone['key'],
'date': source_date,
'layer_geometry': layer_geometry_raster['key'],
'layer_purpose': layer_purpose_hazard['key'],
'layer_mode': layer_mode_continuous['key'],
'thresholds': {
exposure_land_cover['key']: {
cyclone_au_bom_hazard_classes['key']: {
'active': True,
'classes': default_classification_thresholds(
cyclone_au_bom_hazard_classes,
unit_kilometres_per_hour['key'])
}
}
}
}
real_keywords = dialog.get_keywords()
self.assertDictEqual(dict_values_sorted(
real_keywords), dict_values_sorted(expected_keyword))
def test_earthquake_raster_invalid_key(self):
"""Test for Earthquake raster keyword wizard."""
path = standard_data_path('hazard', 'earthquake.tif')
message = "Path %s is not found" % path
self.assertTrue(os.path.exists(path), message)
layer = clone_raster_layer(
name='earthquake',
extension='.tif',
include_keywords=False,
source_directory=standard_data_path('hazard'))
self.assertIsNotNone(layer)
layer.keywords = {
'thresholds': {
exposure_structure['key']: {
'dummy': {
'active': True,
'classes': default_classification_thresholds(
earthquake_mmi_scale)
}
}
}
}
# noinspection PyTypeChecker
dialog = WizardDialog(iface=IFACE)
dialog.set_keywords_creation_mode(layer)
# Check if in select purpose step
self.check_current_step(dialog.step_kw_purpose)
# Select hazard
self.select_from_list_widget(
layer_purpose_hazard['name'], dialog.step_kw_purpose.lstCategories)
# Click next to select hazard
dialog.pbnNext.click()
# Check if in select hazard step
self.check_current_step(dialog.step_kw_subcategory)
# select EQ
self.select_from_list_widget(
hazard_earthquake['name'],
dialog.step_kw_subcategory.lstSubcategories)
# Click next to select EQ
dialog.pbnNext.click()
# Check if in select hazard category step
self.check_current_step(dialog.step_kw_hazard_category)
# select multiple_event
self.select_from_list_widget(
hazard_category_multiple_event['name'],
dialog.step_kw_hazard_category.lstHazardCategories)
# Click next to select multiple event
dialog.pbnNext.click()
# Check if in select band step
self.check_current_step(dialog.step_kw_band_selector)
# Click next to select Band 1 (default)
dialog.pbnNext.click()
# Check if in select layer mode step
self.check_current_step(dialog.step_kw_layermode)
# select continuous mode
self.select_from_list_widget(
layer_mode_continuous['name'],
dialog.step_kw_layermode.lstLayerModes)
# Click next to select continuous
dialog.pbnNext.click()
# Check if in unit step
self.check_current_step(dialog.step_kw_unit)
# select MMI
self.select_from_list_widget(
unit_mmi['name'],
dialog.step_kw_unit.lstUnits)
# Click next to select MMI
dialog.pbnNext.click()
# Check if in multi classification step
self.check_current_step(dialog.step_kw_multi_classifications)
# Click next to finish multi classifications step
dialog.pbnNext.click()
# Check if in source step
self.check_current_step(dialog.step_kw_source)
dialog.step_kw_source.leSource.setText(source)
dialog.step_kw_source.leSource_scale.setText(source_scale)
dialog.step_kw_source.leSource_url.setText(source_url)
dialog.step_kw_source.ckbSource_date.setChecked(True)
dialog.step_kw_source.dtSource_date.setDateTime(source_date)
dialog.step_kw_source.leSource_license.setText(source_license)
# Click next to finish source step and go to extra keywords step
dialog.pbnNext.click()
# Check if in extra keywords step
self.check_current_step(dialog.step_kw_extra_keywords)
self.assertTrue(dialog.step_kw_extra_keywords.widgets_dict)
# Click next to finish extra keywords step and go to title step
dialog.pbnNext.click()
# Check if in title step
self.check_current_step(dialog.step_kw_title)
dialog.step_kw_title.leTitle.setText(layer_title)
# Click next to finish title step and go to kw summary step
dialog.pbnNext.click()
        # Check if in summary step
self.check_current_step(dialog.step_kw_summary)
# Click finish
dialog.pbnNext.click()
        # Check the created keywords
expected_keyword = {
'active_band': 1,
'continuous_hazard_unit': unit_mmi['key'],
'scale': source_scale,
'hazard_category': hazard_category_multiple_event['key'],
'license': source_license,
'source': source,
'url': source_url,
'title': layer_title,
'hazard': hazard_earthquake['key'],
'date': source_date,
'layer_geometry': layer_geometry_raster['key'],
'layer_purpose': layer_purpose_hazard['key'],
'layer_mode': layer_mode_continuous['key'],
'thresholds': {
exposure_population['key']: {
earthquake_mmi_scale['key']: {
'active': True,
'classes': default_classification_thresholds(
earthquake_mmi_scale)
}
}
}
}
real_keywords = dialog.get_keywords()
self.assertDictEqual(dict_values_sorted(
real_keywords), dict_values_sorted(expected_keyword))
def check_radio_button_behaviour(self, inasafe_default_dialog):
"""Test radio button behaviour so they are disabled when user set the
ratio field and enabled when there is no field selected.
"""
# Get the parameter container from dialog.
parameter_container = (
inasafe_default_dialog.parameter_container.get_parameter_widgets())
# Check every parameter widgets on the container.
for parameter_widget in parameter_container:
parameter_widget = parameter_widget.widget()
# Locate the 'Do not report' radio button.
dont_use_button = (
parameter_widget.default_input_button_group.button(
len(parameter_widget._parameter.default_values) - 2))
# 'Do not report' button should be selected since the default
# selected input is 'No Field'.
self.assertTrue(dont_use_button.isChecked())
# Select ratio field on input.
current_index = parameter_widget.input.currentIndex()
parameter_widget.input.setCurrentIndex(current_index + 1)
self.assertFalse(dont_use_button.isChecked())
parameter_widget.input.setCurrentIndex(current_index)
self.assertTrue(dont_use_button.isChecked())
if __name__ == '__main__':
suite = unittest.makeSuite(TestKeywordWizard)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| lucernae/inasafe | safe/gui/tools/wizard/test/test_keyword_wizard.py | Python | gpl-3.0 | 116,135 |
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.common._collections_compat import Container
from ansible.playbook.block import Block
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
from ansible.playbook.role import Role
from ansible.playbook.role.include import RoleInclude
from ansible.playbook.role import hash_params
class TestHashParams(unittest.TestCase):
def test(self):
params = {'foo': 'bar'}
res = hash_params(params)
self._assert_set(res)
self._assert_hashable(res)
def _assert_hashable(self, res):
a_dict = {}
try:
a_dict[res] = res
except TypeError as e:
self.fail('%s is not hashable: %s' % (res, e))
def _assert_set(self, res):
self.assertIsInstance(res, frozenset)
def test_dict_tuple(self):
params = {'foo': (1, 'bar',)}
res = hash_params(params)
self._assert_set(res)
def test_tuple(self):
params = (1, None, 'foo')
res = hash_params(params)
self._assert_hashable(res)
def test_tuple_dict(self):
params = ({'foo': 'bar'}, 37)
res = hash_params(params)
self._assert_hashable(res)
def test_list(self):
params = ['foo', 'bar', 1, 37, None]
res = hash_params(params)
self._assert_set(res)
self._assert_hashable(res)
def test_dict_with_list_value(self):
params = {'foo': [1, 4, 'bar']}
res = hash_params(params)
self._assert_set(res)
self._assert_hashable(res)
def test_empty_set(self):
params = set([])
res = hash_params(params)
self._assert_hashable(res)
self._assert_set(res)
def test_generator(self):
def my_generator():
for i in ['a', 1, None, {}]:
yield i
params = my_generator()
res = hash_params(params)
self._assert_hashable(res)
def test_container_but_not_iterable(self):
# This is a Container that is not iterable, which is unlikely but...
class MyContainer(Container):
def __init__(self, some_thing):
self.data = []
self.data.append(some_thing)
def __contains__(self, item):
return item in self.data
def __hash__(self):
return hash(self.data)
def __len__(self):
return len(self.data)
def __call__(self):
return False
foo = MyContainer('foo bar')
params = foo
self.assertRaises(TypeError, hash_params, params)
def test_param_dict_dupe_values(self):
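        # Params that share the same values but differ in keys must hash
        # to distinct, unequal results.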
params1 = {'foo': False}
params2 = {'bar': False}
res1 = hash_params(params1)
res2 = hash_params(params2)
hash1 = hash(res1)
hash2 = hash(res2)
self.assertNotEqual(res1, res2)
self.assertNotEqual(hash1, hash2)
def test_param_dupe(self):
params1 = {
# 'from_files': {},
'tags': [],
u'testvalue': False,
u'testvalue2': True,
# 'when': []
}
params2 = {
# 'from_files': {},
'tags': [],
u'testvalue': True,
u'testvalue2': False,
# 'when': []
}
res1 = hash_params(params1)
res2 = hash_params(params2)
self.assertNotEqual(hash(res1), hash(res2))
self.assertNotEqual(res1, res2)
foo = {}
foo[res1] = 'params1'
foo[res2] = 'params2'
self.assertEqual(len(foo), 2)
del foo[res2]
self.assertEqual(len(foo), 1)
for key in foo:
self.assertTrue(key in foo)
self.assertIn(key, foo)
class TestRole(unittest.TestCase):
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_load_role_with_tasks(self):
fake_loader = DictDataLoader({
"/etc/ansible/roles/foo_tasks/tasks/main.yml": """
- shell: echo 'hello world'
""",
})
mock_play = MagicMock()
mock_play.ROLE_CACHE = {}
i = RoleInclude.load('foo_tasks', play=mock_play, loader=fake_loader)
r = Role.load(i, play=mock_play)
self.assertEqual(str(r), 'foo_tasks')
self.assertEqual(len(r._task_blocks), 1)
assert isinstance(r._task_blocks[0], Block)
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_load_role_with_tasks_dir_vs_file(self):
fake_loader = DictDataLoader({
"/etc/ansible/roles/foo_tasks/tasks/custom_main/foo.yml": """
- command: bar
""",
"/etc/ansible/roles/foo_tasks/tasks/custom_main.yml": """
- command: baz
""",
})
mock_play = MagicMock()
mock_play.ROLE_CACHE = {}
i = RoleInclude.load('foo_tasks', play=mock_play, loader=fake_loader)
r = Role.load(i, play=mock_play, from_files=dict(tasks='custom_main'))
self.assertEqual(r._task_blocks[0]._ds[0]['command'], 'baz')
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_load_role_with_handlers(self):
fake_loader = DictDataLoader({
"/etc/ansible/roles/foo_handlers/handlers/main.yml": """
- name: test handler
shell: echo 'hello world'
""",
})
mock_play = MagicMock()
mock_play.ROLE_CACHE = {}
i = RoleInclude.load('foo_handlers', play=mock_play, loader=fake_loader)
r = Role.load(i, play=mock_play)
self.assertEqual(len(r._handler_blocks), 1)
assert isinstance(r._handler_blocks[0], Block)
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_load_role_with_vars(self):
fake_loader = DictDataLoader({
"/etc/ansible/roles/foo_vars/defaults/main.yml": """
foo: bar
""",
"/etc/ansible/roles/foo_vars/vars/main.yml": """
foo: bam
""",
})
mock_play = MagicMock()
mock_play.ROLE_CACHE = {}
i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
r = Role.load(i, play=mock_play)
self.assertEqual(r._default_vars, dict(foo='bar'))
self.assertEqual(r._role_vars, dict(foo='bam'))
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_load_role_with_vars_dirs(self):
fake_loader = DictDataLoader({
"/etc/ansible/roles/foo_vars/defaults/main/foo.yml": """
foo: bar
""",
"/etc/ansible/roles/foo_vars/vars/main/bar.yml": """
foo: bam
""",
})
mock_play = MagicMock()
mock_play.ROLE_CACHE = {}
i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
r = Role.load(i, play=mock_play)
self.assertEqual(r._default_vars, dict(foo='bar'))
self.assertEqual(r._role_vars, dict(foo='bam'))
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_load_role_with_vars_nested_dirs(self):
fake_loader = DictDataLoader({
"/etc/ansible/roles/foo_vars/defaults/main/foo/bar.yml": """
foo: bar
""",
"/etc/ansible/roles/foo_vars/vars/main/bar/foo.yml": """
foo: bam
""",
})
mock_play = MagicMock()
mock_play.ROLE_CACHE = {}
i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
r = Role.load(i, play=mock_play)
self.assertEqual(r._default_vars, dict(foo='bar'))
self.assertEqual(r._role_vars, dict(foo='bam'))
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_load_role_with_vars_nested_dirs_combined(self):
fake_loader = DictDataLoader({
"/etc/ansible/roles/foo_vars/defaults/main/foo/bar.yml": """
foo: bar
a: 1
""",
"/etc/ansible/roles/foo_vars/defaults/main/bar/foo.yml": """
foo: bam
b: 2
""",
})
mock_play = MagicMock()
mock_play.ROLE_CACHE = {}
i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
r = Role.load(i, play=mock_play)
self.assertEqual(r._default_vars, dict(foo='bar', a=1, b=2))
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_load_role_with_vars_dir_vs_file(self):
fake_loader = DictDataLoader({
"/etc/ansible/roles/foo_vars/vars/main/foo.yml": """
foo: bar
""",
"/etc/ansible/roles/foo_vars/vars/main.yml": """
foo: bam
""",
})
mock_play = MagicMock()
mock_play.ROLE_CACHE = {}
i = RoleInclude.load('foo_vars', play=mock_play, loader=fake_loader)
r = Role.load(i, play=mock_play)
self.assertEqual(r._role_vars, dict(foo='bam'))
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_load_role_with_metadata(self):
fake_loader = DictDataLoader({
'/etc/ansible/roles/foo_metadata/meta/main.yml': """
allow_duplicates: true
dependencies:
- bar_metadata
galaxy_info:
a: 1
b: 2
c: 3
""",
'/etc/ansible/roles/bar_metadata/meta/main.yml': """
dependencies:
- baz_metadata
""",
'/etc/ansible/roles/baz_metadata/meta/main.yml': """
dependencies:
- bam_metadata
""",
'/etc/ansible/roles/bam_metadata/meta/main.yml': """
dependencies: []
""",
'/etc/ansible/roles/bad1_metadata/meta/main.yml': """
1
""",
'/etc/ansible/roles/bad2_metadata/meta/main.yml': """
foo: bar
""",
'/etc/ansible/roles/recursive1_metadata/meta/main.yml': """
dependencies: ['recursive2_metadata']
""",
'/etc/ansible/roles/recursive2_metadata/meta/main.yml': """
dependencies: ['recursive1_metadata']
""",
})
mock_play = MagicMock()
mock_play.collections = None
mock_play.ROLE_CACHE = {}
i = RoleInclude.load('foo_metadata', play=mock_play, loader=fake_loader)
r = Role.load(i, play=mock_play)
role_deps = r.get_direct_dependencies()
self.assertEqual(len(role_deps), 1)
self.assertEqual(type(role_deps[0]), Role)
self.assertEqual(len(role_deps[0].get_parents()), 1)
self.assertEqual(role_deps[0].get_parents()[0], r)
self.assertEqual(r._metadata.allow_duplicates, True)
self.assertEqual(r._metadata.galaxy_info, dict(a=1, b=2, c=3))
all_deps = r.get_all_dependencies()
self.assertEqual(len(all_deps), 3)
self.assertEqual(all_deps[0].get_name(), 'bam_metadata')
self.assertEqual(all_deps[1].get_name(), 'baz_metadata')
self.assertEqual(all_deps[2].get_name(), 'bar_metadata')
i = RoleInclude.load('bad1_metadata', play=mock_play, loader=fake_loader)
self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play)
i = RoleInclude.load('bad2_metadata', play=mock_play, loader=fake_loader)
self.assertRaises(AnsibleParserError, Role.load, i, play=mock_play)
# TODO: re-enable this test once Ansible has proper role dep cycle detection
# that doesn't rely on stack overflows being recoverable (as they aren't in Py3.7+)
# see https://github.com/ansible/ansible/issues/61527
# i = RoleInclude.load('recursive1_metadata', play=mock_play, loader=fake_loader)
# self.assertRaises(AnsibleError, Role.load, i, play=mock_play)
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_load_role_complex(self):
# FIXME: add tests for the more complex uses of
# params and tags/when statements
fake_loader = DictDataLoader({
"/etc/ansible/roles/foo_complex/tasks/main.yml": """
- shell: echo 'hello world'
""",
})
mock_play = MagicMock()
mock_play.ROLE_CACHE = {}
i = RoleInclude.load(dict(role='foo_complex'), play=mock_play, loader=fake_loader)
r = Role.load(i, play=mock_play)
self.assertEqual(r.get_name(), "foo_complex")
| ganeshrn/ansible | test/units/playbook/role/test_role.py | Python | gpl-3.0 | 13,819 |
from mod_pywebsocket import msgutil
def web_socket_do_extra_handshake(request):
# must set request.ws_protocol to the selected version from ws_requested_protocols
request.ws_protocol = request.ws_requested_protocols[0]
    if request.ws_protocol == 'error':
        raise ValueError('Error')
def web_socket_transfer_data(request):
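    # Echo loop: 'protocol', 'resource' and 'origin' report handshake
    # details, any other line is echoed back, and 'end' returns after
    # being echoed.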
while True:
line = msgutil.receive_message(request)
if line == 'protocol':
msgutil.send_message(request, request.ws_protocol)
continue
if line == 'resource':
msgutil.send_message(request, request.ws_resource)
continue
if line == 'origin':
msgutil.send_message(request, request.ws_origin)
continue
msgutil.send_message(request, line)
if line == 'end':
return
| Yukarumya/Yukarum-Redfoxes | dom/base/test/file_websocket_basic_wsh.py | Python | mpl-2.0 | 773 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# Account Cut-off Prepaid module for OpenERP
# Copyright (C) 2013 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account Cut-off Prepaid',
'version': '8.0.0.1.0',
'category': 'Accounting & Finance',
'license': 'AGPL-3',
'summary': 'Prepaid Expense, Prepaid Revenue',
'author': "Akretion,Odoo Community Association (OCA)",
'website': 'http://www.akretion.com',
'depends': ['account_cutoff_base'],
'data': [
'company_view.xml',
'product_view.xml',
'account_invoice_view.xml',
'account_view.xml',
'account_cutoff_view.xml',
],
'demo': ['product_demo.xml'],
'images': [
'images/prepaid_revenue_draft.jpg',
'images/prepaid_revenue_journal_entry.jpg',
'images/prepaid_revenue_done.jpg',
],
'installable': True,
'active': False,
'application': True,
}
| xpansa/account-closing | account_cutoff_prepaid/__openerp__.py | Python | agpl-3.0 | 1,816 |
# -*- Mode: Python -*-
#
# Author: Sam Rushing <[email protected]>
# Copyright 1996-2000 by Sam Rushing
# All Rights Reserved.
#
RCS_ID = '$Id: redirecting_handler.py,v 1.4 2002/03/20 17:37:48 amk Exp $'
import re
import counter
class redirecting_handler:
def __init__ (self, pattern, redirect, regex_flag=re.IGNORECASE):
self.pattern = pattern
self.redirect = redirect
self.patreg = re.compile (pattern, regex_flag)
self.hits = counter.counter()
def match (self, request):
m = self.patreg.match (request.uri)
return (m and (m.end() == len(request.uri)))
def handle_request (self, request):
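        # Substitute the captured part of the URI into the redirect template
        # and answer with a temporary (302) redirect.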
self.hits.increment()
m = self.patreg.match (request.uri)
part = m.group(1)
request['Location'] = self.redirect % part
request.error (302) # moved temporarily
def __repr__ (self):
return '<Redirecting Handler at %08x [%s => %s]>' % (
id(self),
repr(self.pattern),
repr(self.redirect)
)
def status (self):
import producers
return producers.simple_producer (
'<li> Redirecting Handler %s => %s <b>Hits</b>: %s' % (
self.pattern, self.redirect, self.hits
)
)
| XiaoMi/minos | supervisor/supervisor/medusa/redirecting_handler.py | Python | apache-2.0 | 1,396 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import namedtuple
from textwrap import dedent
from pants.backend.jvm.tasks.jvm_compile.zinc.zinc_compile import ZincCompile
from pants.base.build_environment import get_buildroot
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_mkdir, safe_open, safe_rmtree
from pants_test.backend.jvm.tasks.jvm_compile.base_compile_integration_test import BaseCompileIT
class Compile(namedtuple('Compile', ['srcfiles', 'config', 'artifact_count'])):
pass
class CacheCompileIntegrationTest(BaseCompileIT):
def run_compile(self, target_spec, config, workdir):
args = ['compile', target_spec]
pants_run = self.run_pants_with_workdir(args, workdir, config)
self.assert_success(pants_run)
def create_file(self, path, value):
with safe_open(path, 'w') as f:
f.write(value)
def test_transitive_invalid_target_is_dep(self):
with temporary_dir() as cache_dir, \
temporary_dir(root_dir=get_buildroot()) as src_dir:
config = {
'cache.compile.zinc': {'write_to': [cache_dir], 'read_from': [cache_dir]},
'compile.zinc': {'incremental_caching': True},
'java': {'strict_deps': False},
}
target_dir = os.path.join(src_dir, 'org', 'pantsbuild', 'cachetest')
a_srcfile = os.path.join(target_dir, 'A.java')
b_srcfile = os.path.join(target_dir, 'B.java')
c_srcfile = os.path.join(target_dir, 'C.java')
buildfile = os.path.join(target_dir, 'BUILD')
self.create_file(a_srcfile,
dedent("""package org.pantsbuild.cachetest;
class A {}
"""))
self.create_file(b_srcfile,
dedent("""package org.pantsbuild.cachetest;
class B {
A a;
}
"""))
self.create_file(c_srcfile,
dedent("""package org.pantsbuild.cachetest;
class C {
A a;
}
"""))
self.create_file(buildfile,
dedent("""
java_library(name='a',
sources=['A.java']
)
java_library(name='b',
sources=['B.java'],
dependencies=[':a']
)
java_library(name='c',
sources=['C.java'],
dependencies=[':b']
)
"""))
c_spec = os.path.join(os.path.basename(src_dir), 'org', 'pantsbuild',
'cachetest:c')
with self.temporary_workdir() as workdir:
self.run_compile(c_spec, config, workdir)
# clean workdir
# rm cache entries for a and b
cache_dir_entries = os.listdir(os.path.join(cache_dir))
zinc_dir = os.path.join(cache_dir, cache_dir_entries[0])
c_or_a_cache_dirs = [subdir for subdir in os.listdir(zinc_dir)
if subdir.endswith('cachetest.a') or subdir.endswith('cachetest.c')]
for subdir in c_or_a_cache_dirs:
safe_rmtree(os.path.join(zinc_dir, subdir))
# run compile
with self.temporary_workdir() as workdir:
self.run_compile(c_spec, config, workdir)
def test_stale_artifacts_rmd_when_cache_used_with_zinc(self):
with temporary_dir() as cache_dir, \
self.temporary_workdir() as workdir, \
temporary_dir(root_dir=get_buildroot()) as src_dir:
config = {
'cache.compile.zinc': {'write_to': [cache_dir], 'read_from': [cache_dir]},
'compile.zinc': {'incremental_caching': True },
}
srcfile = os.path.join(src_dir, 'org', 'pantsbuild', 'cachetest', 'A.java')
buildfile = os.path.join(src_dir, 'org', 'pantsbuild', 'cachetest', 'BUILD')
self.create_file(srcfile,
dedent("""package org.pantsbuild.cachetest;
class A {}
class Main {}"""))
self.create_file(buildfile,
dedent("""java_library(name='cachetest',
sources=['A.java']
)"""))
cachetest_spec = os.path.join(os.path.basename(src_dir), 'org', 'pantsbuild',
'cachetest:cachetest')
# Caches values A.class, Main.class
self.run_compile(cachetest_spec, config, workdir)
self.create_file(srcfile,
dedent("""package org.pantsbuild.cachetest;
class A {}
class NotMain {}"""))
# Caches values A.class, NotMain.class and leaves them on the filesystem
self.run_compile(cachetest_spec, config, workdir)
self.create_file(srcfile,
dedent("""package org.pantsbuild.cachetest;
class A {}
class Main {}"""))
# Should cause NotMain.class to be removed
self.run_compile(cachetest_spec, config, workdir)
root = os.path.join(workdir, 'compile', 'zinc')
task_versions = [p for p in os.listdir(root) if p != 'current']
self.assertEqual(len(task_versions), 1, 'Expected 1 task version.')
versioned_root = os.path.join(root, task_versions[0])
per_target_dirs = os.listdir(versioned_root)
self.assertEqual(len(per_target_dirs), 1, 'Expected 1 target.')
target_workdir_root = os.path.join(versioned_root, per_target_dirs[0])
target_workdirs = os.listdir(target_workdir_root)
self.assertEqual(len(target_workdirs), 3, 'Expected 3 workdirs (current, and two versioned).')
self.assertIn('current', target_workdirs)
def classfiles(d):
cd = os.path.join(target_workdir_root, d, 'classes', 'org', 'pantsbuild', 'cachetest')
return sorted(os.listdir(cd))
# One workdir should contain NotMain, and the other should contain Main.
self.assertEquals(sorted(classfiles(w) for w in target_workdirs if w != 'current'),
sorted([['A.class', 'Main.class'], ['A.class', 'NotMain.class']]))
def test_incremental_caching(self):
"""Tests that with --no-incremental-caching, we don't write incremental artifacts."""
srcfile = 'A.java'
def config(incremental_caching):
return { 'compile.zinc': {'incremental_caching': incremental_caching} }
self._do_test_caching(
Compile({srcfile: "class A {}"}, config(False), 1),
Compile({srcfile: "final class A {}"}, config(False), 1),
Compile({srcfile: "public final class A {}"}, config(True), 2),
)
def test_incremental(self):
"""Tests that with --no-incremental and --no-incremental-caching, we always write artifacts."""
srcfile = 'A.java'
config = {'compile.zinc': {'incremental': False, 'incremental_caching': False}}
self._do_test_caching(
Compile({srcfile: "class A {}"}, config, 1),
Compile({srcfile: "final class A {}"}, config, 2),
Compile({srcfile: "public final class A {}"}, config, 3),
)
def _do_test_caching(self, *compiles):
"""Tests that the given compiles within the same workspace produce the given artifact counts."""
with temporary_dir() as cache_dir, \
self.temporary_workdir() as workdir, \
temporary_dir(root_dir=get_buildroot()) as src_dir:
def complete_config(config):
# Clone the input config and add cache settings.
cache_settings = {'write_to': [cache_dir], 'read_from': [cache_dir]}
return dict(config.items() + [('cache.compile.zinc', cache_settings)])
buildfile = os.path.join(src_dir, 'BUILD')
spec = os.path.join(src_dir, ':cachetest')
artifact_dir = os.path.join(cache_dir,
ZincCompile.stable_name(),
'{}.cachetest'.format(os.path.basename(src_dir)))
for c in compiles:
# Clear the src directory and recreate the files.
safe_mkdir(src_dir, clean=True)
self.create_file(buildfile,
"""java_library(name='cachetest', sources=rglobs('*.java', '*.scala'))""")
for name, content in c.srcfiles.items():
self.create_file(os.path.join(src_dir, name), content)
# Compile, and confirm that we have the right count of artifacts.
self.run_compile(spec, complete_config(c.config), workdir)
self.assertEquals(c.artifact_count, len(os.listdir(artifact_dir)))
class CacheCompileIntegrationWithZjarsTest(CacheCompileIntegrationTest):
_EXTRA_TASK_ARGS = ['--compile-zinc-use-classpath-jars']
| landism/pants | tests/python/pants_test/backend/jvm/tasks/jvm_compile/java/test_cache_compile_integration.py | Python | apache-2.0 | 9,196 |
# Copyright (c) 2015-2016 Claudiu Popa <[email protected]>
# Copyright (c) 2016 Cara Vinson <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""Astroid hooks for dateutil"""
import textwrap
from astroid import MANAGER, register_module_extender
from astroid.builder import AstroidBuilder
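# Register a stub module for dateutil.parser so that inference of parse()
# calls yields a datetime.datetime instance.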
def dateutil_transform():
return AstroidBuilder(MANAGER).string_build(textwrap.dedent('''
import datetime
def parse(timestr, parserinfo=None, **kwargs):
return datetime.datetime()
'''))
register_module_extender(MANAGER, 'dateutil.parser', dateutil_transform)
| arju88nair/projectCulminate | venv/lib/python3.5/site-packages/astroid/brain/brain_dateutil.py | Python | apache-2.0 | 714 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='See README')
parser.add_argument('-c', '--count', default=3, type=int,
help='with how many failure times it should be '
'considered as an attack')
config = parser.parse_args()
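    # ips counts malformed-header failures per source IP; banned remembers
    # addresses that have already been blocked with iptables.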
ips = {}
banned = set()
for line in sys.stdin:
if 'can not parse header when' in line:
ip = line.split()[-1].split(':')[0]
if ip not in ips:
ips[ip] = 1
print(ip)
sys.stdout.flush()
else:
ips[ip] += 1
if ip not in banned and ips[ip] >= config.count:
if ip != '127.0.0.1':
banned.add(ip)
cmd = 'iptables -I INPUT -s %s -j DROP' % ip
print(cmd, file=sys.stderr)
sys.stderr.flush()
os.system(cmd)
| qingran001/bushu | utils/autoban.py | Python | apache-2.0 | 2,219 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test methods in twisted.internet.threads and reactor thread APIs.
"""
import sys, os, time
from twisted.trial import unittest
from twisted.internet import reactor, defer, interfaces, threads, protocol, error
from twisted.python import failure, threadable, log, threadpool
class ReactorThreadsTestCase(unittest.TestCase):
"""
Tests for the reactor threading API.
"""
def test_suggestThreadPoolSize(self):
"""
Try to change maximum number of threads.
"""
reactor.suggestThreadPoolSize(34)
self.assertEqual(reactor.threadpool.max, 34)
reactor.suggestThreadPoolSize(4)
self.assertEqual(reactor.threadpool.max, 4)
def _waitForThread(self):
"""
The reactor's threadpool is only available when the reactor is running,
so to have a sane behavior during the tests we make a dummy
L{threads.deferToThread} call.
"""
return threads.deferToThread(time.sleep, 0)
def test_callInThread(self):
"""
Test callInThread functionality: set a C{threading.Event}, and check
that it's not in the main thread.
"""
def cb(ign):
waiter = threading.Event()
result = []
def threadedFunc():
result.append(threadable.isInIOThread())
waiter.set()
reactor.callInThread(threadedFunc)
waiter.wait(120)
if not waiter.isSet():
self.fail("Timed out waiting for event.")
else:
self.assertEqual(result, [False])
return self._waitForThread().addCallback(cb)
def test_callFromThread(self):
"""
Test callFromThread functionality: from the main thread, and from
another thread.
"""
def cb(ign):
firedByReactorThread = defer.Deferred()
firedByOtherThread = defer.Deferred()
def threadedFunc():
reactor.callFromThread(firedByOtherThread.callback, None)
reactor.callInThread(threadedFunc)
reactor.callFromThread(firedByReactorThread.callback, None)
return defer.DeferredList(
[firedByReactorThread, firedByOtherThread],
fireOnOneErrback=True)
return self._waitForThread().addCallback(cb)
def test_wakerOverflow(self):
"""
Try to make an overflow on the reactor waker using callFromThread.
"""
def cb(ign):
self.failure = None
waiter = threading.Event()
def threadedFunction():
# Hopefully a hundred thousand queued calls is enough to
# trigger the error condition
for i in xrange(100000):
try:
reactor.callFromThread(lambda: None)
except:
self.failure = failure.Failure()
break
waiter.set()
reactor.callInThread(threadedFunction)
waiter.wait(120)
if not waiter.isSet():
self.fail("Timed out waiting for event")
if self.failure is not None:
return defer.fail(self.failure)
return self._waitForThread().addCallback(cb)
def _testBlockingCallFromThread(self, reactorFunc):
"""
Utility method to test L{threads.blockingCallFromThread}.
"""
waiter = threading.Event()
results = []
errors = []
def cb1(ign):
def threadedFunc():
try:
r = threads.blockingCallFromThread(reactor, reactorFunc)
except Exception, e:
errors.append(e)
else:
results.append(r)
waiter.set()
reactor.callInThread(threadedFunc)
return threads.deferToThread(waiter.wait, self.getTimeout())
def cb2(ign):
if not waiter.isSet():
self.fail("Timed out waiting for event")
return results, errors
return self._waitForThread().addCallback(cb1).addBoth(cb2)
def test_blockingCallFromThread(self):
"""
Test blockingCallFromThread facility: create a thread, call a function
in the reactor using L{threads.blockingCallFromThread}, and verify the
result returned.
"""
def reactorFunc():
return defer.succeed("foo")
def cb(res):
self.assertEqual(res[0][0], "foo")
return self._testBlockingCallFromThread(reactorFunc).addCallback(cb)
def test_asyncBlockingCallFromThread(self):
"""
Test blockingCallFromThread as above, but be sure the resulting
Deferred is not already fired.
"""
def reactorFunc():
d = defer.Deferred()
reactor.callLater(0.1, d.callback, "egg")
return d
def cb(res):
self.assertEqual(res[0][0], "egg")
return self._testBlockingCallFromThread(reactorFunc).addCallback(cb)
def test_errorBlockingCallFromThread(self):
"""
Test error report for blockingCallFromThread.
"""
def reactorFunc():
return defer.fail(RuntimeError("bar"))
def cb(res):
self.assert_(isinstance(res[1][0], RuntimeError))
self.assertEqual(res[1][0].args[0], "bar")
return self._testBlockingCallFromThread(reactorFunc).addCallback(cb)
def test_asyncErrorBlockingCallFromThread(self):
"""
Test error report for blockingCallFromThread as above, but be sure the
resulting Deferred is not already fired.
"""
def reactorFunc():
d = defer.Deferred()
reactor.callLater(0.1, d.errback, RuntimeError("spam"))
return d
def cb(res):
self.assert_(isinstance(res[1][0], RuntimeError))
self.assertEqual(res[1][0].args[0], "spam")
return self._testBlockingCallFromThread(reactorFunc).addCallback(cb)
class Counter:
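    # A deliberately non thread-safe counter: add() flags a problem when a
    # concurrent increment races with it.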
index = 0
problem = 0
def add(self):
"""A non thread-safe method."""
next = self.index + 1
# another thread could jump in here and increment self.index on us
if next != self.index + 1:
self.problem = 1
raise ValueError
# or here, same issue but we wouldn't catch it. We'd overwrite
# their results, and the index will have lost a count. If
# several threads get in here, we will actually make the count
# go backwards when we overwrite it.
self.index = next
class DeferredResultTestCase(unittest.TestCase):
"""
Test twisted.internet.threads.
"""
def setUp(self):
reactor.suggestThreadPoolSize(8)
def tearDown(self):
reactor.suggestThreadPoolSize(0)
def testCallMultiple(self):
L = []
N = 10
d = defer.Deferred()
def finished():
self.assertEqual(L, range(N))
d.callback(None)
threads.callMultipleInThread([
(L.append, (i,), {}) for i in xrange(N)
] + [(reactor.callFromThread, (finished,), {})])
return d
def test_deferredResult(self):
"""
L{threads.deferToThread} executes the function passed, and correctly
handles the positional and keyword arguments given.
"""
d = threads.deferToThread(lambda x, y=5: x + y, 3, y=4)
d.addCallback(self.assertEqual, 7)
return d
def test_deferredFailure(self):
"""
Check that L{threads.deferToThread} return a failure object
with an appropriate exception instance when the called
function raises an exception.
"""
class NewError(Exception):
pass
def raiseError():
raise NewError()
d = threads.deferToThread(raiseError)
return self.assertFailure(d, NewError)
def test_deferredFailureAfterSuccess(self):
"""
        Check that a successful L{threads.deferToThread} followed by one
        that raises an exception correctly results in a failure.
"""
# set up a condition that causes cReactor to hang. These conditions
# can also be set by other tests when the full test suite is run in
# alphabetical order (test_flow.FlowTest.testThreaded followed by
# test_internet.ReactorCoreTestCase.testStop, to be precise). By
# setting them up explicitly here, we can reproduce the hang in a
# single precise test case instead of depending upon side effects of
# other tests.
#
# alas, this test appears to flunk the default reactor too
d = threads.deferToThread(lambda: None)
d.addCallback(lambda ign: threads.deferToThread(lambda: 1//0))
return self.assertFailure(d, ZeroDivisionError)
class DeferToThreadPoolTestCase(unittest.TestCase):
"""
Test L{twisted.internet.threads.deferToThreadPool}.
"""
def setUp(self):
self.tp = threadpool.ThreadPool(0, 8)
self.tp.start()
def tearDown(self):
self.tp.stop()
def test_deferredResult(self):
"""
L{threads.deferToThreadPool} executes the function passed, and
correctly handles the positional and keyword arguments given.
"""
d = threads.deferToThreadPool(reactor, self.tp,
lambda x, y=5: x + y, 3, y=4)
d.addCallback(self.assertEqual, 7)
return d
def test_deferredFailure(self):
"""
Check that L{threads.deferToThreadPool} return a failure object with an
appropriate exception instance when the called function raises an
exception.
"""
class NewError(Exception):
pass
def raiseError():
raise NewError()
d = threads.deferToThreadPool(reactor, self.tp, raiseError)
return self.assertFailure(d, NewError)
_callBeforeStartupProgram = """
import time
import %(reactor)s
%(reactor)s.install()
from twisted.internet import reactor
def threadedCall():
print 'threaded call'
reactor.callInThread(threadedCall)
# Spin very briefly to try to give the thread a chance to run, if it
# is going to. Is there a better way to achieve this behavior?
for i in xrange(100):
time.sleep(0.0)
"""
class ThreadStartupProcessProtocol(protocol.ProcessProtocol):
def __init__(self, finished):
self.finished = finished
self.out = []
self.err = []
def outReceived(self, out):
self.out.append(out)
def errReceived(self, err):
self.err.append(err)
def processEnded(self, reason):
self.finished.callback((self.out, self.err, reason))
class StartupBehaviorTestCase(unittest.TestCase):
"""
Test cases for the behavior of the reactor threadpool near startup
boundary conditions.
In particular, this asserts that no threaded calls are attempted
until the reactor starts up, that calls attempted before it starts
are in fact executed once it has started, and that in both cases,
the reactor properly cleans itself up (which is tested for
somewhat implicitly, by requiring a child process be able to exit,
something it cannot do unless the threadpool has been properly
torn down).
"""
def testCallBeforeStartupUnexecuted(self):
progname = self.mktemp()
progfile = file(progname, 'w')
progfile.write(_callBeforeStartupProgram % {'reactor': reactor.__module__})
progfile.close()
def programFinished((out, err, reason)):
if reason.check(error.ProcessTerminated):
self.fail("Process did not exit cleanly (out: %s err: %s)" % (out, err))
if err:
log.msg("Unexpected output on standard error: %s" % (err,))
self.failIf(out, "Expected no output, instead received:\n%s" % (out,))
def programTimeout(err):
err.trap(error.TimeoutError)
proto.signalProcess('KILL')
return err
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(sys.path)
d = defer.Deferred().addCallbacks(programFinished, programTimeout)
proto = ThreadStartupProcessProtocol(d)
reactor.spawnProcess(proto, sys.executable, ('python', progname), env)
return d
if interfaces.IReactorThreads(reactor, None) is None:
for cls in (ReactorThreadsTestCase,
DeferredResultTestCase,
StartupBehaviorTestCase):
cls.skip = "No thread support, nothing to test here."
else:
import threading
if interfaces.IReactorProcess(reactor, None) is None:
for cls in (StartupBehaviorTestCase,):
cls.skip = "No process support, cannot run subprocess thread tests."
| nlloyd/SubliminalCollaborator | libs/twisted/test/test_threads.py | Python | apache-2.0 | 13,048 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Time grain SQLA
Revision ID: c5756bec8b47
Revises: e502db2af7be
Create Date: 2018-06-04 11:12:59.878742
"""
# revision identifiers, used by Alembic.
revision = "c5756bec8b47"
down_revision = "e502db2af7be"
import json
from alembic import op
from sqlalchemy import Column, Integer, Text
from sqlalchemy.ext.declarative import declarative_base
from superset import db
Base = declarative_base()
class Slice(Base):
__tablename__ = "slices"
id = Column(Integer, primary_key=True)
params = Column(Text)
def upgrade():
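    # Replace the legacy 'Time Column' sentinel with None in every slice's
    # stored params.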
bind = op.get_bind()
session = db.Session(bind=bind)
for slc in session.query(Slice).all():
try:
params = json.loads(slc.params)
if params.get("time_grain_sqla") == "Time Column":
params["time_grain_sqla"] = None
slc.params = json.dumps(params, sort_keys=True)
except Exception:
pass
session.commit()
session.close()
def downgrade():
bind = op.get_bind()
session = db.Session(bind=bind)
for slc in session.query(Slice).all():
try:
params = json.loads(slc.params)
if params.get("time_grain_sqla") is None:
params["time_grain_sqla"] = "Time Column"
slc.params = json.dumps(params, sort_keys=True)
except Exception:
pass
session.commit()
session.close()
| airbnb/superset | superset/migrations/versions/c5756bec8b47_time_grain_sqla.py | Python | apache-2.0 | 2,191 |
from __future__ import print_function
import re
INDENT = 1
NAME = 2
INHERIT = 3
ARGUMENTS = 3
PASS=' pass\n'
def pythonize_arguments(arg_str):
"""
Remove types from function arguments in cython
"""
out_args = []
# If there aren't any arguments return the empty string
if arg_str is None:
        return ''
args = arg_str.split(',')
for arg in args:
components = arg.split('=')
name_and_type=components[0].split(' ')
# There is probably type info
if name_and_type[-1]=='' and len(name_and_type)>1:
name=name_and_type[-2]
else:
name=name_and_type[-1]
# if there are default parameters
if len(components)>1:
name+='='+components[1]
out_args.append(name)
return ','.join(out_args)
def get_indent(indent_str):
"""
Check if the indent exists
"""
if indent_str is None:
return ''
else:
return indent_str
def get_inherit(inherit_str):
"""
Check if there is a parent class
"""
if inherit_str is None:
return ''
else:
return inherit_str
def get_func_name(func_str):
"""
Get function name, ie removes possible return type
"""
name = func_str.split(' ')[-1]
return name
def create_doc_copy(in_file='../../python/_dynet.pyx', out_file='dynet.py'):
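    """Walk a Cython .pyx file and write a pure-Python stub that keeps the
    class and function declarations plus their docstrings (with ``pass``
    bodies) so documentation tools can import it.
    """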
in_comment = False
in_func = False
with open(out_file, 'w+') as py:
with open(in_file, 'r') as pyx:
for l in pyx:
# Check if this line is a function declaration (def or cpdef)
is_func = re.match(r'(\s*)(?:cp)?def (.*)\((.*)\):', l, re.I)
if is_func:
# If the previous line was a function, print pass
if in_func:
print(indent + PASS, file=py)
# Preserve indentation
indent = get_indent(is_func.group(INDENT))
# Get function name
name = get_func_name(is_func.group(NAME))
# Get arguments
arguments = pythonize_arguments(is_func.group(ARGUMENTS))
# Print declaration
print(indent + "def "+name+"("+arguments+"):", file=py)
# Now in function body
in_func = True
continue
# Check if this line declares a class
is_class = re.match(r'(\s*)(?:cdef )?class (.*)(\(.*\))?:', l, re.I)
if is_class:
# Preserve indentation
indent = get_indent(is_class.group(INDENT))
# Get parent class
inherit = get_inherit(is_class.group(INHERIT))
# Print declaration
print(indent + "class "+is_class.group(NAME)+inherit+":", file=py)
                # Handle triple-quoted docstrings, including single-line ones
is_comment = re.match(r'(\s*)"""(.*)', l, re.I) or ('"""' in l and in_comment) # This last case is to account for end of line """ to end the comment
# If start or beginning of comment
if is_comment:
# If end of comment, print the """
if in_comment:
print(l[:-1], file=py)
# Toggle in_comment indicator
in_comment = not in_comment
# If this is a single line comment, end in_comment scope
if l.count('"""') > 1:
in_comment = False
# Print comment line
if in_comment:
print(l[:-1], file=py)
continue
# If not in comment anymore but still in function scope, print pass
if in_func:
print(indent + PASS, file=py)
in_func = False
| clab/cnn | doc/source/doc_util.py | Python | apache-2.0 | 3,948 |
from ..libc.scanf import scanf
class __isoc99_scanf(scanf):
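    # Newer glibc headers redirect scanf to __isoc99_scanf under strict C99
    # semantics, so the generic libc scanf SimProcedure is reused unchanged.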
pass
| chubbymaggie/angr | angr/procedures/glibc/scanf.py | Python | bsd-2-clause | 71 |
# (C) Copyright 2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from euca2ools.commands.euimage.pack.pack import ImagePack
| vasiliykochergin/euca2ools | euca2ools/commands/euimage/pack/__init__.py | Python | bsd-2-clause | 1,405 |
from itertools import count
from .compatibility import zip_longest
from .core import (istask, get_dependencies, subs, toposort, flatten,
reverse_dict, add, inc, ishashable, preorder_traversal)
from .rewrite import END
from toolz import identity
def cull(dsk, keys):
""" Return new dask with only the tasks required to calculate keys.
In other words, remove unnecessary tasks from dask.
``keys`` may be a single key or list of keys.
Examples
--------
>>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}
>>> cull(d, 'out') # doctest: +SKIP
{'x': 1, 'out': (add, 'x', 10)}
"""
if not isinstance(keys, (list, set)):
keys = [keys]
nxt = set(flatten(keys))
seen = nxt
while nxt:
cur = nxt
nxt = set()
for item in cur:
for dep in get_dependencies(dsk, item):
if dep not in seen:
nxt.add(dep)
seen.update(nxt)
return dict((k, v) for k, v in dsk.items() if k in seen)
def fuse(dsk, keys=None):
""" Return new dask with linear sequence of tasks fused together.
If specified, the keys in ``keys`` keyword argument are *not* fused.
This may be used as an optimization step.
Examples
--------
>>> d = {'a': 1, 'b': (inc, 'a'), 'c': (inc, 'b')}
>>> fuse(d) # doctest: +SKIP
{'c': (inc, (inc, 1))}
>>> fuse(d, keys=['b']) # doctest: +SKIP
{'b': (inc, 1), 'c': (inc, 'b')}
"""
if keys is not None and not isinstance(keys, set):
if not isinstance(keys, list):
keys = [keys]
keys = set(flatten(keys))
# locate all members of linear chains
child2parent = {}
unfusible = set()
for parent in dsk:
deps = get_dependencies(dsk, parent, as_list=True)
has_many_children = len(deps) > 1
for child in deps:
if keys is not None and child in keys:
unfusible.add(child)
elif child in child2parent:
del child2parent[child]
unfusible.add(child)
elif has_many_children:
unfusible.add(child)
elif child not in unfusible:
child2parent[child] = parent
# construct the chains from ancestor to descendant
chains = []
parent2child = dict(map(reversed, child2parent.items()))
while child2parent:
child, parent = child2parent.popitem()
chain = [child, parent]
while parent in child2parent:
parent = child2parent.pop(parent)
del parent2child[parent]
chain.append(parent)
chain.reverse()
while child in parent2child:
child = parent2child.pop(child)
del child2parent[child]
chain.append(child)
chains.append(chain)
# create a new dask with fused chains
rv = {}
fused = set()
for chain in chains:
child = chain.pop()
val = dsk[child]
while chain:
parent = chain.pop()
val = subs(dsk[parent], child, val)
fused.add(child)
child = parent
fused.add(child)
rv[child] = val
for key, val in dsk.items():
if key not in fused:
rv[key] = val
return rv
def inline(dsk, keys=None, inline_constants=True):
""" Return new dask with the given keys inlined with their values.
Inlines all constants if ``inline_constants`` keyword is True.
Examples
--------
>>> d = {'x': 1, 'y': (inc, 'x'), 'z': (add, 'x', 'y')}
>>> inline(d) # doctest: +SKIP
{'y': (inc, 1), 'z': (add, 1, 'y')}
>>> inline(d, keys='y') # doctest: +SKIP
{'z': (add, 1, (inc, 1))}
>>> inline(d, keys='y', inline_constants=False) # doctest: +SKIP
{'x': 1, 'z': (add, 'x', (inc, 'x'))}
"""
if keys is None:
keys = []
elif not isinstance(keys, (list, set)):
keys = [keys]
keys = set(flatten(keys))
if inline_constants:
keys.update(k for k, v in dsk.items() if not istask(v))
# Keys may depend on other keys, so determine replace order with toposort.
# The values stored in `keysubs` do not include other keys.
replaceorder = toposort(dict((k, dsk[k]) for k in keys if k in dsk))
keysubs = {}
for key in replaceorder:
val = dsk[key]
for dep in keys & get_dependencies(dsk, key):
if dep in keysubs:
replace = keysubs[dep]
else:
replace = dsk[dep]
val = subs(val, dep, replace)
keysubs[key] = val
# Make new dask with substitutions
rv = {}
for key, val in dsk.items():
if key in keys:
continue
for item in keys & get_dependencies(dsk, key):
val = subs(val, item, keysubs[item])
rv[key] = val
return rv
def inline_functions(dsk, fast_functions=None, inline_constants=False):
""" Inline cheap functions into larger operations
Examples
--------
>>> dsk = {'out': (add, 'i', 'd'), # doctest: +SKIP
... 'i': (inc, 'x'),
... 'd': (double, 'y'),
... 'x': 1, 'y': 1}
>>> inline_functions(dsk, [inc]) # doctest: +SKIP
{'out': (add, (inc, 'x'), 'd'),
'd': (double, 'y'),
'x': 1, 'y': 1}
"""
if not fast_functions:
return dsk
fast_functions = set(fast_functions)
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
dependents = reverse_dict(dependencies)
keys = [k for k, v in dsk.items()
if istask(v)
and functions_of(v).issubset(fast_functions)
and dependents[k]]
if keys:
return inline(dsk, keys, inline_constants=inline_constants)
else:
return dsk
def functions_of(task):
""" Set of functions contained within nested task
Examples
--------
>>> task = (add, (mul, 1, 2), (inc, 3)) # doctest: +SKIP
>>> functions_of(task) # doctest: +SKIP
set([add, mul, inc])
"""
result = set()
if istask(task):
args = set.union(*map(functions_of, task[1:])) if task[1:] else set()
return set([unwrap_partial(task[0])]) | args
if isinstance(task, (list, tuple)):
if not task:
return set()
return set.union(*map(functions_of, task))
return set()
def unwrap_partial(func):
while hasattr(func, 'func'):
func = func.func
return func
def dealias(dsk):
""" Remove aliases from dask
Removes and renames aliases using ``inline``. Keeps aliases at the top of
the DAG to ensure entry points stay the same.
Aliases are not expected by schedulers. It's unclear that this is a legal
state.
Examples
--------
>>> dsk = {'a': (range, 5),
... 'b': 'a',
... 'c': 'b',
... 'd': (sum, 'c'),
... 'e': 'd',
... 'f': (inc, 'd')}
>>> dealias(dsk) # doctest: +SKIP
{'a': (range, 5),
'd': (sum, 'a'),
'e': (identity, 'd'),
'f': (inc, 'd')}
"""
dependencies = dict((k, get_dependencies(dsk, k)) for k in dsk)
dependents = reverse_dict(dependencies)
aliases = set((k for k, task in dsk.items() if ishashable(task) and task in dsk))
roots = set((k for k, v in dependents.items() if not v))
dsk2 = inline(dsk, aliases - roots, inline_constants=False)
dsk3 = dsk2.copy()
dependencies = dict((k, get_dependencies(dsk2, k)) for k in dsk2)
dependents = reverse_dict(dependencies)
for k in roots & aliases:
k2 = dsk3[k]
if len(dependents[k2]) == 1:
dsk3[k] = dsk3[k2]
del dsk3[k2]
else:
dsk3[k] = (identity, k2)
return dsk3
def equivalent(term1, term2, subs=None):
"""Determine if two terms are equivalent, modulo variable substitution.
Equivalent to applying substitutions in `subs` to `term2`, then checking if
`term1 == term2`.
If a subterm doesn't support comparison (i.e. `term1 == term2` errors),
returns `False`.
Parameters
----------
term1, term2 : terms
subs : dict, optional
Mapping of substitutions from `term2` to `term1`
Examples
--------
>>> from operator import add
>>> term1 = (add, 'a', 'b')
>>> term2 = (add, 'x', 'y')
>>> subs = {'x': 'a', 'y': 'b'}
>>> equivalent(term1, term2, subs)
True
>>> subs = {'x': 'a'}
>>> equivalent(term1, term2, subs)
False
"""
# Quick escape for special cases
head_type = type(term1)
if type(term2) != head_type:
# If terms aren't same type, fail
return False
elif head_type not in (tuple, list):
# For literals, just compare
try:
# `is` is tried first, to allow objects that don't implement `==`
# to work for cases where term1 is term2. If `is` returns False,
# and `==` errors, then the only thing we can do is return False.
return term1 is term2 or term1 == term2
except:
return False
pot1 = preorder_traversal(term1)
pot2 = preorder_traversal(term2)
subs = {} if subs is None else subs
for t1, t2 in zip_longest(pot1, pot2, fillvalue=END):
if t1 is END or t2 is END:
# If terms aren't same length: fail
return False
elif ishashable(t2) and t2 in subs:
val = subs[t2]
else:
val = t2
try:
if t1 is not t2 and t1 != val:
return False
except:
return False
return True
def dependency_dict(dsk):
"""Create a dict matching ordered dependencies to keys.
Examples
--------
>>> from operator import add
>>> dsk = {'a': 1, 'b': 2, 'c': (add, 'a', 'a'), 'd': (add, 'b', 'a')}
>>> dependency_dict(dsk) # doctest: +SKIP
{(): ['a', 'b'], ('a', 'a'): ['c'], ('b', 'a'): ['d']}
"""
dep_dict = {}
for key in dsk:
deps = tuple(get_dependencies(dsk, key, True))
dep_dict.setdefault(deps, []).append(key)
return dep_dict
def _possible_matches(dep_dict, deps, subs):
deps2 = []
for d in deps:
v = subs.get(d, None)
if v is not None:
deps2.append(v)
else:
return []
deps2 = tuple(deps2)
return dep_dict.get(deps2, [])
def _sync_keys(dsk1, dsk2, dsk2_topo):
dep_dict1 = dependency_dict(dsk1)
subs = {}
for key2 in toposort(dsk2):
deps = tuple(get_dependencies(dsk2, key2, True))
# List of keys in dsk1 that have terms that *may* match key2
possible_matches = _possible_matches(dep_dict1, deps, subs)
if possible_matches:
val2 = dsk2[key2]
for key1 in possible_matches:
val1 = dsk1[key1]
if equivalent(val1, val2, subs):
subs[key2] = key1
break
return subs
def sync_keys(dsk1, dsk2):
"""Return a dict matching keys in `dsk2` to equivalent keys in `dsk1`.
Parameters
----------
dsk1, dsk2 : dict
Examples
--------
>>> from operator import add, mul
>>> dsk1 = {'a': 1, 'b': (add, 'a', 10), 'c': (mul, 'b', 5)}
>>> dsk2 = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> sync_keys(dsk1, dsk2) # doctest: +SKIP
{'x': 'a', 'y': 'b'}
"""
return _sync_keys(dsk1, dsk2, toposort(dsk2))
def merge_sync(dsk1, dsk2):
"""Merge two dasks together, combining equivalent tasks.
If a task in `dsk2` exists in `dsk1`, the task and key from `dsk1` is used.
If a task in `dsk2` has the same key as a task in `dsk1` (and they aren't
equivalent tasks), then a new key is created for the task in `dsk2`. This
prevents name conflicts.
Parameters
----------
dsk1, dsk2 : dict
Variable names in `dsk2` are replaced with equivalent ones in `dsk1`
before merging.
Returns
-------
new_dsk : dict
The merged dask.
key_map : dict
A mapping between the keys from `dsk2` to their new names in `new_dsk`.
Examples
--------
>>> from operator import add, mul
>>> dsk1 = {'a': 1, 'b': (add, 'a', 10), 'c': (mul, 'b', 5)}
>>> dsk2 = {'x': 1, 'y': (add, 'x', 10), 'z': (mul, 'y', 2)}
>>> new_dsk, key_map = merge_sync(dsk1, dsk2)
>>> new_dsk # doctest: +SKIP
{'a': 1, 'b': (add, 'a', 10), 'c': (mul, 'b', 5), 'z': (mul, 'b', 2)}
>>> key_map # doctest: +SKIP
{'x': 'a', 'y': 'b', 'z': 'z'}
Conflicting names are replaced with auto-generated names upon merging.
>>> dsk1 = {'a': 1, 'res': (add, 'a', 1)}
>>> dsk2 = {'x': 1, 'res': (add, 'x', 2)}
>>> new_dsk, key_map = merge_sync(dsk1, dsk2)
>>> new_dsk # doctest: +SKIP
{'a': 1, 'res': (add, 'a', 1), 'merge_1': (add, 'a', 2)}
>>> key_map # doctest: +SKIP
{'x': 'a', 'res': 'merge_1'}
"""
dsk2_topo = toposort(dsk2)
sd = _sync_keys(dsk1, dsk2, dsk2_topo)
new_dsk = dsk1.copy()
for key in dsk2_topo:
if key in sd:
new_key = sd[key]
else:
if key in dsk1:
new_key = next(merge_sync.names)
else:
new_key = key
sd[key] = new_key
task = dsk2[key]
for a, b in sd.items():
task = subs(task, a, b)
new_dsk[new_key] = task
return new_dsk, sd
# store the name iterator in the function
merge_sync.names = ('merge_%d' % i for i in count(1))
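# A rough sketch of how the helpers above are commonly composed into a single
# optimization pass (the graph, key names and expected output below are
# illustrative assumptions, not taken from dask's scheduler pipeline):
#
# >>> dsk = {'x': 1, 'y': (inc, 'x'), 'z': (add, 'y', 10), 'w': (inc, 'z')}
# >>> dsk = cull(dsk, 'z')                 # drop the unused 'w' task
# >>> dsk = inline_functions(dsk, [inc])   # fold the cheap inc call into 'z'
# >>> fuse(dsk)                            # doctest: +SKIP
# {'z': (add, (inc, 1), 10)}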
| jayhetee/dask | dask/optimize.py | Python | bsd-3-clause | 13,546 |
"""
python generate_sparsetools.py
Generate manual wrappers for C++ sparsetools code.
Type codes used:
'i': integer scalar
'I': integer array
'T': data array
'B': boolean array
'V': std::vector<integer>*
'W': std::vector<data>*
'*': indicates that the next argument is an output argument
'v': void
'l': 64-bit integer scalar
See sparsetools.cxx for more details.
"""
import optparse
import os
from distutils.dep_util import newer
#
# List of all routines and their argument types.
#
# The first code indicates the return value, the rest the arguments.
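# For example, "csr_matvec v iiIITT*T" (in the csr.h block below) reads as: a
# routine returning void that takes two integer scalars, two integer arrays and
# two data arrays, followed by one writable output data array.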
#
# bsr.h
BSR_ROUTINES = """
bsr_diagonal v iiiiiIIT*T
bsr_tocsr v iiiiIIT*I*I*T
bsr_scale_rows v iiiiII*TT
bsr_scale_columns v iiiiII*TT
bsr_sort_indices v iiii*I*I*T
bsr_transpose v iiiiIIT*I*I*T
bsr_matmat v iiiiiiIITIIT*I*I*T
bsr_matvec v iiiiIITT*T
bsr_matvecs v iiiiiIITT*T
bsr_elmul_bsr v iiiiIITIIT*I*I*T
bsr_eldiv_bsr v iiiiIITIIT*I*I*T
bsr_plus_bsr v iiiiIITIIT*I*I*T
bsr_minus_bsr v iiiiIITIIT*I*I*T
bsr_maximum_bsr v iiiiIITIIT*I*I*T
bsr_minimum_bsr v iiiiIITIIT*I*I*T
bsr_ne_bsr v iiiiIITIIT*I*I*B
bsr_lt_bsr v iiiiIITIIT*I*I*B
bsr_gt_bsr v iiiiIITIIT*I*I*B
bsr_le_bsr v iiiiIITIIT*I*I*B
bsr_ge_bsr v iiiiIITIIT*I*I*B
"""
# csc.h
CSC_ROUTINES = """
csc_diagonal v iiiIIT*T
csc_tocsr v iiIIT*I*I*T
csc_matmat_maxnnz l iiIIII
csc_matmat v iiIITIIT*I*I*T
csc_matvec v iiIITT*T
csc_matvecs v iiiIITT*T
csc_elmul_csc v iiIITIIT*I*I*T
csc_eldiv_csc v iiIITIIT*I*I*T
csc_plus_csc v iiIITIIT*I*I*T
csc_minus_csc v iiIITIIT*I*I*T
csc_maximum_csc v iiIITIIT*I*I*T
csc_minimum_csc v iiIITIIT*I*I*T
csc_ne_csc v iiIITIIT*I*I*B
csc_lt_csc v iiIITIIT*I*I*B
csc_gt_csc v iiIITIIT*I*I*B
csc_le_csc v iiIITIIT*I*I*B
csc_ge_csc v iiIITIIT*I*I*B
"""
# csr.h
CSR_ROUTINES = """
csr_matmat_maxnnz l iiIIII
csr_matmat v iiIITIIT*I*I*T
csr_diagonal v iiiIIT*T
csr_tocsc v iiIIT*I*I*T
csr_tobsr v iiiiIIT*I*I*T
csr_todense v iiIIT*T
csr_matvec v iiIITT*T
csr_matvecs v iiiIITT*T
csr_elmul_csr v iiIITIIT*I*I*T
csr_eldiv_csr v iiIITIIT*I*I*T
csr_plus_csr v iiIITIIT*I*I*T
csr_minus_csr v iiIITIIT*I*I*T
csr_maximum_csr v iiIITIIT*I*I*T
csr_minimum_csr v iiIITIIT*I*I*T
csr_ne_csr v iiIITIIT*I*I*B
csr_lt_csr v iiIITIIT*I*I*B
csr_gt_csr v iiIITIIT*I*I*B
csr_le_csr v iiIITIIT*I*I*B
csr_ge_csr v iiIITIIT*I*I*B
csr_scale_rows v iiII*TT
csr_scale_columns v iiII*TT
csr_sort_indices v iI*I*T
csr_eliminate_zeros v ii*I*I*T
csr_sum_duplicates v ii*I*I*T
get_csr_submatrix v iiIITiiii*V*V*W
csr_row_index v iIIIT*I*T
csr_row_slice v iiiIIT*I*T
csr_column_index1 v iIiiII*I*I
csr_column_index2 v IIiIT*I*T
csr_sample_values v iiIITiII*T
csr_count_blocks i iiiiII
csr_sample_offsets i iiIIiII*I
csr_hstack v iiIIIT*I*I*T
expandptr v iI*I
test_throw_error i
csr_has_sorted_indices i iII
csr_has_canonical_format i iII
"""
# coo.h, dia.h, csgraph.h
OTHER_ROUTINES = """
coo_tocsr v iiiIIT*I*I*T
coo_todense v iilIIT*Ti
coo_matvec v lIITT*T
dia_matvec v iiiiITT*T
cs_graph_components i iII*I
"""
# List of compilation units
COMPILATION_UNITS = [
('bsr', BSR_ROUTINES),
('csr', CSR_ROUTINES),
('csc', CSC_ROUTINES),
('other', OTHER_ROUTINES),
]
#
# List of the supported index typenums and the corresponding C++ types
#
I_TYPES = [
('NPY_INT32', 'npy_int32'),
('NPY_INT64', 'npy_int64'),
]
#
# List of the supported data typenums and the corresponding C++ types
#
T_TYPES = [
('NPY_BOOL', 'npy_bool_wrapper'),
('NPY_BYTE', 'npy_byte'),
('NPY_UBYTE', 'npy_ubyte'),
('NPY_SHORT', 'npy_short'),
('NPY_USHORT', 'npy_ushort'),
('NPY_INT', 'npy_int'),
('NPY_UINT', 'npy_uint'),
('NPY_LONG', 'npy_long'),
('NPY_ULONG', 'npy_ulong'),
('NPY_LONGLONG', 'npy_longlong'),
('NPY_ULONGLONG', 'npy_ulonglong'),
('NPY_FLOAT', 'npy_float'),
('NPY_DOUBLE', 'npy_double'),
('NPY_LONGDOUBLE', 'npy_longdouble'),
('NPY_CFLOAT', 'npy_cfloat_wrapper'),
('NPY_CDOUBLE', 'npy_cdouble_wrapper'),
('NPY_CLONGDOUBLE', 'npy_clongdouble_wrapper'),
]
#
# Code templates
#
THUNK_TEMPLATE = """
static PY_LONG_LONG %(name)s_thunk(int I_typenum, int T_typenum, void **a)
{
%(thunk_content)s
}
"""
METHOD_TEMPLATE = """
NPY_VISIBILITY_HIDDEN PyObject *
%(name)s_method(PyObject *self, PyObject *args)
{
return call_thunk('%(ret_spec)s', "%(arg_spec)s", %(name)s_thunk, args);
}
"""
GET_THUNK_CASE_TEMPLATE = """
static int get_thunk_case(int I_typenum, int T_typenum)
{
%(content)s;
return -1;
}
"""
#
# Code generation
#
def get_thunk_type_set():
"""
Get a list containing cartesian product of data types, plus a getter routine.
Returns
-------
i_types : list [(j, I_typenum, None, I_type, None), ...]
Pairing of index type numbers and the corresponding C++ types,
        and a unique index `j`. This is for routines that are parameterized
only by I but not by T.
it_types : list [(j, I_typenum, T_typenum, I_type, T_type), ...]
Same as `i_types`, but for routines parameterized both by T and I.
getter_code : str
C++ code for a function that takes I_typenum, T_typenum and returns
the unique index corresponding to the lists, or -1 if no match was
found.
"""
it_types = []
i_types = []
j = 0
getter_code = " if (0) {}"
for I_typenum, I_type in I_TYPES:
piece = """
else if (I_typenum == %(I_typenum)s) {
if (T_typenum == -1) { return %(j)s; }"""
getter_code += piece % dict(I_typenum=I_typenum, j=j)
i_types.append((j, I_typenum, None, I_type, None))
j += 1
for T_typenum, T_type in T_TYPES:
piece = """
else if (T_typenum == %(T_typenum)s) { return %(j)s; }"""
getter_code += piece % dict(T_typenum=T_typenum, j=j)
it_types.append((j, I_typenum, T_typenum, I_type, T_type))
j += 1
getter_code += """
}"""
return i_types, it_types, GET_THUNK_CASE_TEMPLATE % dict(content=getter_code)
def parse_routine(name, args, types):
"""
Generate thunk and method code for a given routine.
Parameters
----------
name : str
Name of the C++ routine
args : str
Argument list specification (in format explained above)
types : list
        List of types to instantiate, as returned by `get_thunk_type_set`
"""
ret_spec = args[0]
arg_spec = args[1:]
def get_arglist(I_type, T_type):
"""
Generate argument list for calling the C++ function
"""
args = []
next_is_writeable = False
j = 0
for t in arg_spec:
const = '' if next_is_writeable else 'const '
next_is_writeable = False
if t == '*':
next_is_writeable = True
continue
elif t == 'i':
args.append("*(%s*)a[%d]" % (const + I_type, j))
elif t == 'I':
args.append("(%s*)a[%d]" % (const + I_type, j))
elif t == 'T':
args.append("(%s*)a[%d]" % (const + T_type, j))
elif t == 'B':
args.append("(npy_bool_wrapper*)a[%d]" % (j,))
elif t == 'V':
if const:
raise ValueError("'V' argument must be an output arg")
args.append("(std::vector<%s>*)a[%d]" % (I_type, j,))
elif t == 'W':
if const:
raise ValueError("'W' argument must be an output arg")
args.append("(std::vector<%s>*)a[%d]" % (T_type, j,))
elif t == 'l':
args.append("*(%snpy_int64*)a[%d]" % (const, j))
else:
raise ValueError("Invalid spec character %r" % (t,))
j += 1
return ", ".join(args)
# Generate thunk code: a giant switch statement with different
# type combinations inside.
thunk_content = """int j = get_thunk_case(I_typenum, T_typenum);
switch (j) {"""
for j, I_typenum, T_typenum, I_type, T_type in types:
arglist = get_arglist(I_type, T_type)
piece = """
case %(j)s:"""
if ret_spec == 'v':
piece += """
(void)%(name)s(%(arglist)s);
return 0;"""
else:
piece += """
return %(name)s(%(arglist)s);"""
thunk_content += piece % dict(j=j, I_type=I_type, T_type=T_type,
I_typenum=I_typenum, T_typenum=T_typenum,
arglist=arglist, name=name)
thunk_content += """
default:
throw std::runtime_error("internal error: invalid argument typenums");
}"""
thunk_code = THUNK_TEMPLATE % dict(name=name,
thunk_content=thunk_content)
# Generate method code
method_code = METHOD_TEMPLATE % dict(name=name,
ret_spec=ret_spec,
arg_spec=arg_spec)
return thunk_code, method_code
def main():
p = optparse.OptionParser(usage=(__doc__ or '').strip())
p.add_option("--no-force", action="store_false",
dest="force", default=True)
options, args = p.parse_args()
names = []
i_types, it_types, getter_code = get_thunk_type_set()
# Generate *_impl.h for each compilation unit
for unit_name, routines in COMPILATION_UNITS:
thunks = []
methods = []
# Generate thunks and methods for all routines
for line in routines.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
try:
name, args = line.split(None, 1)
except ValueError as e:
raise ValueError("Malformed line: %r" % (line,)) from e
args = "".join(args.split())
if 't' in args or 'T' in args:
thunk, method = parse_routine(name, args, it_types)
else:
thunk, method = parse_routine(name, args, i_types)
if name in names:
raise ValueError("Duplicate routine %r" % (name,))
names.append(name)
thunks.append(thunk)
methods.append(method)
# Produce output
dst = os.path.join(os.path.dirname(__file__),
'sparsetools',
unit_name + '_impl.h')
if newer(__file__, dst) or options.force:
print("[generate_sparsetools] generating %r" % (dst,))
with open(dst, 'w') as f:
write_autogen_blurb(f)
f.write(getter_code)
for thunk in thunks:
f.write(thunk)
for method in methods:
f.write(method)
else:
print("[generate_sparsetools] %r already up-to-date" % (dst,))
# Generate code for method struct
method_defs = ""
for name in names:
method_defs += "NPY_VISIBILITY_HIDDEN PyObject *%s_method(PyObject *, PyObject *);\n" % (name,)
method_struct = """\nstatic struct PyMethodDef sparsetools_methods[] = {"""
for name in names:
method_struct += """
{"%(name)s", (PyCFunction)%(name)s_method, METH_VARARGS, NULL},""" % dict(name=name)
method_struct += """
{NULL, NULL, 0, NULL}
};"""
# Produce sparsetools_impl.h
dst = os.path.join(os.path.dirname(__file__),
'sparsetools',
'sparsetools_impl.h')
if newer(__file__, dst) or options.force:
print("[generate_sparsetools] generating %r" % (dst,))
with open(dst, 'w') as f:
write_autogen_blurb(f)
f.write(method_defs)
f.write(method_struct)
else:
print("[generate_sparsetools] %r already up-to-date" % (dst,))
def write_autogen_blurb(stream):
stream.write("""\
/* This file is autogenerated by generate_sparsetools.py
* Do not edit manually or check into VCS.
*/
""")
if __name__ == "__main__":
main()
| anntzer/scipy | scipy/sparse/_generate_sparsetools.py | Python | bsd-3-clause | 12,609 |
# Test generation of a WPS request from input arguments.
# The specific request involves a FeatureWeightedGridStatisticsAlgorithm process over a WFS feature.
from tests.utils import resource_file, compare_xml
from owslib.wps import WPSExecution, WFSFeatureCollection, WFSQuery
from owslib.etree import etree
def test_wps_request2():
# Supply process input argument
wfsUrl = "http://igsarm-cida-gdp2.er.usgs.gov:8082/geoserver/wfs"
query = WFSQuery("sample:CONUS_States",
propertyNames=['the_geom', "STATE"],
filters=["CONUS_States.508", "CONUS_States.469"])
featureCollection = WFSFeatureCollection(wfsUrl, query)
processid = 'gov.usgs.cida.gdp.wps.algorithm.FeatureWeightedGridStatisticsAlgorithm'
inputs = [("FEATURE_ATTRIBUTE_NAME", "STATE"),
("DATASET_URI", "dods://igsarm-cida-thredds1.er.usgs.gov:8080/thredds/dodsC/dcp/conus_grid.w_meta.ncml"),
("DATASET_ID", "ccsm3_a1b_tmax"),
("DATASET_ID", "ccsm3_a1b_pr"),
("DATASET_ID", "ccsm3_a1fi_tmax"),
("TIME_START", "1960-01-01T00:00:00.000Z"),
("TIME_END", "1960-12-31T00:00:00.000Z"),
("REQUIRE_FULL_COVERAGE", "true"),
("DELIMITER", "COMMA"),
("STATISTICS", "MEAN"),
("STATISTICS", "MINIMUM"),
("STATISTICS", "MAXIMUM"),
("STATISTICS", "WEIGHT_SUM"),
("STATISTICS", "VARIANCE"),
("STATISTICS", "STD_DEV"),
("STATISTICS", "COUNT"),
("GROUP_BY", "STATISTIC"),
("SUMMARIZE_TIMESTEP", "true"),
("SUMMARIZE_FEATURE_ATTRIBUTE", "true"),
("FEATURE_COLLECTION", featureCollection)
]
output = "OUTPUT"
# build XML request for WPS process execution
execution = WPSExecution()
requestElement = execution.buildRequest(processid, inputs, output=[(output, True)])
request = etree.tostring(requestElement)
# Compare to cached XML request
_request = open(resource_file('wps_USGSExecuteRequest2.xml'), 'rb').read()
assert compare_xml(request, _request) is True
| tomkralidis/OWSLib | tests/test_wps_request2.py | Python | bsd-3-clause | 2,182 |
# coding: utf-8
import numpy as np
from ._draw import _coords_inside_image
def _ellipse_in_shape(shape, center, radiuses):
"""Generate coordinates of points within ellipse bounded by shape."""
y, x = np.ogrid[0:float(shape[0]), 0:float(shape[1])]
cy, cx = center
ry, rx = radiuses
distances = ((y - cy) / ry) ** 2 + ((x - cx) / rx) ** 2
return np.nonzero(distances < 1)
def ellipse(cy, cx, yradius, xradius, shape=None):
"""Generate coordinates of pixels within ellipse.
Parameters
----------
cy, cx : double
Centre coordinate of ellipse.
yradius, xradius : double
Minor and major semi-axes. ``(x/xradius)**2 + (y/yradius)**2 = 1``.
shape : tuple, optional
Image shape which is used to determine the maximum extent of output pixel
coordinates. This is useful for ellipses which exceed the image size.
        By default the full extent of the ellipse is used.
Returns
-------
rr, cc : ndarray of int
Pixel coordinates of ellipse.
May be used to directly index into an array, e.g.
``img[rr, cc] = 1``.
Examples
--------
>>> from skimage.draw import ellipse
>>> img = np.zeros((10, 10), dtype=np.uint8)
>>> rr, cc = ellipse(5, 5, 3, 4)
>>> img[rr, cc] = 1
>>> img
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
"""
center = np.array([cy, cx])
radiuses = np.array([yradius, xradius])
if shape is not None:
return _ellipse_in_shape(shape, center, radiuses)
else:
# rounding here is necessary to avoid rounding issues later
upper_left = np.floor(center - radiuses).astype(int)
shifted_center = center - upper_left
# Shifted center is in interval [radiuses, radiuses + 1], so
# the ellipse must fit in [0, 2*radiuses + 1].
bounding_shape = np.ceil(2 * radiuses + 1)
rr, cc = _ellipse_in_shape(bounding_shape, shifted_center, radiuses)
rr.flags.writeable = True
cc.flags.writeable = True
rr += upper_left[0]
cc += upper_left[1]
return rr, cc
def circle(cy, cx, radius, shape=None):
"""Generate coordinates of pixels within circle.
Parameters
----------
cy, cx : double
Centre coordinate of circle.
radius: double
Radius of circle.
shape : tuple, optional
Image shape which is used to determine the maximum extent of output pixel
coordinates. This is useful for circles which exceed the image size.
        By default the full extent of the circle is used.
Returns
-------
rr, cc : ndarray of int
Pixel coordinates of circle.
May be used to directly index into an array, e.g.
``img[rr, cc] = 1``.
Notes
-----
This function is a wrapper for skimage.draw.ellipse()
Examples
--------
>>> from skimage.draw import circle
>>> img = np.zeros((10, 10), dtype=np.uint8)
>>> rr, cc = circle(4, 4, 5)
>>> img[rr, cc] = 1
>>> img
array([[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
"""
return ellipse(cy, cx, radius, radius, shape)
def set_color(img, coords, color):
"""Set pixel color in the image at the given coordinates.
Coordinates that exceed the shape of the image will be ignored.
Parameters
----------
img : (M, N, D) ndarray
Image
coords : ((P,) ndarray, (P,) ndarray)
Coordinates of pixels to be colored.
color : (D,) ndarray
Color to be assigned to coordinates in the image.
    Notes
    -----
    The input image is modified in place; the function does not return a new
    array.
Examples
--------
>>> from skimage.draw import line, set_color
>>> img = np.zeros((10, 10), dtype=np.uint8)
>>> rr, cc = line(1, 1, 20, 20)
>>> set_color(img, (rr, cc), 1)
>>> img
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]], dtype=uint8)
"""
rr, cc = coords
rr, cc = _coords_inside_image(rr, cc, img.shape)
img[rr, cc] = color
| Britefury/scikit-image | skimage/draw/draw.py | Python | bsd-3-clause | 5,187 |
# Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values
# Taking care of missing data
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
# Encoding categorical data
# Encoding the Independent Variable
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
onehotencoder = OneHotEncoder(categorical_features = [0])
X = onehotencoder.fit_transform(X).toarray()
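# Note: if these dummy variables feed a linear model, one column is usually
# dropped to avoid the dummy variable trap, e.g. X = X[:, 1:]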
# Encoding the Dependent Variable
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
| xavialex/Deep-Learning-Templates | Volume 1 - Supervised Deep Learning/Part 1 - Artificial Neural Networks (ANN)/Section 4 - Building an ANN/categorical_data.py | Python | mit | 859 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import ServiceClient
from msrest import Serializer, Deserializer
from msrestazure import AzureConfiguration
from .version import VERSION
from .operations.xms_client_request_id_operations import XMsClientRequestIdOperations
from .operations.subscription_in_credentials_operations import SubscriptionInCredentialsOperations
from .operations.subscription_in_method_operations import SubscriptionInMethodOperations
from .operations.api_version_default_operations import ApiVersionDefaultOperations
from .operations.api_version_local_operations import ApiVersionLocalOperations
from .operations.skip_url_encoding_operations import SkipUrlEncodingOperations
from .operations.odata_operations import OdataOperations
from .operations.header_operations import HeaderOperations
from . import models
class AutoRestAzureSpecialParametersTestClientConfiguration(AzureConfiguration):
"""Configuration for AutoRestAzureSpecialParametersTestClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: The subscription id, which appears in the path,
always modeled in credentials. The value is always '1234-5678-9012-3456'
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not isinstance(subscription_id, str):
raise TypeError("Parameter 'subscription_id' must be str.")
if not base_url:
base_url = 'http://localhost'
super(AutoRestAzureSpecialParametersTestClientConfiguration, self).__init__(base_url)
self.add_user_agent('autorestazurespecialparameterstestclient/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.subscription_id = subscription_id
class AutoRestAzureSpecialParametersTestClient(object):
"""Test Infrastructure for AutoRest
:ivar config: Configuration for client.
:vartype config: AutoRestAzureSpecialParametersTestClientConfiguration
:ivar xms_client_request_id: XMsClientRequestId operations
:vartype xms_client_request_id: fixtures.acceptancetestsazurespecials.operations.XMsClientRequestIdOperations
:ivar subscription_in_credentials: SubscriptionInCredentials operations
:vartype subscription_in_credentials: fixtures.acceptancetestsazurespecials.operations.SubscriptionInCredentialsOperations
:ivar subscription_in_method: SubscriptionInMethod operations
:vartype subscription_in_method: fixtures.acceptancetestsazurespecials.operations.SubscriptionInMethodOperations
:ivar api_version_default: ApiVersionDefault operations
:vartype api_version_default: fixtures.acceptancetestsazurespecials.operations.ApiVersionDefaultOperations
:ivar api_version_local: ApiVersionLocal operations
:vartype api_version_local: fixtures.acceptancetestsazurespecials.operations.ApiVersionLocalOperations
:ivar skip_url_encoding: SkipUrlEncoding operations
:vartype skip_url_encoding: fixtures.acceptancetestsazurespecials.operations.SkipUrlEncodingOperations
:ivar odata: Odata operations
:vartype odata: fixtures.acceptancetestsazurespecials.operations.OdataOperations
:ivar header: Header operations
:vartype header: fixtures.acceptancetestsazurespecials.operations.HeaderOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: The subscription id, which appears in the path,
always modeled in credentials. The value is always '1234-5678-9012-3456'
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = AutoRestAzureSpecialParametersTestClientConfiguration(credentials, subscription_id, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2015-07-01-preview'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.xms_client_request_id = XMsClientRequestIdOperations(
self._client, self.config, self._serialize, self._deserialize)
self.subscription_in_credentials = SubscriptionInCredentialsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.subscription_in_method = SubscriptionInMethodOperations(
self._client, self.config, self._serialize, self._deserialize)
self.api_version_default = ApiVersionDefaultOperations(
self._client, self.config, self._serialize, self._deserialize)
self.api_version_local = ApiVersionLocalOperations(
self._client, self.config, self._serialize, self._deserialize)
self.skip_url_encoding = SkipUrlEncodingOperations(
self._client, self.config, self._serialize, self._deserialize)
self.odata = OdataOperations(
self._client, self.config, self._serialize, self._deserialize)
self.header = HeaderOperations(
self._client, self.config, self._serialize, self._deserialize)
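# A minimal construction sketch (the credentials object is an assumption; any
# msrestazure credentials instance works, and the subscription id is the fixed
# test value described in the class docstring):
#
# client = AutoRestAzureSpecialParametersTestClient(
#     credentials, '1234-5678-9012-3456')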
| lmazuel/autorest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/AzureSpecials/fixtures/acceptancetestsazurespecials/auto_rest_azure_special_parameters_test_client.py | Python | mit | 6,304 |
# This is a virtual module that is entirely implemented server side
# Copyright: (c) 2012, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: raw
short_description: Executes a low-down and dirty command
version_added: historical
options:
free_form:
description:
- The raw module takes a free form command to run.
- There is no parameter actually named 'free form'; see the examples!
required: true
executable:
description:
- Change the shell used to execute the command. Should be an absolute path to the executable.
- When using privilege escalation (C(become)) a default shell will be assigned if one is not provided
as privilege escalation requires a shell.
version_added: "1.0"
description:
- Executes a low-down and dirty SSH command, not going through the module
subsystem.
- This is useful and should only be done in a few cases. A common
case is installing C(python) on a system without python installed by default.
Another is speaking to any devices such as
routers that do not have any Python installed. In any other case, using
the M(shell) or M(command) module is much more appropriate.
- Arguments given to C(raw) are run directly through the configured remote shell.
- Standard output, error output and return code are returned when
available.
- There is no change handler support for this module.
- This module does not require python on the remote system, much like
the M(script) module.
- This module is also supported for Windows targets.
notes:
- "If using raw from a playbook, you may need to disable fact gathering
using C(gather_facts: no) if you're using C(raw) to bootstrap python
onto the machine."
- If you want to execute a command securely and predictably, it may be
better to use the M(command) or M(shell) modules instead.
- The C(environment) keyword does not work with raw normally, it requires a shell
which means it only works if C(executable) is set or using the module
with privilege escalation (C(become)).
- This module is also supported for Windows targets.
seealso:
- module: command
- module: shell
- module: win_command
- module: win_shell
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Bootstrap a host without python2 installed
raw: dnf install -y python2 python2-dnf libselinux-python
- name: Run a command that uses non-posix shell-isms (in this example /bin/sh doesn't handle redirection and wildcards together but bash does)
raw: cat < /tmp/*txt
args:
executable: /bin/bash
- name: Safely use templated variables. Always use quote filter to avoid injection issues.
raw: "{{ package_mgr|quote }} {{ pkg_flags|quote }} install {{ python|quote }}"
- name: List user accounts on a Windows system
raw: Get-WmiObject -Class Win32_UserAccount
'''
| indrajitr/ansible | lib/ansible/modules/raw.py | Python | gpl-3.0 | 3,103 |
# -*- test-case-name: twisted.web.test.test_httpauth -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
A guard implementation which supports HTTP header-based authentication
schemes.
If no I{Authorization} header is supplied, an anonymous login will be
attempted by using a L{Anonymous} credentials object. If such a header is
supplied and does not contain allowed credentials, or if anonymous login is
denied, a 401 will be sent in the response along with I{WWW-Authenticate}
headers for each of the allowed authentication schemes.
"""
from __future__ import division, absolute_import
from zope.interface import implementer
from twisted.python import log
from twisted.python.components import proxyForInterface
from twisted.python.compat import networkString
from twisted.web.resource import IResource, ErrorPage
from twisted.web import util
from twisted.cred import error
from twisted.cred.credentials import Anonymous
@implementer(IResource)
class UnauthorizedResource(object):
"""
Simple IResource to escape Resource dispatch
"""
isLeaf = True
def __init__(self, factories):
self._credentialFactories = factories
def render(self, request):
"""
Send www-authenticate headers to the client
"""
def generateWWWAuthenticate(scheme, challenge):
l = []
for k,v in challenge.items():
l.append(networkString("%s=%s" % (k, quoteString(v))))
return b" ".join([scheme, b", ".join(l)])
def quoteString(s):
return '"%s"' % (s.replace('\\', '\\\\').replace('"', '\\"'),)
request.setResponseCode(401)
for fact in self._credentialFactories:
challenge = fact.getChallenge(request)
request.responseHeaders.addRawHeader(
b'www-authenticate',
generateWWWAuthenticate(fact.scheme, challenge))
if request.method == b'HEAD':
return b''
return b'Unauthorized'
def getChildWithDefault(self, path, request):
"""
Disable resource dispatch
"""
return self
@implementer(IResource)
class HTTPAuthSessionWrapper(object):
"""
Wrap a portal, enforcing supported header-based authentication schemes.
@ivar _portal: The L{Portal} which will be used to retrieve L{IResource}
avatars.
@ivar _credentialFactories: A list of L{ICredentialFactory} providers which
will be used to decode I{Authorization} headers into L{ICredentials}
providers.
"""
isLeaf = False
def __init__(self, portal, credentialFactories):
"""
Initialize a session wrapper
@type portal: C{Portal}
@param portal: The portal that will authenticate the remote client
@type credentialFactories: C{Iterable}
@param credentialFactories: The portal that will authenticate the
remote client based on one submitted C{ICredentialFactory}
"""
self._portal = portal
self._credentialFactories = credentialFactories
def _authorizedResource(self, request):
"""
Get the L{IResource} which the given request is authorized to receive.
If the proper authorization headers are present, the resource will be
requested from the portal. If not, an anonymous login attempt will be
made.
"""
authheader = request.getHeader(b'authorization')
if not authheader:
return util.DeferredResource(self._login(Anonymous()))
factory, respString = self._selectParseHeader(authheader)
if factory is None:
return UnauthorizedResource(self._credentialFactories)
try:
credentials = factory.decode(respString, request)
except error.LoginFailed:
return UnauthorizedResource(self._credentialFactories)
except:
log.err(None, "Unexpected failure from credentials factory")
return ErrorPage(500, None, None)
else:
return util.DeferredResource(self._login(credentials))
def render(self, request):
"""
Find the L{IResource} avatar suitable for the given request, if
possible, and render it. Otherwise, perhaps render an error page
requiring authorization or describing an internal server failure.
"""
return self._authorizedResource(request).render(request)
def getChildWithDefault(self, path, request):
"""
Inspect the Authorization HTTP header, and return a deferred which,
when fired after successful authentication, will return an authorized
C{Avatar}. On authentication failure, an C{UnauthorizedResource} will
be returned, essentially halting further dispatch on the wrapped
resource and all children
"""
# Don't consume any segments of the request - this class should be
# transparent!
request.postpath.insert(0, request.prepath.pop())
return self._authorizedResource(request)
def _login(self, credentials):
"""
Get the L{IResource} avatar for the given credentials.
@return: A L{Deferred} which will be called back with an L{IResource}
avatar or which will errback if authentication fails.
"""
d = self._portal.login(credentials, None, IResource)
d.addCallbacks(self._loginSucceeded, self._loginFailed)
return d
def _loginSucceeded(self, args):
"""
Handle login success by wrapping the resulting L{IResource} avatar
so that the C{logout} callback will be invoked when rendering is
complete.
"""
interface, avatar, logout = args
class ResourceWrapper(proxyForInterface(IResource, 'resource')):
"""
Wrap an L{IResource} so that whenever it or a child of it
completes rendering, the cred logout hook will be invoked.
An assumption is made here that exactly one L{IResource} from
among C{avatar} and all of its children will be rendered. If
more than one is rendered, C{logout} will be invoked multiple
times and probably earlier than desired.
"""
def getChildWithDefault(self, name, request):
"""
Pass through the lookup to the wrapped resource, wrapping
the result in L{ResourceWrapper} to ensure C{logout} is
called when rendering of the child is complete.
"""
return ResourceWrapper(self.resource.getChildWithDefault(name, request))
def render(self, request):
"""
Hook into response generation so that when rendering has
finished completely (with or without error), C{logout} is
called.
"""
request.notifyFinish().addBoth(lambda ign: logout())
return super(ResourceWrapper, self).render(request)
return ResourceWrapper(avatar)
def _loginFailed(self, result):
"""
Handle login failure by presenting either another challenge (for
expected authentication/authorization-related failures) or a server
error page (for anything else).
"""
if result.check(error.Unauthorized, error.LoginFailed):
return UnauthorizedResource(self._credentialFactories)
else:
log.err(
result,
"HTTPAuthSessionWrapper.getChildWithDefault encountered "
"unexpected error")
return ErrorPage(500, None, None)
def _selectParseHeader(self, header):
"""
Choose an C{ICredentialFactory} from C{_credentialFactories}
suitable to use to decode the given I{Authenticate} header.
@return: A two-tuple of a factory and the remaining portion of the
header value to be decoded or a two-tuple of C{None} if no
factory can decode the header value.
"""
elements = header.split(b' ')
scheme = elements[0].lower()
for fact in self._credentialFactories:
if fact.scheme == scheme:
return (fact, b' '.join(elements[1:]))
return (None, None)
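# A rough usage sketch (the realm implementation and the in-memory user
# database below are illustrative assumptions, not part of this module):
#
# from twisted.cred.portal import Portal
# from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
# from twisted.web.guard import BasicCredentialFactory, HTTPAuthSessionWrapper
#
# portal = Portal(MyRealm(), [InMemoryUsernamePasswordDatabaseDontUse(bob=b'secret')])
# root = HTTPAuthSessionWrapper(portal, [BasicCredentialFactory(b'example.com')])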
| Architektor/PySnip | venv/lib/python2.7/site-packages/twisted/web/_auth/wrapper.py | Python | gpl-3.0 | 8,356 |
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import tempfile
from ._fsnative import path2fsn, fsnative
def gettempdir():
"""
Returns:
`fsnative`
Like :func:`python3:tempfile.gettempdir`, but always returns a `fsnative`
path
"""
# FIXME: I don't want to reimplement all that logic, reading env vars etc.
# At least for the default it works.
return path2fsn(tempfile.gettempdir())
def gettempprefix():
"""
Returns:
`fsnative`
Like :func:`python3:tempfile.gettempprefix`, but always returns a
`fsnative` path
"""
return path2fsn(tempfile.gettempprefix())
def mkstemp(suffix=None, prefix=None, dir=None, text=False):
"""
Args:
suffix (`pathlike` or `None`): suffix or `None` to use the default
prefix (`pathlike` or `None`): prefix or `None` to use the default
dir (`pathlike` or `None`): temp dir or `None` to use the default
text (bool): if the file should be opened in text mode
Returns:
Tuple[`int`, `fsnative`]:
A tuple containing the file descriptor and the file path
Raises:
EnvironmentError
Like :func:`python3:tempfile.mkstemp` but always returns a `fsnative`
path.
"""
suffix = fsnative() if suffix is None else path2fsn(suffix)
prefix = gettempprefix() if prefix is None else path2fsn(prefix)
dir = gettempdir() if dir is None else path2fsn(dir)
return tempfile.mkstemp(suffix, prefix, dir, text)
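# For example (illustrative; assumes ``os`` is imported by the caller):
#
# fd, path = mkstemp(suffix=".txt")
# try:
#     os.write(fd, b"data")
# finally:
#     os.close(fd)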
def mkdtemp(suffix=None, prefix=None, dir=None):
"""
Args:
suffix (`pathlike` or `None`): suffix or `None` to use the default
prefix (`pathlike` or `None`): prefix or `None` to use the default
dir (`pathlike` or `None`): temp dir or `None` to use the default
Returns:
`fsnative`: A path to a directory
Raises:
EnvironmentError
Like :func:`python3:tempfile.mkstemp` but always returns a `fsnative` path.
"""
suffix = fsnative() if suffix is None else path2fsn(suffix)
prefix = gettempprefix() if prefix is None else path2fsn(prefix)
dir = gettempdir() if dir is None else path2fsn(dir)
return tempfile.mkdtemp(suffix, prefix, dir)
| nwokeo/supysonic | venv/lib/python2.7/site-packages/mutagen/_senf/_temp.py | Python | agpl-3.0 | 2,805 |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
"""Returns a class from a string including module and class."""
mod_str, _sep, class_str = import_str.rpartition('.')
__import__(mod_str)
try:
return getattr(sys.modules[mod_str], class_str)
except AttributeError:
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
"""Import a class and return an instance of it."""
return import_class(import_str)(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
"""Tries to import object from default namespace.
Imports a class and return an instance of it, first by trying
to find the class in a default namespace, then failing back to
a full path if not found in the default namespace.
"""
import_value = "%s.%s" % (name_space, import_str)
try:
return import_class(import_value)(*args, **kwargs)
except ImportError:
return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
"""Import a module."""
__import__(import_str)
return sys.modules[import_str]
def import_versioned_module(version, submodule=None):
module = 'fuel_agent.v%s' % version
if submodule:
module = '.'.join((module, submodule))
return import_module(module)
def try_import(import_str, default=None):
"""Try to import a module and if it fails return default."""
try:
return import_module(import_str)
except ImportError:
return default
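# Example usage (the dotted paths are illustrative, not real fuel_agent names):
#
# DriverCls = import_class('fuel_agent.drivers.some_driver.SomeDriver')
# driver = import_object('fuel_agent.drivers.some_driver.SomeDriver', data)
# jsonutils = try_import('simplejson') or import_module('json')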
| zhaochao/fuel-web | fuel_agent/fuel_agent/openstack/common/importutils.py | Python | apache-2.0 | 2,365 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.version import version
class GoogleCloudStorageCreateBucketOperator(BaseOperator):
"""
Creates a new bucket. Google Cloud Storage uses a flat namespace,
so you can't create a bucket with a name that is already in use.
.. seealso::
For more information, see Bucket Naming Guidelines:
https://cloud.google.com/storage/docs/bucketnaming.html#requirements
:param bucket_name: The name of the bucket. (templated)
:type bucket_name: str
:param storage_class: This defines how objects in the bucket are stored
and determines the SLA and the cost of storage (templated). Values include
- ``MULTI_REGIONAL``
- ``REGIONAL``
- ``STANDARD``
- ``NEARLINE``
- ``COLDLINE``.
If this value is not specified when the bucket is
created, it will default to STANDARD.
:type storage_class: str
:param location: The location of the bucket. (templated)
Object data for objects in the bucket resides in physical storage
within this region. Defaults to US.
.. seealso::
https://developers.google.com/storage/docs/bucket-locations
:type location: str
:param project_id: The ID of the GCP Project. (templated)
:type project_id: str
:param labels: User-provided labels, in key/value pairs.
:type labels: dict
:param google_cloud_storage_conn_id: The connection ID to use when
connecting to Google cloud storage.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must
have domain-wide delegation enabled.
:type delegate_to: str
**Example**:
The following Operator would create a new bucket ``test-bucket``
with ``MULTI_REGIONAL`` storage class in ``EU`` region ::
CreateBucket = GoogleCloudStorageCreateBucketOperator(
task_id='CreateNewBucket',
bucket_name='test-bucket',
storage_class='MULTI_REGIONAL',
location='EU',
labels={'env': 'dev', 'team': 'airflow'},
google_cloud_storage_conn_id='airflow-service-account'
)
"""
template_fields = ('bucket_name', 'storage_class',
'location', 'project_id')
ui_color = '#f0eee4'
@apply_defaults
def __init__(self,
bucket_name,
storage_class='MULTI_REGIONAL',
location='US',
project_id=None,
labels=None,
google_cloud_storage_conn_id='google_cloud_default',
delegate_to=None,
*args,
**kwargs):
super(GoogleCloudStorageCreateBucketOperator, self).__init__(*args, **kwargs)
self.bucket_name = bucket_name
self.storage_class = storage_class
self.location = location
self.project_id = project_id
self.labels = labels
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
def execute(self, context):
if self.labels is not None:
self.labels.update(
{'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')}
)
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to
)
hook.create_bucket(bucket_name=self.bucket_name,
storage_class=self.storage_class,
location=self.location,
project_id=self.project_id,
labels=self.labels)
| adamhaney/airflow | airflow/contrib/operators/gcs_operator.py | Python | apache-2.0 | 4,816 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Generate captions for images using default beam search parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import tensorflow as tf
from im2txt import configuration
from im2txt import inference_wrapper
from im2txt.inference_utils import caption_generator
from im2txt.inference_utils import vocabulary
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("checkpoint_path", "",
"Model checkpoint file or directory containing a "
"model checkpoint file.")
tf.flags.DEFINE_string("vocab_file", "", "Text file containing the vocabulary.")
tf.flags.DEFINE_string("input_files", "",
"File pattern or comma-separated list of file patterns "
"of image files.")
def main(_):
# Build the inference graph.
g = tf.Graph()
with g.as_default():
model = inference_wrapper.InferenceWrapper()
restore_fn = model.build_graph_from_config(configuration.ModelConfig(),
FLAGS.checkpoint_path)
g.finalize()
# Create the vocabulary.
vocab = vocabulary.Vocabulary(FLAGS.vocab_file)
filenames = []
for file_pattern in FLAGS.input_files.split(","):
filenames.extend(tf.gfile.Glob(file_pattern))
tf.logging.info("Running caption generation on %d files matching %s",
len(filenames), FLAGS.input_files)
with tf.Session(graph=g) as sess:
# Load the model from checkpoint.
restore_fn(sess)
# Prepare the caption generator. Here we are implicitly using the default
# beam search parameters. See caption_generator.py for a description of the
# available beam search parameters.
generator = caption_generator.CaptionGenerator(model, vocab)
for filename in filenames:
      with tf.gfile.GFile(filename, "rb") as f:
image = f.read()
captions = generator.beam_search(sess, image)
print("Captions for image %s:" % os.path.basename(filename))
for i, caption in enumerate(captions):
# Ignore begin and end words.
sentence = [vocab.id_to_word(w) for w in caption.sentence[1:-1]]
sentence = " ".join(sentence)
print(" %d) %s (p=%f)" % (i, sentence, math.exp(caption.logprob)))
if __name__ == "__main__":
tf.app.run()
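# The flags above are typically supplied on the command line, for example
# (paths are placeholders, not real checkpoint or vocabulary files):
#
# python run_inference.py \
#     --checkpoint_path=/path/to/model.ckpt-1000000 \
#     --vocab_file=/path/to/word_counts.txt \
#     --input_files="/path/to/images/*.jpg"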
| srome/jacksearch | im2txt/run_inference.py | Python | apache-2.0 | 3,047 |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
# If you are using a code grep query service and want to resolve
# certain global symbols to local directories,
# add them as REPOS below. We will essentially replace a global
# match against something like:
# www/myFile.py
# to:
# ~/www/myFile.py
REPOS = ['www']
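# Added illustration (not part of the original file): with a hypothetical second
# entry, e.g. REPOS = ['www', 'infra'], a grep hit such as "infra/tools/run.py"
# would presumably be resolved to "~/infra/tools/run.py" in the same way the
# comment above describes for "www".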
| Shenil/PathPicker | src/repos.py | Python | bsd-3-clause | 566 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import os.path
import re
class ParseException(Exception):
"""Thrown when data in the model is invalid.
"""
def __init__(self, parent, message):
hierarchy = _GetModelHierarchy(parent)
hierarchy.append(message)
Exception.__init__(
self, 'Model parse exception at:\n' + '\n'.join(hierarchy))
class Model(object):
"""Model of all namespaces that comprise an API.
Properties:
- |namespaces| a map of a namespace name to its model.Namespace
"""
def __init__(self):
self.namespaces = {}
def AddNamespace(self, json, source_file):
"""Add a namespace's json to the model and returns the namespace.
"""
namespace = Namespace(json, source_file)
self.namespaces[namespace.name] = namespace
return namespace
class Namespace(object):
"""An API namespace.
Properties:
- |name| the name of the namespace
- |unix_name| the unix_name of the namespace
- |source_file| the file that contained the namespace definition
- |source_file_dir| the directory component of |source_file|
- |source_file_filename| the filename component of |source_file|
- |types| a map of type names to their model.Type
- |functions| a map of function names to their model.Function
- |properties| a map of property names to their model.Property
"""
def __init__(self, json, source_file):
self.name = json['namespace']
self.unix_name = _UnixName(self.name)
self.source_file = source_file
self.source_file_dir, self.source_file_filename = os.path.split(source_file)
self.parent = None
_AddTypes(self, json)
_AddFunctions(self, json)
_AddProperties(self, json)
class Type(object):
"""A Type defined in the json.
Properties:
- |name| the type name
- |description| the description of the type (if provided)
- |properties| a map of property unix_names to their model.Property
- |functions| a map of function names to their model.Function
- |from_client| indicates that instances of the Type can originate from the
users of generated code, such as top-level types and function results
- |from_json| indicates that instances of the Type can originate from the
JSON (as described by the schema), such as top-level types and function
parameters
- |type_| the PropertyType of this Type
- |item_type| if this is an array, the type of items in the array
"""
def __init__(self, parent, name, json):
if json.get('type') == 'array':
self.type_ = PropertyType.ARRAY
self.item_type = Property(self, name + "Element", json['items'],
from_json=True,
from_client=True)
elif json.get('type') == 'string':
self.type_ = PropertyType.STRING
else:
if not (
'properties' in json or
'additionalProperties' in json or
'functions' in json):
raise ParseException(self, name + " has no properties or functions")
self.type_ = PropertyType.OBJECT
self.name = name
self.description = json.get('description')
self.from_json = True
self.from_client = True
self.parent = parent
_AddFunctions(self, json)
_AddProperties(self, json, from_json=True, from_client=True)
additional_properties_key = 'additionalProperties'
additional_properties = json.get(additional_properties_key)
if additional_properties:
self.properties[additional_properties_key] = Property(
self,
additional_properties_key,
additional_properties,
is_additional_properties=True)
class Callback(object):
"""A callback parameter to a Function.
Properties:
- |params| the parameters to this callback.
"""
def __init__(self, parent, json):
params = json['parameters']
self.parent = parent
self.params = []
if len(params) == 0:
return
elif len(params) == 1:
param = params[0]
self.params.append(Property(self, param['name'], param,
from_client=True))
else:
raise ParseException(
self,
"Callbacks can have at most a single parameter")
class Function(object):
"""A Function defined in the API.
Properties:
- |name| the function name
- |params| a list of parameters to the function (order matters). A separate
parameter is used for each choice of a 'choices' parameter.
- |description| a description of the function (if provided)
- |callback| the callback parameter to the function. There should be exactly
one
"""
def __init__(self, parent, json):
self.name = json['name']
self.params = []
self.description = json.get('description')
self.callback = None
self.parent = parent
for param in json['parameters']:
if param.get('type') == 'function':
if self.callback:
raise ParseException(self, self.name + " has more than one callback")
self.callback = Callback(self, param)
else:
self.params.append(Property(self, param['name'], param,
from_json=True))
class Property(object):
"""A property of a type OR a parameter to a function.
Properties:
- |name| name of the property as in the json. This shouldn't change since
it is the key used to access DictionaryValues
- |unix_name| the unix_style_name of the property. Used as variable name
- |optional| a boolean representing whether the property is optional
- |description| a description of the property (if provided)
- |type_| the model.PropertyType of this property
- |ref_type| the type that the REF property is referencing. Can be used to
map to its model.Type
- |item_type| a model.Property representing the type of each element in an
ARRAY
- |properties| the properties of an OBJECT parameter
"""
def __init__(self, parent, name, json, is_additional_properties=False,
from_json=False, from_client=False):
"""
Parameters:
- |from_json| indicates that instances of the Type can originate from the
JSON (as described by the schema), such as top-level types and function
parameters
- |from_client| indicates that instances of the Type can originate from the
users of generated code, such as top-level types and function results
"""
self.name = name
self._unix_name = _UnixName(self.name)
self._unix_name_used = False
self.optional = json.get('optional', False)
self.has_value = False
self.description = json.get('description')
self.parent = parent
_AddProperties(self, json)
if is_additional_properties:
self.type_ = PropertyType.ADDITIONAL_PROPERTIES
elif '$ref' in json:
self.ref_type = json['$ref']
self.type_ = PropertyType.REF
elif 'enum' in json:
self.enum_values = []
for value in json['enum']:
self.enum_values.append(value)
self.type_ = PropertyType.ENUM
elif 'type' in json:
json_type = json['type']
if json_type == 'string':
self.type_ = PropertyType.STRING
elif json_type == 'any':
self.type_ = PropertyType.ANY
elif json_type == 'boolean':
self.type_ = PropertyType.BOOLEAN
elif json_type == 'integer':
self.type_ = PropertyType.INTEGER
elif json_type == 'number':
self.type_ = PropertyType.DOUBLE
elif json_type == 'array':
self.item_type = Property(self, name + "Element", json['items'],
from_json=from_json,
from_client=from_client)
self.type_ = PropertyType.ARRAY
elif json_type == 'object':
self.type_ = PropertyType.OBJECT
# These members are read when this OBJECT Property is used as a Type
self.from_json = from_json
self.from_client = from_client
type_ = Type(self, self.name, json)
# self.properties will already have some value from |_AddProperties|.
self.properties.update(type_.properties)
self.functions = type_.functions
elif json_type == 'binary':
self.type_ = PropertyType.BINARY
else:
raise ParseException(self, 'type ' + json_type + ' not recognized')
elif 'choices' in json:
if not json['choices']:
raise ParseException(self, 'Choices has no choices')
self.choices = {}
self.type_ = PropertyType.CHOICES
for choice_json in json['choices']:
choice = Property(self, self.name, choice_json,
from_json=from_json,
from_client=from_client)
# A choice gets its unix_name set in
# cpp_type_generator.GetExpandedChoicesInParams
choice._unix_name = None
# The existence of any single choice is optional
choice.optional = True
self.choices[choice.type_] = choice
elif 'value' in json:
self.has_value = True
self.value = json['value']
if type(self.value) == int:
self.type_ = PropertyType.INTEGER
else:
# TODO(kalman): support more types as necessary.
raise ParseException(
self, '"%s" is not a supported type' % type(self.value))
else:
raise ParseException(
self, 'Property has no type, $ref, choices, or value')
def GetUnixName(self):
"""Gets the property's unix_name. Raises AttributeError if not set.
"""
if not self._unix_name:
raise AttributeError('No unix_name set on %s' % self.name)
self._unix_name_used = True
return self._unix_name
def SetUnixName(self, unix_name):
"""Set the property's unix_name. Raises AttributeError if the unix_name has
already been used (GetUnixName has been called).
"""
if unix_name == self._unix_name:
return
if self._unix_name_used:
raise AttributeError(
'Cannot set the unix_name on %s; '
'it is already used elsewhere as %s' %
(self.name, self._unix_name))
self._unix_name = unix_name
def Copy(self):
"""Makes a copy of this model.Property object and allow the unix_name to be
set again.
"""
property_copy = copy.copy(self)
property_copy._unix_name_used = False
return property_copy
unix_name = property(GetUnixName, SetUnixName)
class PropertyType(object):
"""Enum of different types of properties/parameters.
"""
class _Info(object):
def __init__(self, is_fundamental, name):
self.is_fundamental = is_fundamental
self.name = name
def __repr__(self):
return self.name
INTEGER = _Info(True, "INTEGER")
DOUBLE = _Info(True, "DOUBLE")
BOOLEAN = _Info(True, "BOOLEAN")
STRING = _Info(True, "STRING")
ENUM = _Info(False, "ENUM")
ARRAY = _Info(False, "ARRAY")
REF = _Info(False, "REF")
CHOICES = _Info(False, "CHOICES")
OBJECT = _Info(False, "OBJECT")
BINARY = _Info(False, "BINARY")
ANY = _Info(False, "ANY")
ADDITIONAL_PROPERTIES = _Info(False, "ADDITIONAL_PROPERTIES")
def _UnixName(name):
"""Returns the unix_style name for a given lowerCamelCase string.
"""
# First replace any lowerUpper patterns with lower_Upper.
s1 = re.sub('([a-z])([A-Z])', r'\1_\2', name)
# Now replace any ACMEWidgets patterns with ACME_Widgets
s2 = re.sub('([A-Z]+)([A-Z][a-z])', r'\1_\2', s1)
# Finally, replace any remaining periods, and make lowercase.
return s2.replace('.', '_').lower()
def _GetModelHierarchy(entity):
"""Returns the hierarchy of the given model entity."""
hierarchy = []
while entity:
try:
hierarchy.append(entity.name)
except AttributeError:
hierarchy.append(repr(entity))
entity = entity.parent
hierarchy.reverse()
return hierarchy
def _AddTypes(model, json):
"""Adds Type objects to |model| contained in the 'types' field of |json|.
"""
model.types = {}
for type_json in json.get('types', []):
type_ = Type(model, type_json['id'], type_json)
model.types[type_.name] = type_
def _AddFunctions(model, json):
"""Adds Function objects to |model| contained in the 'types' field of |json|.
"""
model.functions = {}
for function_json in json.get('functions', []):
function = Function(model, function_json)
model.functions[function.name] = function
def _AddProperties(model, json, from_json=False, from_client=False):
"""Adds model.Property objects to |model| contained in the 'properties' field
of |json|.
"""
model.properties = {}
for name, property_json in json.get('properties', {}).items():
# TODO(calamity): support functions (callbacks) as properties. The model
# doesn't support it yet because the h/cc generators don't -- this is
# because we'd need to hook it into a base::Callback or something.
#
# However, pragmatically it's not necessary to support them anyway, since
# the instances of functions-on-properties in the extension APIs are all
    # handled in pure Javascript on the render process (and therefore never reach
# C++ let alone the browser).
if property_json.get('type') == 'function':
continue
model.properties[name] = Property(
model,
name,
property_json,
from_json=from_json,
from_client=from_client)
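# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original Chromium module): shows how a
# hand-written namespace dict shaped like the schema documented above might be
# loaded. The dict contents and the file name are hypothetical.
if __name__ == '__main__':
  _example_namespace = {
      'namespace': 'example',
      'functions': [{'name': 'doSomething', 'parameters': []}],
  }
  _example_model = Model()
  _example_ns = _example_model.AddNamespace(_example_namespace, 'example.json')
  print _example_ns.unix_name, _example_model.namespaces.keys()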
| gx1997/chrome-loongson | tools/json_schema_compiler/model.py | Python | bsd-3-clause | 13,227 |
# -*- coding: utf-8 -*-
"""
wechatpy.client.jsapi
~~~~~~~~~~~~~~~~~~~~
This module provides some APIs for JS SDK
:copyright: (c) 2014 by messense.
:license: MIT, see LICENSE for more details.
"""
from __future__ import absolute_import, unicode_literals
import time
from wechatpy.utils import WeChatSigner
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatJSAPI(BaseWeChatAPI):
def get_ticket(self, type='jsapi'):
"""
        Fetch the WeChat JS-SDK ticket.
        :return: The returned JSON data packet
"""
return self._get(
'ticket/getticket',
params={'type': type}
)
def get_jsapi_ticket(self):
"""
        Fetch the WeChat JS-SDK ticket.
        This method caches and manages the ticket automatically via the session object.
:return: ticket
"""
ticket = self.session.get('jsapi_ticket')
expires_at = self.session.get('jsapi_ticket_expires_at', 0)
if not ticket or expires_at < int(time.time()):
jsapi_ticket = self.get_ticket('jsapi')
ticket = jsapi_ticket['ticket']
expires_at = int(time.time()) + int(jsapi_ticket['expires_in'])
self.session.set('jsapi_ticket', ticket)
self.session.set('jsapi_ticket_expires_at', expires_at)
return ticket
def get_jsapi_signature(self, noncestr, ticket, timestamp, url):
data = [
'noncestr={noncestr}'.format(noncestr=noncestr),
'jsapi_ticket={ticket}'.format(ticket=ticket),
'timestamp={timestamp}'.format(timestamp=timestamp),
'url={url}'.format(url=url),
]
signer = WeChatSigner(delimiter=b'&')
signer.add_data(*data)
return signer.signature
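# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original module), assuming a configured
# WeChat client whose ``jsapi`` attribute is an instance of WeChatJSAPI; the
# noncestr, timestamp and url values are made up for illustration.
#
#   client = WeChatClient('app_id', 'app_secret')
#   ticket = client.jsapi.get_jsapi_ticket()
#   signature = client.jsapi.get_jsapi_signature(
#       noncestr='random123',
#       ticket=ticket,
#       timestamp=int(time.time()),
#       url='https://example.com/page',
#   )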
| tdautc19841202/wechatpy | wechatpy/client/api/jsapi.py | Python | mit | 1,766 |
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_websvr
# Purpose:      SpiderFoot plug-in for scanning content retrieved by other
#               modules (such as sfp_spider) and identifying the web servers used
#
# Author: Steve Micallef <[email protected]>
#
# Created: 06/04/2012
# Copyright: (c) Steve Micallef 2012
# Licence: GPL
# -------------------------------------------------------------------------------
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_websvr(SpiderFootPlugin):
"""Web Server:Obtain web server banners to identify versions of web servers being used."""
# Default options
opts = {}
results = dict()
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = dict()
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return ["WEBSERVER_HTTPHEADERS"]
# What events this module produces
# This is to support the end user in selecting modules based on events
# produced.
def producedEvents(self):
return ["WEBSERVER_BANNER", "WEBSERVER_TECHNOLOGY"]
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
parentEvent = event.sourceEvent
eventSource = event.sourceEvent.data
self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
if eventSource in self.results:
return None
else:
self.results[eventSource] = True
if not self.getTarget().matches(self.sf.urlFQDN(eventSource)):
self.sf.debug("Not collecting web server information for external sites.")
return None
# Could apply some smarts here, for instance looking for certain
# banners and therefore classifying them further (type and version,
        # possibly OS). This could also trigger additional tests, such as 404s
# and other errors to see what the header looks like.
if 'server' in eventData:
evt = SpiderFootEvent("WEBSERVER_BANNER", eventData['server'],
self.__name__, parentEvent)
self.notifyListeners(evt)
self.sf.info("Found web server: " + eventData['server'] + " (" + eventSource + ")")
if 'x-powered-by' in eventData:
evt = SpiderFootEvent("WEBSERVER_TECHNOLOGY", eventData['x-powered-by'],
self.__name__, parentEvent)
self.notifyListeners(evt)
return None
tech = None
        if 'set-cookie' in eventData and 'PHPSESS' in eventData['set-cookie']:
tech = "PHP"
if 'set-cookie' in eventData and 'JSESSIONID' in eventData['set-cookie']:
tech = "Java/JSP"
if 'set-cookie' in eventData and 'ASP.NET' in eventData['set-cookie']:
tech = "ASP.NET"
if 'x-aspnet-version' in eventData:
tech = "ASP.NET"
if tech is not None and '.jsp' in eventSource:
tech = "Java/JSP"
if tech is not None and '.php' in eventSource:
tech = "PHP"
if tech is not None:
evt = SpiderFootEvent("WEBSERVER_TECHNOLOGY", tech, self.__name__, parentEvent)
self.notifyListeners(evt)
# End of sfp_websvr class
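# Added illustration (not part of the original plug-in): the header dictionary
# that handleEvent() inspects would presumably look something like the
# following (keys lower-cased by the collecting module); the values are made up.
#
#   {'server': 'Apache/2.2.22 (Ubuntu)',
#    'x-powered-by': 'PHP/5.3.10',
#    'set-cookie': 'PHPSESSID=abc123; path=/'}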
| Wingless-Archangel/spiderfoot | modules/sfp_websvr.py | Python | gpl-2.0 | 3,664 |
# Copyright (C) 2007-2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Sending and formatting emails.
"""
import time
import re
import socket
import smtplib
import mimetypes
from getpass import getuser
from email import encoders
from email.mime.multipart import MIMEMultipart
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
from Exscript.util.event import Event
###########################################################
# Helpers. (non-public)
###########################################################
_varname_re = re.compile(r'[a-z][\w_]*', re.I)
_string_re = re.compile(r'(\\?){([\w_]+)}', re.I)
class _TemplateParser(object):
"""
    This exists for backward compatibility; Python 2.3 did not yet ship a
    comparable built-in facility for string substitution.
"""
def __init__(self):
self.tmpl_vars = None
# Tokens that include variables in a string may use this callback to
# substitute the variable against its value.
def _variable_sub_cb(self, match):
escape = match.group(1)
varname = match.group(2)
if escape == '\\':
return '$' + varname
if not _varname_re.match(varname):
raise Exception('%s is not a variable name' % varname)
value = self.tmpl_vars.get(varname)
if value is None:
raise Exception('Undefined value for %s' % varname)
elif hasattr(value, '__iter__'):
value = '\n'.join([str(v) for v in value])
return str(value)
def parse(self, template, **kwargs):
self.tmpl_vars = kwargs
output = ''
for line in template.split('\n'):
if line.endswith(' '):
output += line
else:
output += line + '\n'
return _string_re.sub(self._variable_sub_cb, output)
def _render_template(string, **vars):
default = {'date': time.strftime('%Y-%m-%d'),
'user': getuser()}
default.update(vars)
parser = _TemplateParser()
return parser.parse(string, **default)
def _is_header_line(line):
return re.match(r'^\w+: .+$', line) is not None
def _get_var_from_header_line(line):
match = re.match(r'^(\w+): (.+)$', line)
return match.group(1).strip().lower(), match.group(2).strip()
def _cleanup_mail_addresses(receipients):
if not isinstance(receipients, str):
receipients = ','.join(receipients)
rcpt = re.split(r'\s*[,;]\s*', receipients.lower())
return [r for r in rcpt if r.strip()]
###########################################################
# Public.
###########################################################
class Mail(object):
"""
Represents an email.
"""
def __init__(self,
sender = None,
to = '',
cc = '',
bcc = '',
subject = '',
body = ''):
"""
Creates a new email with the given values.
If the given sender is None, one will be automatically chosen
using getpass.getuser().
@type sender: string
@param sender: The email address of the sender.
@type to: string|list(string)
@param to: A list of email addresses, passed to set_to().
@type cc: string|list(string)
@param cc: A list of email addresses, passed to set_cc().
@type bcc: string|list(string)
@param bcc: A list of email addresses, passed to set_bcc().
@type subject: string
@param subject: A subject line, passed to set_subject().
@type body: string
@param body: The email body, passed to set_body().
"""
self.changed_event = Event()
self.files = []
self.sender = None
self.cc = None
self.bcc = None
self.to = None
self.subject = None
self.body = None
if not sender:
domain = socket.getfqdn('localhost')
sender = getuser() + '@' + domain
self.set_sender(sender)
self.set_to(to)
self.set_cc(cc)
self.set_bcc(bcc)
self.set_subject(subject)
self.set_body(body)
def set_from_template_string(self, string):
"""
Reads the given template (SMTP formatted) and sets all fields
accordingly.
@type string: string
@param string: The template.
"""
in_header = True
body = ''
for line in string.split('\n'):
if not in_header:
body += line + '\n'
continue
if not _is_header_line(line):
body += line + '\n'
in_header = False
continue
key, value = _get_var_from_header_line(line)
if key == 'from':
self.set_sender(value)
elif key == 'to':
self.add_to(value)
elif key == 'cc':
self.add_cc(value)
elif key == 'bcc':
self.add_bcc(value)
elif key == 'subject':
self.set_subject(value)
else:
raise Exception('Invalid header field "%s"' % key)
self.set_body(body.strip())
def set_sender(self, sender):
"""
Defines the value of the "From:" field.
@type sender: string
@param sender: The email address of the sender.
"""
self.sender = sender
self.changed_event()
def get_sender(self):
"""
Returns the value of the "From:" field.
@rtype: string
@return: The email address of the sender.
"""
return self.sender
def set_to(self, to):
"""
        Replaces the current list of recipients in the 'to' field with
the given value. The value may be one of the following:
- A list of strings (email addresses).
- A comma separated string containing one or more email addresses.
@type to: string|list(string)
@param to: The email addresses for the 'to' field.
"""
self.to = _cleanup_mail_addresses(to)
self.changed_event()
def add_to(self, to):
"""
        Adds the given list of recipients to the 'to' field.
Accepts the same argument types as set_to().
@type to: string|list(string)
@param to: The list of email addresses.
"""
self.to += _cleanup_mail_addresses(to)
self.changed_event()
def get_to(self):
"""
Returns the value of the "to" field.
@rtype: list(string)
@return: The email addresses in the 'to' field.
"""
return self.to
def set_cc(self, cc):
"""
Like set_to(), but for the 'cc' field.
@type cc: string|list(string)
@param cc: The email addresses for the 'cc' field.
"""
self.cc = _cleanup_mail_addresses(cc)
self.changed_event()
def add_cc(self, cc):
"""
Like add_to(), but for the 'cc' field.
@type cc: string|list(string)
@param cc: The list of email addresses.
"""
self.cc += _cleanup_mail_addresses(cc)
self.changed_event()
def get_cc(self):
"""
Returns the value of the "cc" field.
@rtype: list(string)
@return: The email addresses in the 'cc' field.
"""
return self.cc
def set_bcc(self, bcc):
"""
Like set_to(), but for the 'bcc' field.
@type bcc: string|list(string)
@param bcc: The email addresses for the 'bcc' field.
"""
self.bcc = _cleanup_mail_addresses(bcc)
self.changed_event()
def add_bcc(self, bcc):
"""
Like add_to(), but for the 'bcc' field.
@type bcc: string|list(string)
@param bcc: The list of email addresses.
"""
self.bcc += _cleanup_mail_addresses(bcc)
self.changed_event()
def get_bcc(self):
"""
Returns the value of the "bcc" field.
@rtype: list(string)
@return: The email addresses in the 'bcc' field.
"""
return self.bcc
def get_receipients(self):
"""
        Returns a list of all recipients (to, cc, and bcc).
        @rtype:  list(string)
        @return: The email addresses of all recipients.
"""
return self.get_to() + self.get_cc() + self.get_bcc()
def set_subject(self, subject):
"""
Defines the subject line.
@type subject: string
@param subject: The new subject line.
"""
self.subject = subject
self.changed_event()
def get_subject(self):
"""
Returns the subject line.
@rtype: string
@return: The subject line.
"""
return self.subject
def set_body(self, body):
"""
Defines the body of the mail.
@type body: string
@param body: The new email body.
"""
self.body = body
self.changed_event()
def get_body(self):
"""
Returns the body of the mail.
@rtype: string
@return: The body of the mail.
"""
return self.body
def get_smtp_header(self):
"""
        Returns the SMTP formatted header of the mail.
@rtype: string
@return: The SMTP header.
"""
header = "From: %s\r\n" % self.get_sender()
header += "To: %s\r\n" % ',\r\n '.join(self.get_to())
header += "Cc: %s\r\n" % ',\r\n '.join(self.get_cc())
header += "Bcc: %s\r\n" % ',\r\n '.join(self.get_bcc())
header += "Subject: %s\r\n" % self.get_subject()
return header
def get_smtp_mail(self):
"""
Returns the SMTP formatted email, as it may be passed to sendmail.
@rtype: string
@return: The SMTP formatted mail.
"""
header = self.get_smtp_header()
body = self.get_body().replace('\n', '\r\n')
return header + '\r\n' + body + '\r\n'
def add_attachment(self, filename):
"""
Adds the file with the given name as an attachment.
@type filename: string
@param filename: A filename.
"""
self.files.append(filename)
def get_attachments(self):
"""
Returns a list of attached files.
@rtype: list[string]
@return: The list of filenames.
"""
return self.files
def from_template_string(string, **kwargs):
"""
Reads the given SMTP formatted template, and creates a new Mail object
using the information.
@type string: str
@param string: The SMTP formatted template.
@type kwargs: str
@param kwargs: Variables to replace in the template.
@rtype: Mail
@return: The resulting mail.
"""
tmpl = _render_template(string, **kwargs)
mail = Mail()
mail.set_from_template_string(tmpl)
return mail
def from_template(filename, **kwargs):
"""
Like from_template_string(), but reads the template from the file with
the given name instead.
@type filename: string
@param filename: The name of the template file.
@type kwargs: str
@param kwargs: Variables to replace in the template.
@rtype: Mail
@return: The resulting mail.
"""
tmpl = open(filename).read()
return from_template_string(tmpl, **kwargs)
def _get_mime_object(filename):
# Guess the content type based on the file's extension. Encoding
# is ignored, although we should check for simple things like
# gzip'd or compressed files.
ctype, encoding = mimetypes.guess_type(filename)
if ctype is None or encoding is not None:
ctype = 'application/octet-stream'
maintype, subtype = ctype.split('/', 1)
if maintype == 'text':
fp = open(filename)
msg = MIMEText(fp.read(), _subtype = subtype)
elif maintype == 'image':
fp = open(filename, 'rb')
msg = MIMEImage(fp.read(), _subtype = subtype)
elif maintype == 'audio':
fp = open(filename, 'rb')
msg = MIMEAudio(fp.read(), _subtype = subtype)
else:
fp = open(filename, 'rb')
msg = MIMEBase(maintype, subtype)
msg.set_payload(fp.read())
encoders.encode_base64(msg)
fp.close()
# Set the filename parameter
msg.add_header('Content-Disposition', 'attachment', filename = filename)
return msg
def send(mail, server = 'localhost'):
"""
Sends the given mail.
@type mail: Mail
@param mail: The mail object.
@type server: string
@param server: The address of the mailserver.
"""
sender = mail.get_sender()
rcpt = mail.get_receipients()
session = smtplib.SMTP(server)
message = MIMEMultipart()
message['Subject'] = mail.get_subject()
message['From'] = mail.get_sender()
message['To'] = ', '.join(mail.get_to())
message['Cc'] = ', '.join(mail.get_cc())
message.preamble = 'Your mail client is not MIME aware.'
body = MIMEText(mail.get_body())
body.add_header('Content-Disposition', 'inline')
message.attach(body)
for filename in mail.get_attachments():
message.attach(_get_mime_object(filename))
session.sendmail(sender, rcpt, message.as_string())
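# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original module), combining the SMTP
# formatted template accepted by set_from_template_string() with send(). The
# addresses, template variables and server name are hypothetical.
if __name__ == '__main__':
    _template = ('From: [email protected]\n'
                 'To: [email protected]\n'
                 'Subject: Report for {date}\n'
                 '\n'
                 'Hello {user},\n'
                 'this is only an example body.\n')
    _mail = from_template_string(_template, user='reader')
    print _mail.get_smtp_mail()
    # send(_mail, server='localhost')  # would actually deliver the mail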
| gnperumal/exscript | src/Exscript/util/mail.py | Python | gpl-2.0 | 14,239 |
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from weboob.capabilities.base import empty
from weboob.capabilities.gauge import CapGauge, SensorNotFound
from weboob.tools.application.repl import ReplApplication
from weboob.tools.application.formatters.iformatter import IFormatter
__all__ = ['Boobsize']
class GaugeFormatter(IFormatter):
MANDATORY_FIELDS = ('name', 'object', 'sensors')
DISPLAYED_FIELDS = ('city', )
def start_format(self, **kwargs):
# Name = 27 Object = 10 City = 10 Sensors = 33
self.output(' Name and ID Object City Sensors ')
self.output('----------------------------+----------+----------+---------------------------------')
def format_obj(self, obj, alias):
name = obj.name
city = u""
if not empty(obj.city):
city = obj.city
if not obj.sensors or (len(obj.sensors) == 0):
result = u' %s %s %s \n' %\
(self.colored('%-27s' % name[:27], 'red'),
self.colored('%-10s' % obj.object[:10], 'yellow'),
self.colored('%-10s' % city[:10], 'yellow')
)
result += u' %s \n' % self.colored('%-47s' % obj.fullid[:47], 'blue')
else:
first = True
firstaddress = obj.sensors[0].address
for sensor in obj.sensors:
sensorname = sensor.name
# This is a int value, do not display it as a float
if not empty(sensor.lastvalue.level):
if int(sensor.lastvalue.level) == sensor.lastvalue.level:
lastvalue = "%d " % sensor.lastvalue.level
else:
lastvalue = "%r " % sensor.lastvalue.level
if not empty(sensor.unit):
lastvalue += "%s" % sensor.unit
else:
lastvalue = u"? "
if first:
result = u' %s %s %s ' %\
(self.colored('%-27s' % name[:27], 'red'),
self.colored('%-10s' % obj.object[:10], 'yellow'),
self.colored('%-10s' % city[:10], 'yellow'),
)
if not empty(firstaddress):
result += u'%s' % self.colored('%-33s' % sensor.address[:33], 'yellow')
result += u'\n'
result += u' %s' % self.colored('%-47s' % obj.fullid[:47], 'blue')
result += u' %s %s\n' %\
(self.colored('%-20s' % sensorname[:20], 'magenta'),
self.colored('%-13s' % lastvalue[:13], 'red')
)
first = False
else:
result += u' %s %s\n' %\
(self.colored('%-20s' % sensorname[:20], 'magenta'),
self.colored('%-13s' % lastvalue[:13], 'red')
)
if not empty(sensor.address) and sensor.address != firstaddress:
result += u' %s \n' %\
self.colored('%-33s' % sensor.address[:33], 'yellow')
return result
class Boobsize(ReplApplication):
APPNAME = 'Boobsize'
VERSION = '1.1'
COPYRIGHT = 'Copyright(C) 2013-YEAR Florent Fourcot'
DESCRIPTION = "Console application allowing to display various sensors and gauges values."
SHORT_DESCRIPTION = "display sensors and gauges values"
CAPS = (CapGauge)
DEFAULT_FORMATTER = 'table'
EXTRA_FORMATTERS = {'gauge_list': GaugeFormatter, }
COMMANDS_FORMATTERS = {'search': 'gauge_list',
}
def main(self, argv):
self.load_config()
return ReplApplication.main(self, argv)
def bcall_error_handler(self, backend, error, backtrace):
if isinstance(error, SensorNotFound):
msg = unicode(error) or 'Sensor not found (hint: try details command)'
print('Error(%s): %s' % (backend.name, msg), file=self.stderr)
else:
return ReplApplication.bcall_error_handler(self, backend, error, backtrace)
def do_search(self, pattern):
"""
search [PATTERN]
Display all gauges. If PATTERN is specified, search on a pattern.
"""
self.change_path([u'gauges'])
self.start_format()
for gauge in self.do('iter_gauges', pattern or None, caps=CapGauge):
self.cached_format(gauge)
def complete_search(self, text, line, *ignored):
args = line.split(' ')
if len(args) == 2:
return self._complete_object()
def do_details(self, line):
"""
details GAUGE_ID
Display details of all sensors of the gauge.
"""
gauge, pattern = self.parse_command_args(line, 2, 1)
_id, backend_name = self.parse_id(gauge)
self.start_format()
for sensor in self.do('iter_sensors', _id, pattern=pattern, backends=backend_name, caps=CapGauge):
self.format(sensor)
def do_history(self, line):
"""
history SENSOR_ID
        Get the history of a specific sensor (use 'search' to find a gauge, and 'details GAUGE_ID' to list the sensors attached to it).
"""
gauge, = self.parse_command_args(line, 1, 1)
_id, backend_name = self.parse_id(gauge)
self.start_format()
for measure in self.do('iter_gauge_history', _id, backends=backend_name, caps=CapGauge):
self.format(measure)
def complete_last_sensor_measure(self, text, line, *ignored):
args = line.split(' ')
if len(args) == 2:
return self._complete_object()
def do_last_sensor_measure(self, line):
"""
last_sensor_measure SENSOR_ID
Get last measure of a sensor.
"""
gauge, = self.parse_command_args(line, 1, 1)
_id, backend_name = self.parse_id(gauge)
self.start_format()
for measure in self.do('get_last_measure', _id, backends=backend_name, caps=CapGauge):
self.format(measure)
| sputnick-dev/weboob | weboob/applications/boobsize/boobsize.py | Python | agpl-3.0 | 7,094 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_EXT_texture'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_EXT_texture',error_checker=_errors._error_checker)
GL_ALPHA12_EXT=_C('GL_ALPHA12_EXT',0x803D)
GL_ALPHA16_EXT=_C('GL_ALPHA16_EXT',0x803E)
GL_ALPHA4_EXT=_C('GL_ALPHA4_EXT',0x803B)
GL_ALPHA8_EXT=_C('GL_ALPHA8_EXT',0x803C)
GL_INTENSITY12_EXT=_C('GL_INTENSITY12_EXT',0x804C)
GL_INTENSITY16_EXT=_C('GL_INTENSITY16_EXT',0x804D)
GL_INTENSITY4_EXT=_C('GL_INTENSITY4_EXT',0x804A)
GL_INTENSITY8_EXT=_C('GL_INTENSITY8_EXT',0x804B)
GL_INTENSITY_EXT=_C('GL_INTENSITY_EXT',0x8049)
GL_LUMINANCE12_ALPHA12_EXT=_C('GL_LUMINANCE12_ALPHA12_EXT',0x8047)
GL_LUMINANCE12_ALPHA4_EXT=_C('GL_LUMINANCE12_ALPHA4_EXT',0x8046)
GL_LUMINANCE12_EXT=_C('GL_LUMINANCE12_EXT',0x8041)
GL_LUMINANCE16_ALPHA16_EXT=_C('GL_LUMINANCE16_ALPHA16_EXT',0x8048)
GL_LUMINANCE16_EXT=_C('GL_LUMINANCE16_EXT',0x8042)
GL_LUMINANCE4_ALPHA4_EXT=_C('GL_LUMINANCE4_ALPHA4_EXT',0x8043)
GL_LUMINANCE4_EXT=_C('GL_LUMINANCE4_EXT',0x803F)
GL_LUMINANCE6_ALPHA2_EXT=_C('GL_LUMINANCE6_ALPHA2_EXT',0x8044)
GL_LUMINANCE8_ALPHA8_EXT=_C('GL_LUMINANCE8_ALPHA8_EXT',0x8045)
GL_LUMINANCE8_EXT=_C('GL_LUMINANCE8_EXT',0x8040)
GL_PROXY_TEXTURE_1D_EXT=_C('GL_PROXY_TEXTURE_1D_EXT',0x8063)
GL_PROXY_TEXTURE_2D_EXT=_C('GL_PROXY_TEXTURE_2D_EXT',0x8064)
GL_REPLACE_EXT=_C('GL_REPLACE_EXT',0x8062)
GL_RGB10_A2_EXT=_C('GL_RGB10_A2_EXT',0x8059)
GL_RGB10_EXT=_C('GL_RGB10_EXT',0x8052)
GL_RGB12_EXT=_C('GL_RGB12_EXT',0x8053)
GL_RGB16_EXT=_C('GL_RGB16_EXT',0x8054)
GL_RGB2_EXT=_C('GL_RGB2_EXT',0x804E)
GL_RGB4_EXT=_C('GL_RGB4_EXT',0x804F)
GL_RGB5_A1_EXT=_C('GL_RGB5_A1_EXT',0x8057)
GL_RGB5_EXT=_C('GL_RGB5_EXT',0x8050)
GL_RGB8_EXT=_C('GL_RGB8_EXT',0x8051)
GL_RGBA12_EXT=_C('GL_RGBA12_EXT',0x805A)
GL_RGBA16_EXT=_C('GL_RGBA16_EXT',0x805B)
GL_RGBA2_EXT=_C('GL_RGBA2_EXT',0x8055)
GL_RGBA4_EXT=_C('GL_RGBA4_EXT',0x8056)
GL_RGBA8_EXT=_C('GL_RGBA8_EXT',0x8058)
GL_TEXTURE_ALPHA_SIZE_EXT=_C('GL_TEXTURE_ALPHA_SIZE_EXT',0x805F)
GL_TEXTURE_BLUE_SIZE_EXT=_C('GL_TEXTURE_BLUE_SIZE_EXT',0x805E)
GL_TEXTURE_GREEN_SIZE_EXT=_C('GL_TEXTURE_GREEN_SIZE_EXT',0x805D)
GL_TEXTURE_INTENSITY_SIZE_EXT=_C('GL_TEXTURE_INTENSITY_SIZE_EXT',0x8061)
GL_TEXTURE_LUMINANCE_SIZE_EXT=_C('GL_TEXTURE_LUMINANCE_SIZE_EXT',0x8060)
GL_TEXTURE_RED_SIZE_EXT=_C('GL_TEXTURE_RED_SIZE_EXT',0x805C)
GL_TEXTURE_TOO_LARGE_EXT=_C('GL_TEXTURE_TOO_LARGE_EXT',0x8065)
| stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/raw/GL/EXT/texture.py | Python | lgpl-3.0 | 2,667 |
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import Popen
from unittest import main, TestCase
from uuid import uuid4
from swiftclient import client
from swift.common import direct_client
from test.probe.common import kill_nonprimary_server, kill_server, \
kill_servers, reset_environment, start_server
class TestObjectAsyncUpdate(TestCase):
def setUp(self):
(self.pids, self.port2server, self.account_ring, self.container_ring,
self.object_ring, self.url, self.token,
self.account) = reset_environment()
def tearDown(self):
kill_servers(self.port2server, self.pids)
def test_main(self):
# Create container
# Kill container servers excepting two of the primaries
# Create container/obj
# Restart other primary server
# Assert it does not know about container/obj
# Run the object-updaters
# Assert the other primary server now knows about container/obj
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container)
cpart, cnodes = self.container_ring.get_nodes(self.account, container)
cnode = cnodes[0]
kill_nonprimary_server(cnodes, self.port2server, self.pids)
kill_server(cnode['port'], self.port2server, self.pids)
obj = 'object-%s' % uuid4()
client.put_object(self.url, self.token, container, obj, '')
start_server(cnode['port'], self.port2server, self.pids)
self.assert_(not direct_client.direct_get_container(
cnode, cpart, self.account, container)[1])
processes = []
for node in xrange(1, 5):
processes.append(Popen(['swift-object-updater',
'/etc/swift/object-server/%d.conf' % node,
'once']))
for process in processes:
process.wait()
objs = [o['name'] for o in direct_client.direct_get_container(
cnode, cpart, self.account, container)[1]]
self.assert_(obj in objs)
if __name__ == '__main__':
main()
| Mirantis/swift-encrypt | test/probe/test_object_async_update.py | Python | apache-2.0 | 2,679 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.tpu.datasets import *
# pylint: enable=wildcard-import,unused-import
| theflofly/tensorflow | tensorflow/contrib/tpu/python/tpu/datasets.py | Python | apache-2.0 | 989 |
'''
Harvester for Iowa Research Online for the SHARE project
Example API call: http://ir.uiowa.edu/do/oai/?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class IowaresearchHarvester(OAIHarvester):
short_name = 'iowaresearch'
long_name = 'Iowa Research Online'
url = 'http://ir.uiowa.edu'
base_url = 'http://ir.uiowa.edu/do/oai/'
property_list = ['date', 'source', 'identifier', 'type']
timezone_granularity = True
| fabianvf/scrapi | scrapi/harvesters/iowaresearch.py | Python | apache-2.0 | 520 |
__author__ = 'fjlopez'
| Swaathik/cellbase | clients/python/lib/exceptions/__init__.py | Python | apache-2.0 | 23 |
# Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A test to ensure that admin services are registered correctly."""
from concurrent.futures import ThreadPoolExecutor
import logging
import sys
import unittest
import grpc
import grpc_admin
from grpc_channelz.v1 import channelz_pb2
from grpc_channelz.v1 import channelz_pb2_grpc
from grpc_csds import csds_pb2
from grpc_csds import csds_pb2_grpc
@unittest.skipIf(sys.version_info[0] < 3,
'ProtoBuf descriptor has moved on from Python2')
class TestAdmin(unittest.TestCase):
def setUp(self):
self._server = grpc.server(ThreadPoolExecutor())
port = self._server.add_insecure_port('localhost:0')
grpc_admin.add_admin_servicers(self._server)
self._server.start()
self._channel = grpc.insecure_channel('localhost:%s' % port)
def tearDown(self):
self._channel.close()
self._server.stop(0)
def test_has_csds(self):
stub = csds_pb2_grpc.ClientStatusDiscoveryServiceStub(self._channel)
resp = stub.FetchClientStatus(csds_pb2.ClientStatusRequest())
# No exception raised and the response is valid
self.assertGreater(len(resp.config), 0)
def test_has_channelz(self):
stub = channelz_pb2_grpc.ChannelzStub(self._channel)
resp = stub.GetTopChannels(channelz_pb2.GetTopChannelsRequest())
# No exception raised and the response is valid
self.assertGreater(len(resp.channel), 0)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
unittest.main(verbosity=2)
| ejona86/grpc | src/python/grpcio_tests/tests/admin/test_admin.py | Python | apache-2.0 | 2,110 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for GCE Instance Groups."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.training.server_lib import ClusterSpec
from tensorflow.python.util.tf_export import tf_export
_GOOGLE_API_CLIENT_INSTALLED = True
try:
from googleapiclient import discovery # pylint: disable=g-import-not-at-top
from oauth2client.client import GoogleCredentials # pylint: disable=g-import-not-at-top
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
@tf_export('distribute.cluster_resolver.GCEClusterResolver')
class GCEClusterResolver(ClusterResolver):
"""Cluster Resolver for Google Compute Engine.
This is an implementation of cluster resolvers for the Google Compute Engine
instance group platform. By specifying a project, zone, and instance group,
this will retrieve the IP address of all the instances within the instance
group and return a Cluster Resolver object suitable for use for distributed
TensorFlow.
"""
def __init__(self,
project,
zone,
instance_group,
port,
task_type='worker',
task_id=0,
rpc_layer='grpc',
credentials='default',
service=None):
"""Creates a new GCEClusterResolver object.
    This takes in a few parameters and creates a GCEClusterResolver object. It
will then use these parameters to query the GCE API for the IP addresses of
each instance in the instance group.
Args:
project: Name of the GCE project.
zone: Zone of the GCE instance group.
instance_group: Name of the GCE instance group.
port: Port of the listening TensorFlow server (default: 8470)
task_type: Name of the TensorFlow job this GCE instance group of VM
        instances belongs to.
task_id: The task index for this particular VM, within the GCE
instance group. In particular, every single instance should be assigned
a unique ordinal index within an instance group manually so that they
can be distinguished from each other.
rpc_layer: The RPC layer TensorFlow should use to communicate across
instances.
credentials: GCE Credentials. If nothing is specified, this defaults to
GoogleCredentials.get_application_default().
service: The GCE API object returned by the googleapiclient.discovery
function. (Default: discovery.build('compute', 'v1')). If you specify a
custom service object, then the credentials parameter will be ignored.
Raises:
ImportError: If the googleapiclient is not installed.
"""
self._project = project
self._zone = zone
self._instance_group = instance_group
self._task_type = task_type
self._task_id = task_id
self._rpc_layer = rpc_layer
self._port = port
self._credentials = credentials
if credentials == 'default':
if _GOOGLE_API_CLIENT_INSTALLED:
self._credentials = GoogleCredentials.get_application_default()
if service is None:
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('googleapiclient must be installed before using the '
'GCE cluster resolver')
self._service = discovery.build(
'compute', 'v1',
credentials=self._credentials)
else:
self._service = service
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest instance group info.
This returns a ClusterSpec object for use based on information from the
specified instance group. We will retrieve the information from the GCE APIs
every time this method is called.
Returns:
A ClusterSpec containing host information retrieved from GCE.
"""
request_body = {'instanceState': 'RUNNING'}
request = self._service.instanceGroups().listInstances(
project=self._project,
zone=self._zone,
instanceGroups=self._instance_group,
body=request_body,
orderBy='name')
worker_list = []
while request is not None:
response = request.execute()
items = response['items']
for instance in items:
instance_name = instance['instance'].split('/')[-1]
instance_request = self._service.instances().get(
project=self._project,
zone=self._zone,
instance=instance_name)
if instance_request is not None:
instance_details = instance_request.execute()
ip_address = instance_details['networkInterfaces'][0]['networkIP']
instance_url = '%s:%s' % (ip_address, self._port)
worker_list.append(instance_url)
request = self._service.instanceGroups().listInstances_next(
previous_request=request,
previous_response=response)
worker_list.sort()
return ClusterSpec({self._task_type: worker_list})
def master(self, task_type=None, task_id=None, rpc_layer=None):
task_type = task_type if task_type is not None else self._task_type
task_id = task_id if task_id is not None else self._task_id
if task_type is not None and task_id is not None:
master = self.cluster_spec().task_address(task_type, task_id)
if rpc_layer or self._rpc_layer:
return '%s://%s' % (rpc_layer or self._rpc_layer, master)
else:
return master
return ''
@property
def task_type(self):
return self._task_type
@property
def task_id(self):
return self._task_id
@task_type.setter
def task_type(self, task_type):
raise RuntimeError(
'You cannot reset the task_type of the GCEClusterResolver after it has '
'been created.')
@task_id.setter
def task_id(self, task_id):
self._task_id = task_id
@property
def rpc_layer(self):
return self._rpc_layer
@rpc_layer.setter
def rpc_layer(self, rpc_layer):
self._rpc_layer = rpc_layer
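# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original module). The project, zone and
# instance group names are hypothetical, and the calls need GCE credentials plus
# the googleapiclient package, so the example is left commented out.
#
#   resolver = GCEClusterResolver(project='my-project',
#                                 zone='us-central1-a',
#                                 instance_group='tf-workers',
#                                 port=8470)
#   print(resolver.cluster_spec())
#   print(resolver.master(task_type='worker', task_id=0))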
| ghchinoy/tensorflow | tensorflow/python/distribute/cluster_resolver/gce_cluster_resolver.py | Python | apache-2.0 | 6,761 |
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 coding=utf-8
import os, glob
from django.core.management.base import BaseCommand
from django.core.management import call_command
from django.utils.translation import ugettext_lazy
class Command(BaseCommand):
help = ugettext_lazy("Import ODK forms and instances.")
def handle(self, *args, **kwargs):
path = args[0]
call_command('import_forms', os.path.join(path, "forms"))
call_command('import_instances', os.path.join(path, "instances"))
| GeoODK/formhub | odk_viewer/management/commands/import.py | Python | bsd-2-clause | 525 |
from socket import *
from struct import unpack
from time import ctime, sleep
from sys import argv
argv = argv[1:]
if len(argv) == 0:
argv = [ 'time-nw.nist.gov' ]
s = socket(AF_INET, SOCK_DGRAM)
s.settimeout(5.0)
for server in argv:
print server, ":",
try:
s.sendto('', 0, (server, 37))
t = long(unpack('!L', s.recv(16)[:4])[0])
# Convert from 1900/01/01 epoch to 1970/01/01 epoch
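        # (2208988800 seconds = (70 * 365 + 17 leap days) * 86400 seconds per day)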
t -= 2208988800
print ctime(t)
except timeout:
print "TIMEOUT"
except:
print "ERROR"
s.close()
| uthcode/learntosolveit | languages/python/networking_udp_time.py | Python | bsd-3-clause | 544 |
#################################################################
# seSceneGraphExplorer.py
# Originally from SceneGraphExplorer.py
# Altered by Yi-Hong Lin, [email protected], 2004
#
# we need a customized SceneGraphExplorer.
#
# Don't forget to check the seTree.
#
#################################################################
from direct.showbase.DirectObject import DirectObject
from seTree import TreeNode, TreeItem
import Pmw, sys
if sys.version_info >= (3, 0):
from tkinter import IntVar, Frame, Label
import tkinter
else:
from Tkinter import IntVar, Frame, Label
import Tkinter as tkinter
# changing these strings requires changing sceneEditor.py SGE_ strs too!
# This list of items will be shown in the pop-up window when the user right-clicks on
# any node in the graph. This is also the main reason we copied from the original
# implementation rather than inheriting from it: apart from the drawing part, we have
# changed a lot of things...
DEFAULT_MENU_ITEMS = [
'Update Explorer',
'Separator',
'Properties',
'Separator',
'Duplicate',
'Remove',
'Add Dummy',
'Add Collision Object',
'Metadata',
'Separator',
'Set as Reparent Target',
'Reparent to Target',
'Separator',
'Animation Panel',
'Blend Animation Panel',
'MoPath Panel',
'Align Tool',
'Separator']
class seSceneGraphExplorer(Pmw.MegaWidget, DirectObject):
"Graphical display of a scene graph"
def __init__(self, parent = None, nodePath = render, **kw):
# Define the megawidget options.
optiondefs = (
('menuItems', [], Pmw.INITOPT),
)
self.defineoptions(kw, optiondefs)
# Initialise superclass
Pmw.MegaWidget.__init__(self, parent)
# Initialize some class variables
self.nodePath = nodePath
# Create the components.
# Setup up container
interior = self.interior()
interior.configure(relief = tkinter.GROOVE, borderwidth = 2)
# Create a label and an entry
self._scrolledCanvas = self.createcomponent(
'scrolledCanvas',
(), None,
Pmw.ScrolledCanvas, (interior,),
hull_width = 200, hull_height = 300,
usehullsize = 1)
self._canvas = self._scrolledCanvas.component('canvas')
self._canvas['scrollregion'] = ('0i', '0i', '2i', '4i')
self._scrolledCanvas.resizescrollregion()
self._scrolledCanvas.pack(padx = 3, pady = 3, expand=1, fill = tkinter.BOTH)
self._canvas.bind('<ButtonPress-2>', self.mouse2Down)
self._canvas.bind('<B2-Motion>', self.mouse2Motion)
self._canvas.bind('<Configure>',
lambda e, sc = self._scrolledCanvas:
sc.resizescrollregion())
self.interior().bind('<Destroy>', self.onDestroy)
# Create the contents
self._treeItem = SceneGraphExplorerItem(self.nodePath)
self._node = TreeNode(self._canvas, None, self._treeItem,
DEFAULT_MENU_ITEMS + self['menuItems'])
self._node.expand()
self._parentFrame = Frame(interior)
self._label = self.createcomponent(
'parentLabel',
(), None,
Label, (interior,),
text = 'Active Reparent Target: ',
anchor = tkinter.W, justify = tkinter.LEFT)
self._label.pack(fill = tkinter.X)
# Add update parent label
def updateLabel(nodePath = None, s = self):
s._label['text'] = 'Active Reparent Target: ' + nodePath.getName()
self.accept('DIRECT_activeParent', updateLabel)
# Add update hook
self.accept('SGE_Update Explorer',
lambda np, s = self: s.update())
# Check keywords and initialise options based on input values.
self.initialiseoptions(seSceneGraphExplorer)
def update(self):
""" Refresh scene graph explorer """
self._node.update()
def mouse2Down(self, event):
self._width = 1.0 * self._canvas.winfo_width()
self._height = 1.0 * self._canvas.winfo_height()
xview = self._canvas.xview()
yview = self._canvas.yview()
self._left = xview[0]
self._top = yview[0]
self._dxview = xview[1] - xview[0]
self._dyview = yview[1] - yview[0]
self._2lx = event.x
self._2ly = event.y
def mouse2Motion(self,event):
newx = self._left - ((event.x - self._2lx)/self._width) * self._dxview
self._canvas.xview_moveto(newx)
newy = self._top - ((event.y - self._2ly)/self._height) * self._dyview
self._canvas.yview_moveto(newy)
self._2lx = event.x
self._2ly = event.y
self._left = self._canvas.xview()[0]
self._top = self._canvas.yview()[0]
def onDestroy(self, event):
# Remove hooks
self.ignore('DIRECT_activeParent')
self.ignore('SGE_Update Explorer')
def deSelectTree(self):
self._node.deselecttree()
def selectNodePath(self,nodePath, callBack=True):
item = self._node.find(nodePath.get_key())
        if item != None:
item.select(callBack)
else:
print('----SGE: Error Selection')
class SceneGraphExplorerItem(TreeItem):
"""Example TreeItem subclass -- browse the file system."""
def __init__(self, nodePath):
self.nodePath = nodePath
def GetText(self):
type = self.nodePath.node().getType().getName()
name = self.nodePath.getName()
return type + " " + name
def GetTextForEdit(self):
name = self.nodePath.getName()
return name
def GetKey(self):
return self.nodePath.get_key()
def IsEditable(self):
# All nodes' names can be edited nowadays.
return 1
#return issubclass(self.nodePath.node().__class__, NamedNode)
def SetText(self, text):
try:
messenger.send('SGE_changeName', [self.nodePath, text])
except AttributeError:
pass
def GetIconName(self):
return "sphere2" # XXX wish there was a "file" icon
def IsExpandable(self):
return self.nodePath.getNumChildren() != 0
def GetSubList(self):
sublist = []
for nodePath in self.nodePath.getChildren():
item = SceneGraphExplorerItem(nodePath)
sublist.append(item)
return sublist
def OnSelect(self, callback):
messenger.send('SGE_Flash', [self.nodePath])
if not callback:
messenger.send('SGE_madeSelection', [self.nodePath, callback])
else:
messenger.send('SGE_madeSelection', [self.nodePath])
def MenuCommand(self, command):
messenger.send('SGE_' + command, [self.nodePath])
def explore(nodePath = render):
    tl = tkinter.Toplevel()
tl.title('Explore: ' + nodePath.getName())
sge = seSceneGraphExplorer(parent = tl, nodePath = nodePath)
sge.pack(expand = 1, fill = 'both')
return sge
| chandler14362/panda3d | contrib/src/sceneeditor/seSceneGraphExplorer.py | Python | bsd-3-clause | 7,053 |