code | repo_name | path | language | license | size
---|---|---|---|---|---
from django.db import connection
from django.db.backends.ddl_references import (
Columns, Expressions, ForeignKeyName, IndexName, Statement, Table,
)
from django.db.models import ExpressionList, F
from django.db.models.functions import Upper
from django.db.models.indexes import IndexExpression
from django.test import SimpleTestCase, TransactionTestCase
from .models import Person
class TableTests(SimpleTestCase):
def setUp(self):
self.reference = Table('table', lambda table: table.upper())
def test_references_table(self):
self.assertIs(self.reference.references_table('table'), True)
self.assertIs(self.reference.references_table('other'), False)
def test_rename_table_references(self):
self.reference.rename_table_references('other', 'table')
self.assertIs(self.reference.references_table('table'), True)
self.assertIs(self.reference.references_table('other'), False)
self.reference.rename_table_references('table', 'other')
self.assertIs(self.reference.references_table('table'), False)
self.assertIs(self.reference.references_table('other'), True)
def test_repr(self):
self.assertEqual(repr(self.reference), "<Table 'TABLE'>")
def test_str(self):
self.assertEqual(str(self.reference), 'TABLE')
class ColumnsTests(TableTests):
def setUp(self):
self.reference = Columns(
'table', ['first_column', 'second_column'], lambda column: column.upper()
)
def test_references_column(self):
self.assertIs(self.reference.references_column('other', 'first_column'), False)
self.assertIs(self.reference.references_column('table', 'third_column'), False)
self.assertIs(self.reference.references_column('table', 'first_column'), True)
def test_rename_column_references(self):
self.reference.rename_column_references('other', 'first_column', 'third_column')
self.assertIs(self.reference.references_column('table', 'first_column'), True)
self.assertIs(self.reference.references_column('table', 'third_column'), False)
self.assertIs(self.reference.references_column('other', 'third_column'), False)
self.reference.rename_column_references('table', 'third_column', 'first_column')
self.assertIs(self.reference.references_column('table', 'first_column'), True)
self.assertIs(self.reference.references_column('table', 'third_column'), False)
self.reference.rename_column_references('table', 'first_column', 'third_column')
self.assertIs(self.reference.references_column('table', 'first_column'), False)
self.assertIs(self.reference.references_column('table', 'third_column'), True)
def test_repr(self):
self.assertEqual(repr(self.reference), "<Columns 'FIRST_COLUMN, SECOND_COLUMN'>")
def test_str(self):
self.assertEqual(str(self.reference), 'FIRST_COLUMN, SECOND_COLUMN')
class IndexNameTests(ColumnsTests):
def setUp(self):
def create_index_name(table_name, column_names, suffix):
return ', '.join("%s_%s_%s" % (table_name, column_name, suffix) for column_name in column_names)
self.reference = IndexName(
'table', ['first_column', 'second_column'], 'suffix', create_index_name
)
def test_repr(self):
self.assertEqual(repr(self.reference), "<IndexName 'table_first_column_suffix, table_second_column_suffix'>")
def test_str(self):
self.assertEqual(str(self.reference), 'table_first_column_suffix, table_second_column_suffix')
class ForeignKeyNameTests(IndexNameTests):
def setUp(self):
def create_foreign_key_name(table_name, column_names, suffix):
return ', '.join("%s_%s_%s" % (table_name, column_name, suffix) for column_name in column_names)
self.reference = ForeignKeyName(
'table', ['first_column', 'second_column'],
'to_table', ['to_first_column', 'to_second_column'],
'%(to_table)s_%(to_column)s_fk',
create_foreign_key_name,
)
def test_references_table(self):
super().test_references_table()
self.assertIs(self.reference.references_table('to_table'), True)
def test_references_column(self):
super().test_references_column()
self.assertIs(self.reference.references_column('to_table', 'second_column'), False)
self.assertIs(self.reference.references_column('to_table', 'to_second_column'), True)
def test_rename_table_references(self):
super().test_rename_table_references()
self.reference.rename_table_references('to_table', 'other_to_table')
self.assertIs(self.reference.references_table('other_to_table'), True)
self.assertIs(self.reference.references_table('to_table'), False)
def test_rename_column_references(self):
super().test_rename_column_references()
self.reference.rename_column_references('to_table', 'second_column', 'third_column')
self.assertIs(self.reference.references_column('table', 'second_column'), True)
self.assertIs(self.reference.references_column('to_table', 'to_second_column'), True)
self.reference.rename_column_references('to_table', 'to_first_column', 'to_third_column')
self.assertIs(self.reference.references_column('to_table', 'to_first_column'), False)
self.assertIs(self.reference.references_column('to_table', 'to_third_column'), True)
def test_repr(self):
self.assertEqual(
repr(self.reference),
"<ForeignKeyName 'table_first_column_to_table_to_first_column_fk, "
"table_second_column_to_table_to_first_column_fk'>"
)
def test_str(self):
self.assertEqual(
str(self.reference),
'table_first_column_to_table_to_first_column_fk, '
'table_second_column_to_table_to_first_column_fk'
)
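# MockReference below is a minimal test double: it implements the protocol that
# Statement delegates to for each of its keyword parts (references_table,
# references_column, rename_table_references, rename_column_references and
# __str__), so the StatementTests further down can exercise Statement in
# isolation from the real ddl_references classes.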
class MockReference:
def __init__(self, representation, referenced_tables, referenced_columns):
self.representation = representation
self.referenced_tables = referenced_tables
self.referenced_columns = referenced_columns
def references_table(self, table):
return table in self.referenced_tables
def references_column(self, table, column):
return (table, column) in self.referenced_columns
def rename_table_references(self, old_table, new_table):
if old_table in self.referenced_tables:
self.referenced_tables.remove(old_table)
self.referenced_tables.add(new_table)
def rename_column_references(self, table, old_column, new_column):
column = (table, old_column)
if column in self.referenced_columns:
self.referenced_columns.remove(column)
self.referenced_columns.add((table, new_column))
def __str__(self):
return self.representation
class StatementTests(SimpleTestCase):
def test_references_table(self):
statement = Statement('', reference=MockReference('', {'table'}, {}), non_reference='')
self.assertIs(statement.references_table('table'), True)
self.assertIs(statement.references_table('other'), False)
def test_references_column(self):
statement = Statement('', reference=MockReference('', {}, {('table', 'column')}), non_reference='')
self.assertIs(statement.references_column('table', 'column'), True)
self.assertIs(statement.references_column('other', 'column'), False)
def test_rename_table_references(self):
reference = MockReference('', {'table'}, {})
statement = Statement('', reference=reference, non_reference='')
statement.rename_table_references('table', 'other')
self.assertEqual(reference.referenced_tables, {'other'})
def test_rename_column_references(self):
reference = MockReference('', {}, {('table', 'column')})
statement = Statement('', reference=reference, non_reference='')
statement.rename_column_references('table', 'column', 'other')
self.assertEqual(reference.referenced_columns, {('table', 'other')})
def test_repr(self):
reference = MockReference('reference', {}, {})
statement = Statement("%(reference)s - %(non_reference)s", reference=reference, non_reference='non_reference')
self.assertEqual(repr(statement), "<Statement 'reference - non_reference'>")
def test_str(self):
reference = MockReference('reference', {}, {})
statement = Statement("%(reference)s - %(non_reference)s", reference=reference, non_reference='non_reference')
self.assertEqual(str(statement), 'reference - non_reference')
class ExpressionsTests(TransactionTestCase):
available_apps = []
def setUp(self):
compiler = Person.objects.all().query.get_compiler(connection.alias)
self.editor = connection.schema_editor()
self.expressions = Expressions(
table=Person._meta.db_table,
expressions=ExpressionList(
IndexExpression(F('first_name')),
IndexExpression(F('last_name').desc()),
IndexExpression(Upper('last_name')),
).resolve_expression(compiler.query),
compiler=compiler,
quote_value=self.editor.quote_value,
)
def test_references_table(self):
self.assertIs(self.expressions.references_table(Person._meta.db_table), True)
self.assertIs(self.expressions.references_table('other'), False)
def test_references_column(self):
table = Person._meta.db_table
self.assertIs(self.expressions.references_column(table, 'first_name'), True)
self.assertIs(self.expressions.references_column(table, 'last_name'), True)
self.assertIs(self.expressions.references_column(table, 'other'), False)
def test_rename_table_references(self):
table = Person._meta.db_table
self.expressions.rename_table_references(table, 'other')
self.assertIs(self.expressions.references_table(table), False)
self.assertIs(self.expressions.references_table('other'), True)
self.assertIn(
'%s.%s' % (
self.editor.quote_name('other'),
self.editor.quote_name('first_name'),
),
str(self.expressions),
)
def test_rename_column_references(self):
table = Person._meta.db_table
self.expressions.rename_column_references(table, 'first_name', 'other')
self.assertIs(self.expressions.references_column(table, 'other'), True)
self.assertIs(self.expressions.references_column(table, 'first_name'), False)
self.assertIn(
'%s.%s' % (self.editor.quote_name(table), self.editor.quote_name('other')),
str(self.expressions),
)
def test_str(self):
table_name = self.editor.quote_name(Person._meta.db_table)
expected_str = '%s.%s, %s.%s DESC, (UPPER(%s.%s))' % (
table_name,
self.editor.quote_name('first_name'),
table_name,
self.editor.quote_name('last_name'),
table_name,
self.editor.quote_name('last_name'),
)
self.assertEqual(str(self.expressions), expected_str)
| wkschwartz/django | tests/backends/test_ddl_references.py | Python | bsd-3-clause | 11,306 |
###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case9_3_1 import Case9_3_1
class Case9_3_9(Case9_3_1):
DESCRIPTION = """Send fragmented text message message with message payload of length 4 * 2**20 (8M). Sent out in fragments of 4M."""
EXPECTATION = """Receive echo'ed text message (with payload as sent)."""
def init(self):
self.DATALEN = 4 * 2**20
self.FRAGSIZE = 4 * 2**20
self.PAYLOAD = "*" * self.DATALEN
self.WAITSECS = 100
self.reportTime = True
| frivoal/presto-testo | wpt/websockets/autobahn/oberstet-Autobahn-643d2ee/lib/python/autobahn/case/case9_3_9.py | Python | bsd-3-clause | 1,251 |
###
# Copyright (c) 2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
# This will be called by supybot to configure this module. advanced is
# a bool that specifies whether the user identified himself as an advanced
# user or not. You should effect your configuration by manipulating the
# registry as appropriate.
from supybot.questions import expect, anything, something, yn
conf.registerPlugin('Alias', True)
Alias = conf.registerPlugin('Alias')
conf.registerGroup(Alias, 'aliases')
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| tecan/xchat-rt | plugins/scripts/encryption/supybot-code-6361b1e856ebbc8e14d399019e2c53a35f4e0063/plugins/Alias/config.py | Python | gpl-2.0 | 2,179 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import render
from cStringIO import StringIO
import xml.dom.minidom
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table
from reportlab.lib.units import mm
from reportlab.lib.pagesizes import A4
import reportlab.lib
import copy
class simple(render.render):
def _render(self):
self.result = StringIO()
parser = xml.dom.minidom.parseString(self.xml)
title = parser.documentElement.tagName
doc = SimpleDocTemplate(self.result, pagesize=A4, title=title,
author='OpenERP, Fabien Pinckaers', leftmargin=10*mm, rightmargin=10*mm)
styles = reportlab.lib.styles.getSampleStyleSheet()
title_style = copy.deepcopy(styles["Heading1"])
title_style.alignment = reportlab.lib.enums.TA_CENTER
story = [ Paragraph(title, title_style) ]
style_level = {}
nodes = [ (parser.documentElement,0) ]
while len(nodes):
node = nodes.pop(0)
value = ''
n=len(node[0].childNodes)-1
while n>=0:
if node[0].childNodes[n].nodeType==3:
value += node[0].childNodes[n].nodeValue
else:
nodes.insert( 0, (node[0].childNodes[n], node[1]+1) )
n-=1
if not node[1] in style_level:
style = copy.deepcopy(styles["Normal"])
style.leftIndent=node[1]*6*mm
style.firstLineIndent=-3*mm
style_level[node[1]] = style
story.append( Paragraph('<b>%s</b>: %s' % (node[0].tagName, value), style_level[node[1]]))
doc.build(story)
return self.result.getvalue()
if __name__=='__main__':
import time
s = simple()
s.xml = '''<test>
<author-list>
<author>
<name>Fabien Pinckaers</name>
<age>23</age>
</author>
<author>
<name>Michel Pinckaers</name>
<age>53</age>
</author>
No other
</author-list>
</test>'''
if s.render():
print s.get()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| BorgERP/borg-erp-6of3 | server/openerp/report/render/simple.py | Python | agpl-3.0 | 3,164 |
"""
This config file runs the simplest dev environment"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=W0401, W0614
from .common import *
from logsettings import get_logger_config
# import settings from LMS for consistent behavior with CMS
from lms.envs.dev import (WIKI_ENABLED)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
LOGGING = get_logger_config(ENV_ROOT / "log",
logging_env="dev",
tracking_filename="tracking.log",
dev_env=True,
debug=True)
update_module_store_settings(
MODULESTORE,
module_store_options={
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': GITHUB_REPO_ROOT,
}
)
DJFS = {
'type': 'osfs',
'directory_root': 'cms/static/djpyfs',
'url_root': '/static/djpyfs'
}
# cdodge: This is the specifier for the MongoDB (using GridFS) backed static content store
# This is for static content for courseware, not system static content (e.g. javascript, css, edX branding, etc)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'xcontent',
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ENV_ROOT / "db" / "edx.db",
}
}
LMS_BASE = "localhost:8000"
FEATURES['PREVIEW_LMS_BASE'] = "localhost:8000"
REPOS = {
'edx4edx': {
'branch': 'master',
        'origin': 'git@github.com:MITx/edx4edx.git',
},
'content-mit-6002x': {
'branch': 'master',
        # 'origin': 'git@github.com:MITx/6002x-fall-2012.git',
        'origin': 'git@github.com:MITx/content-mit-6002x.git',
},
'6.00x': {
'branch': 'master',
        'origin': 'git@github.com:MITx/6.00x.git',
},
'7.00x': {
'branch': 'master',
        'origin': 'git@github.com:MITx/7.00x.git',
},
'3.091x': {
'branch': 'master',
        'origin': 'git@github.com:MITx/3.091x.git',
},
}
CACHES = {
# This is the cache used for most things. Askbot will not work without a
# functioning cache -- it relies on caching to load its settings in places.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/mongo_metadata_inheritance',
'TIMEOUT': 300,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'loc_cache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
},
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
################################ PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info --require {proj_dir}/static/sass/bourbon/lib/bourbon.rb'.format(proj_dir=PROJECT_ROOT)
################################# CELERY ######################################
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ DEBUG TOOLBAR #################################
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo', 'djpyfs')
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
# Enabling the profiler has a weird bug as of django-debug-toolbar==0.9.4 and
# Django=1.3.1/1.4 where requests to views get duplicated (your method gets
# hit twice). So you can uncomment when you need to diagnose performance
# problems, but you shouldn't leave it on.
# 'debug_toolbar.panels.profiling.ProfilingDebugPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
# To see stacktraces for MongoDB queries, set this to True.
# Stacktraces slow down page loads drastically (for pages with lots of queries).
DEBUG_TOOLBAR_MONGO_STACKTRACES = False
# Enable URL that shows information about the status of various services
FEATURES['ENABLE_SERVICE_STATUS'] = True
FEATURES['ALLOW_COURSE_RERUNS'] = True
############################# SEGMENT-IO ##################################
# If there's an environment variable set, grab it and turn on Segment.io
# Note that this is the Studio key. There is a separate key for the LMS.
import os
SEGMENT_IO_KEY = os.environ.get('SEGMENT_IO_KEY')
if SEGMENT_IO_KEY:
FEATURES['SEGMENT_IO'] = True
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=F0401
except ImportError:
pass
| c0710204/edx-platform | cms/envs/dev.py | Python | agpl-3.0 | 6,206 |
# (c) 2014 Michael DeHaan, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import iteritems, string_types
from types import NoneType
from ansible.errors import AnsibleParserError
from ansible.plugins import module_loader
from ansible.parsing.splitter import parse_kv
class ModuleArgsParser:
"""
There are several ways a module and argument set can be expressed:
# legacy form (for a shell command)
- action: shell echo hi
# common shorthand for local actions vs delegate_to
- local_action: shell echo hi
# most commonly:
- copy: src=a dest=b
# legacy form
- action: copy src=a dest=b
# complex args form, for passing structured data
- copy:
src: a
dest: b
# gross, but technically legal
- action:
module: copy
args:
src: a
dest: b
# extra gross, but also legal. in this case, the args specified
    # will act as 'defaults' and will be overridden by any args specified
# in one of the other formats (complex args under the action, or
# parsed from the k=v string
- command: 'pwd'
args:
chdir: '/tmp'
This class has some of the logic to canonicalize these into the form
- module: <module_name>
delegate_to: <optional>
args: <args>
Args may also be munged for certain shell command parameters.
"""
def __init__(self, task_ds=dict()):
assert isinstance(task_ds, dict)
self._task_ds = task_ds
def _split_module_string(self, str):
'''
when module names are expressed like:
action: copy src=a dest=b
the first part of the string is the name of the module
and the rest are strings pertaining to the arguments.
'''
tokens = str.split()
if len(tokens) > 1:
return (tokens[0], " ".join(tokens[1:]))
else:
return (tokens[0], "")
def _handle_shell_weirdness(self, action, args):
'''
given an action name and an args dictionary, return the
proper action name and args dictionary. This mostly is due
to shell/command being treated special and nothing else
'''
# don't handle non shell/command modules in this function
# TODO: in terms of the whole app, should 'raw' also fit here?
if action not in ['shell', 'command']:
return (action, args)
# the shell module really is the command module with an additional
# parameter
if action == 'shell':
action = 'command'
args['_uses_shell'] = True
return (action, args)
def _normalize_parameters(self, thing, action=None, additional_args=dict()):
'''
arguments can be fuzzy. Deal with all the forms.
'''
# final args are the ones we'll eventually return, so first update
# them with any additional args specified, which have lower priority
# than those which may be parsed/normalized next
final_args = dict()
if additional_args:
final_args.update(additional_args)
# how we normalize depends if we figured out what the module name is
# yet. If we have already figured it out, it's an 'old style' invocation.
# otherwise, it's not
if action is not None:
args = self._normalize_old_style_args(thing, action)
else:
(action, args) = self._normalize_new_style_args(thing)
# this can occasionally happen, simplify
if args and 'args' in args:
args = args['args']
# finally, update the args we're going to return with the ones
# which were normalized above
if args:
final_args.update(args)
return (action, final_args)
def _normalize_old_style_args(self, thing, action):
'''
deals with fuzziness in old-style (action/local_action) module invocations
returns tuple of (module_name, dictionary_args)
possible example inputs:
{ 'local_action' : 'shell echo hi' }
{ 'action' : 'shell echo hi' }
{ 'local_action' : { 'module' : 'ec2', 'x' : 1, 'y': 2 }}
standardized outputs like:
( 'command', { _raw_params: 'echo hi', _uses_shell: True }
'''
if isinstance(thing, dict):
# form is like: local_action: { module: 'xyz', x: 2, y: 3 } ... uncommon!
args = thing
elif isinstance(thing, string_types):
# form is like: local_action: copy src=a dest=b ... pretty common
check_raw = action in ('command', 'shell', 'script')
args = parse_kv(thing, check_raw=check_raw)
elif isinstance(thing, NoneType):
# this can happen with modules which take no params, like ping:
args = None
else:
raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
return args
def _normalize_new_style_args(self, thing):
'''
deals with fuzziness in new style module invocations
accepting key=value pairs and dictionaries, and always returning dictionaries
returns tuple of (module_name, dictionary_args)
possible example inputs:
{ 'shell' : 'echo hi' }
{ 'ec2' : { 'region' : 'xyz' }
{ 'ec2' : 'region=xyz' }
standardized outputs like:
('ec2', { region: 'xyz'} )
'''
action = None
args = None
if isinstance(thing, dict):
# form is like: copy: { src: 'a', dest: 'b' } ... common for structured (aka "complex") args
thing = thing.copy()
if 'module' in thing:
action = thing['module']
args = thing.copy()
del args['module']
elif isinstance(thing, string_types):
# form is like: copy: src=a dest=b ... common shorthand throughout ansible
(action, args) = self._split_module_string(thing)
check_raw = action in ('command', 'shell', 'script')
args = parse_kv(args, check_raw=check_raw)
else:
# need a dict or a string, so giving up
raise AnsibleParserError("unexpected parameter type in action: %s" % type(thing), obj=self._task_ds)
return (action, args)
def parse(self):
'''
Given a task in one of the supported forms, parses and returns
returns the action, arguments, and delegate_to values for the
task, dealing with all sorts of levels of fuzziness.
'''
thing = None
action = None
delegate_to = self._task_ds.get('delegate_to', None)
args = dict()
#
# We can have one of action, local_action, or module specified
#
# this is the 'extra gross' scenario detailed above, so we grab
# the args and pass them in as additional arguments, which can/will
# be overwritten via dict updates from the other arg sources below
# FIXME: add test cases for this
additional_args = self._task_ds.get('args', dict())
# action
if 'action' in self._task_ds:
# an old school 'action' statement
thing = self._task_ds['action']
action, args = self._normalize_parameters(thing, additional_args=additional_args)
# local_action
if 'local_action' in self._task_ds:
# local_action is similar but also implies a delegate_to
if action is not None:
raise AnsibleParserError("action and local_action are mutually exclusive", obj=self._task_ds)
thing = self._task_ds.get('local_action', '')
delegate_to = 'localhost'
action, args = self._normalize_parameters(thing, additional_args=additional_args)
# module: <stuff> is the more new-style invocation
# walk the input dictionary to see we recognize a module name
for (item, value) in iteritems(self._task_ds):
if item in module_loader or item == 'meta' or item == 'include':
# finding more than one module name is a problem
if action is not None:
raise AnsibleParserError("conflicting action statements", obj=self._task_ds)
action = item
thing = value
action, args = self._normalize_parameters(value, action=action, additional_args=additional_args)
# if we didn't see any module in the task at all, it's not a task really
if action is None:
raise AnsibleParserError("no action detected in task", obj=self._task_ds)
# FIXME: disabled for now, as there are other places besides the shell/script modules where
# having variables as the sole param for the module is valid (include_vars, add_host, and group_by?)
#elif args.get('_raw_params', '') != '' and action not in ('command', 'shell', 'script', 'include_vars'):
# raise AnsibleParserError("this task has extra params, which is only allowed in the command, shell or script module.", obj=self._task_ds)
# shell modules require special handling
(action, args) = self._handle_shell_weirdness(action, args)
return (action, args, delegate_to)
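# --- Illustrative sketch, not part of the original module ---
# Assuming ModuleArgsParser is imported as defined above, the task forms listed
# in its docstring normalize to the same (action, args, delegate_to) shape; the
# expected results below follow from parse_kv() and _handle_shell_weirdness():
#
#   ModuleArgsParser({'copy': 'src=a dest=b'}).parse()
#   # -> ('copy', {'src': 'a', 'dest': 'b'}, None)
#
#   ModuleArgsParser({'action': 'copy src=a dest=b'}).parse()
#   # -> ('copy', {'src': 'a', 'dest': 'b'}, None)
#
#   ModuleArgsParser({'local_action': 'shell echo hi'}).parse()
#   # -> ('command', {'_raw_params': 'echo hi', '_uses_shell': True}, 'localhost')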
| M0ses/ansible | v2/ansible/parsing/mod_args.py | Python | gpl-3.0 | 10,166 |
# coding: utf-8
from __future__ import absolute_import, division, print_function, with_statement
import re
import sys
import datetime
import tornado.escape
from tornado.escape import utf8
from tornado.util import raise_exc_info, Configurable, exec_in, ArgReplacer, timedelta_to_seconds, import_object, re_unescape, PY3
from tornado.test.util import unittest
if PY3:
from io import StringIO
else:
from cStringIO import StringIO
class RaiseExcInfoTest(unittest.TestCase):
def test_two_arg_exception(self):
# This test would fail on python 3 if raise_exc_info were simply
# a three-argument raise statement, because TwoArgException
# doesn't have a "copy constructor"
class TwoArgException(Exception):
def __init__(self, a, b):
super(TwoArgException, self).__init__()
self.a, self.b = a, b
try:
raise TwoArgException(1, 2)
except TwoArgException:
exc_info = sys.exc_info()
try:
raise_exc_info(exc_info)
self.fail("didn't get expected exception")
except TwoArgException as e:
self.assertIs(e, exc_info[1])
class TestConfigurable(Configurable):
@classmethod
def configurable_base(cls):
return TestConfigurable
@classmethod
def configurable_default(cls):
return TestConfig1
class TestConfig1(TestConfigurable):
def initialize(self, pos_arg=None, a=None):
self.a = a
self.pos_arg = pos_arg
class TestConfig2(TestConfigurable):
def initialize(self, pos_arg=None, b=None):
self.b = b
self.pos_arg = pos_arg
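# The three classes above exercise tornado.util.Configurable: instantiating the
# base TestConfigurable yields whichever implementation is currently configured
# (TestConfig1 by default), and keyword arguments passed to configure() act as
# defaults for later instantiations, as ConfigurableTest verifies below.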
class ConfigurableTest(unittest.TestCase):
def setUp(self):
self.saved = TestConfigurable._save_configuration()
def tearDown(self):
TestConfigurable._restore_configuration(self.saved)
def checkSubclasses(self):
# no matter how the class is configured, it should always be
# possible to instantiate the subclasses directly
self.assertIsInstance(TestConfig1(), TestConfig1)
self.assertIsInstance(TestConfig2(), TestConfig2)
obj = TestConfig1(a=1)
self.assertEqual(obj.a, 1)
obj = TestConfig2(b=2)
self.assertEqual(obj.b, 2)
def test_default(self):
obj = TestConfigurable()
self.assertIsInstance(obj, TestConfig1)
self.assertIs(obj.a, None)
obj = TestConfigurable(a=1)
self.assertIsInstance(obj, TestConfig1)
self.assertEqual(obj.a, 1)
self.checkSubclasses()
def test_config_class(self):
TestConfigurable.configure(TestConfig2)
obj = TestConfigurable()
self.assertIsInstance(obj, TestConfig2)
self.assertIs(obj.b, None)
obj = TestConfigurable(b=2)
self.assertIsInstance(obj, TestConfig2)
self.assertEqual(obj.b, 2)
self.checkSubclasses()
def test_config_args(self):
TestConfigurable.configure(None, a=3)
obj = TestConfigurable()
self.assertIsInstance(obj, TestConfig1)
self.assertEqual(obj.a, 3)
obj = TestConfigurable(42, a=4)
self.assertIsInstance(obj, TestConfig1)
self.assertEqual(obj.a, 4)
self.assertEqual(obj.pos_arg, 42)
self.checkSubclasses()
# args bound in configure don't apply when using the subclass directly
obj = TestConfig1()
self.assertIs(obj.a, None)
def test_config_class_args(self):
TestConfigurable.configure(TestConfig2, b=5)
obj = TestConfigurable()
self.assertIsInstance(obj, TestConfig2)
self.assertEqual(obj.b, 5)
obj = TestConfigurable(42, b=6)
self.assertIsInstance(obj, TestConfig2)
self.assertEqual(obj.b, 6)
self.assertEqual(obj.pos_arg, 42)
self.checkSubclasses()
# args bound in configure don't apply when using the subclass directly
obj = TestConfig2()
self.assertIs(obj.b, None)
class UnicodeLiteralTest(unittest.TestCase):
def test_unicode_escapes(self):
self.assertEqual(utf8(u'\u00e9'), b'\xc3\xa9')
class ExecInTest(unittest.TestCase):
# This test is python 2 only because there are no new future imports
# defined in python 3 yet.
@unittest.skipIf(sys.version_info >= print_function.getMandatoryRelease(),
'no testable future imports')
def test_no_inherit_future(self):
# This file has from __future__ import print_function...
f = StringIO()
print('hello', file=f)
# ...but the template doesn't
exec_in('print >> f, "world"', dict(f=f))
self.assertEqual(f.getvalue(), 'hello\nworld\n')
class ArgReplacerTest(unittest.TestCase):
def setUp(self):
def function(x, y, callback=None, z=None):
pass
self.replacer = ArgReplacer(function, 'callback')
def test_omitted(self):
args = (1, 2)
kwargs = dict()
self.assertIs(self.replacer.get_old_value(args, kwargs), None)
self.assertEqual(self.replacer.replace('new', args, kwargs),
(None, (1, 2), dict(callback='new')))
def test_position(self):
args = (1, 2, 'old', 3)
kwargs = dict()
self.assertEqual(self.replacer.get_old_value(args, kwargs), 'old')
self.assertEqual(self.replacer.replace('new', args, kwargs),
('old', [1, 2, 'new', 3], dict()))
def test_keyword(self):
args = (1,)
kwargs = dict(y=2, callback='old', z=3)
self.assertEqual(self.replacer.get_old_value(args, kwargs), 'old')
self.assertEqual(self.replacer.replace('new', args, kwargs),
('old', (1,), dict(y=2, callback='new', z=3)))
class TimedeltaToSecondsTest(unittest.TestCase):
def test_timedelta_to_seconds(self):
time_delta = datetime.timedelta(hours=1)
self.assertEqual(timedelta_to_seconds(time_delta), 3600.0)
class ImportObjectTest(unittest.TestCase):
def test_import_member(self):
self.assertIs(import_object('tornado.escape.utf8'), utf8)
def test_import_member_unicode(self):
self.assertIs(import_object(u'tornado.escape.utf8'), utf8)
def test_import_module(self):
self.assertIs(import_object('tornado.escape'), tornado.escape)
def test_import_module_unicode(self):
# The internal implementation of __import__ differs depending on
# whether the thing being imported is a module or not.
# This variant requires a byte string in python 2.
self.assertIs(import_object(u'tornado.escape'), tornado.escape)
class ReUnescapeTest(unittest.TestCase):
def test_re_unescape(self):
test_strings = (
'/favicon.ico',
'index.html',
'Hello, World!',
'!$@#%;',
)
for string in test_strings:
self.assertEqual(string, re_unescape(re.escape(string)))
def test_re_unescape_raises_error_on_invalid_input(self):
with self.assertRaises(ValueError):
re_unescape('\\d')
with self.assertRaises(ValueError):
re_unescape('\\b')
with self.assertRaises(ValueError):
re_unescape('\\Z')
| KILLER-CHIEF/uqcs-hackathon-2016 | tornado/test/util_test.py | Python | gpl-3.0 | 7,283 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova import utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt.volume import glusterfs
class LibvirtGlusterfsVolumeDriverTestCase(
test_volume.LibvirtVolumeBaseTestCase):
def test_libvirt_glusterfs_driver(self):
mnt_base = '/mnt'
self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_conn)
self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
export_string = '192.168.1.1:/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
connection_info = {'data': {'export': export_string,
'name': self.name}}
libvirt_driver.connect_volume(connection_info, self.disk_info)
libvirt_driver.disconnect_volume(connection_info, "vde")
device_path = os.path.join(export_mnt_base,
connection_info['data']['name'])
self.assertEqual(device_path, connection_info['data']['device_path'])
expected_commands = [
('mkdir', '-p', export_mnt_base),
('mount', '-t', 'glusterfs', export_string, export_mnt_base),
('umount', export_mnt_base)]
self.assertEqual(expected_commands, self.executes)
def test_libvirt_glusterfs_driver_get_config(self):
mnt_base = '/mnt'
self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_conn)
export_string = '192.168.1.1:/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
file_path = os.path.join(export_mnt_base, self.name)
# Test default format - raw
connection_info = {'data': {'export': export_string,
'name': self.name,
'device_path': file_path}}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertFileTypeEquals(tree, file_path)
self.assertEqual('raw', tree.find('./driver').get('type'))
# Test specified format - qcow2
connection_info = {'data': {'export': export_string,
'name': self.name,
'device_path': file_path,
'format': 'qcow2'}}
conf = libvirt_driver.get_config(connection_info, self.disk_info)
tree = conf.format_dom()
self._assertFileTypeEquals(tree, file_path)
self.assertEqual('qcow2', tree.find('./driver').get('type'))
def test_libvirt_glusterfs_driver_already_mounted(self):
mnt_base = '/mnt'
self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_conn)
export_string = '192.168.1.1:/volume-00001'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
connection_info = {'data': {'export': export_string,
'name': self.name}}
libvirt_driver.connect_volume(connection_info, self.disk_info)
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [
('findmnt', '--target', export_mnt_base,
'--source', export_string),
('umount', export_mnt_base)]
self.assertEqual(self.executes, expected_commands)
def test_libvirt_glusterfs_driver_with_opts(self):
mnt_base = '/mnt'
self.flags(glusterfs_mount_point_base=mnt_base, group='libvirt')
libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_conn)
self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
export_string = '192.168.1.1:/volume-00001'
options = '-o backupvolfile-server=192.168.1.2'
export_mnt_base = os.path.join(mnt_base,
utils.get_hash_str(export_string))
connection_info = {'data': {'export': export_string,
'name': self.name,
'options': options}}
libvirt_driver.connect_volume(connection_info, self.disk_info)
libvirt_driver.disconnect_volume(connection_info, "vde")
expected_commands = [
('mkdir', '-p', export_mnt_base),
('mount', '-t', 'glusterfs',
'-o', 'backupvolfile-server=192.168.1.2',
export_string, export_mnt_base),
('umount', export_mnt_base),
]
self.assertEqual(self.executes, expected_commands)
def test_libvirt_glusterfs_libgfapi(self):
self.flags(qemu_allowed_storage_drivers=['gluster'], group='libvirt')
libvirt_driver = glusterfs.LibvirtGlusterfsVolumeDriver(self.fake_conn)
self.stubs.Set(libvirt_utils, 'is_mounted', lambda x, d: False)
export_string = '192.168.1.1:/volume-00001'
name = 'volume-00001'
connection_info = {'data': {'export': export_string, 'name': name}}
disk_info = {
"dev": "vde",
"type": "disk",
"bus": "virtio",
}
libvirt_driver.connect_volume(connection_info, disk_info)
conf = libvirt_driver.get_config(connection_info, disk_info)
tree = conf.format_dom()
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./driver').get('type'), 'raw')
source = tree.find('./source')
self.assertEqual(source.get('protocol'), 'gluster')
self.assertEqual(source.get('name'), 'volume-00001/volume-00001')
self.assertEqual(source.find('./host').get('name'), '192.168.1.1')
self.assertEqual(source.find('./host').get('port'), '24007')
libvirt_driver.disconnect_volume(connection_info, "vde")
| takeshineshiro/nova | nova/tests/unit/virt/libvirt/volume/test_glusterfs.py | Python | apache-2.0 | 6,687 |
#!/usr/bin/env python
__all__ = ['facebook_download']
from ..common import *
import json
def facebook_download(url, output_dir='.', merge=True, info_only=False):
html = get_html(url)
title = r1(r'<title id="pageTitle">(.+) \| Facebook</title>', html)
s2 = parse.unquote(unicodize(r1(r'\["params","([^"]*)"\]', html)))
data = json.loads(s2)
video_data = data["video_data"][0]
for fmt in ["hd_src", "sd_src"]:
src = video_data[fmt]
if src:
break
type, ext, size = url_info(src, True)
print_info(site_info, title, type, size)
if not info_only:
download_urls([src], title, ext, size, output_dir, merge=merge)
site_info = "Facebook.com"
download = facebook_download
download_playlist = playlist_not_supported('facebook')
| rain1988/you-get | src/you_get/extractors/facebook.py | Python | mit | 796 |
# Copyright 2009 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import os
from threading import Lock
from whoosh.compat import BytesIO
from whoosh.index import _DEF_INDEX_NAME
from whoosh.store import Storage
from whoosh.support.filelock import FileLock
from whoosh.filedb.structfile import StructFile
class ReadOnlyError(Exception):
pass
class FileStorage(Storage):
"""Storage object that stores the index as files in a directory on disk.
"""
def __init__(self, path, mapped=True, readonly=False):
self.folder = path
self.mapped = mapped
self.readonly = readonly
self.locks = {}
if not os.path.exists(path):
raise IOError("Directory %s does not exist" % path)
def create_index(self, schema, indexname=_DEF_INDEX_NAME):
if self.readonly:
raise ReadOnlyError
from whoosh.filedb.fileindex import _create_index, FileIndex
_create_index(self, schema, indexname)
return FileIndex(self, schema, indexname)
def open_index(self, indexname=_DEF_INDEX_NAME, schema=None):
from whoosh.filedb.fileindex import FileIndex
return FileIndex(self, schema=schema, indexname=indexname)
def create_file(self, name, excl=False, mode="wb", **kwargs):
if self.readonly:
raise ReadOnlyError
path = self._fpath(name)
if excl:
flags = os.O_CREAT | os.O_EXCL | os.O_RDWR
if hasattr(os, "O_BINARY"):
flags |= os.O_BINARY
fd = os.open(path, flags)
fileobj = os.fdopen(fd, mode)
else:
fileobj = open(path, mode)
f = StructFile(fileobj, name=name, mapped=self.mapped, **kwargs)
return f
def open_file(self, name, *args, **kwargs):
try:
f = StructFile(open(self._fpath(name), "rb"), name=name, *args,
**kwargs)
except IOError:
#print("Tried to open %r, files=%r" % (name, self.list()))
raise
return f
def _fpath(self, fname):
return os.path.join(self.folder, fname)
def clean(self):
path = self.folder
if not os.path.exists(path):
os.mkdir(path)
files = self.list()
for file in files:
os.remove(os.path.join(path, file))
def list(self):
try:
files = os.listdir(self.folder)
except IOError:
files = []
return files
def file_exists(self, name):
return os.path.exists(self._fpath(name))
def file_modified(self, name):
return os.path.getmtime(self._fpath(name))
def file_length(self, name):
return os.path.getsize(self._fpath(name))
def delete_file(self, name):
os.remove(self._fpath(name))
def rename_file(self, frm, to, safe=False):
if os.path.exists(self._fpath(to)):
if safe:
raise NameError("File %r exists" % to)
else:
os.remove(self._fpath(to))
os.rename(self._fpath(frm), self._fpath(to))
def lock(self, name):
return FileLock(self._fpath(name))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self.folder))
class RamStorage(FileStorage):
"""Storage object that keeps the index in memory.
"""
def __init__(self):
self.files = {}
self.locks = {}
self.folder = ''
def list(self):
return list(self.files.keys())
def clean(self):
self.files = {}
def total_size(self):
return sum(self.file_length(f) for f in self.list())
def file_exists(self, name):
return name in self.files
def file_length(self, name):
if name not in self.files:
raise NameError
return len(self.files[name])
def delete_file(self, name):
if name not in self.files:
raise NameError
del self.files[name]
def rename_file(self, name, newname, safe=False):
if name not in self.files:
raise NameError("File %r does not exist" % name)
if safe and newname in self.files:
raise NameError("File %r exists" % newname)
content = self.files[name]
del self.files[name]
self.files[newname] = content
def create_file(self, name, **kwargs):
def onclose_fn(sfile):
self.files[name] = sfile.file.getvalue()
f = StructFile(BytesIO(), name=name, onclose=onclose_fn)
return f
def open_file(self, name, *args, **kwargs):
if name not in self.files:
raise NameError("No such file %r" % name)
return StructFile(BytesIO(self.files[name]), name=name, *args,
**kwargs)
def lock(self, name):
if name not in self.locks:
self.locks[name] = Lock()
return self.locks[name]
def copy_to_ram(storage):
"""Copies the given FileStorage object into a new RamStorage object.
:rtype: :class:`RamStorage`
"""
import shutil
ram = RamStorage()
for name in storage.list():
f = storage.open_file(name)
r = ram.create_file(name)
shutil.copyfileobj(f.file, r.file)
f.close()
r.close()
return ram
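# --- Illustrative sketch, not part of the original module ---
# Typical use of the storages defined above, assuming a Whoosh Schema instance
# named `schema` and an existing directory "indexdir":
#
#   st = FileStorage("indexdir")    # raises IOError if the directory is missing
#   ix = st.create_index(schema)    # ReadOnlyError if the storage is readonly
#   ram = copy_to_ram(st)           # copy every stored file into a RamStorage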
| cscott/wikiserver | whoosh/filedb/filestore.py | Python | gpl-2.0 | 6,771 |
from django.utils.translation import ugettext_lazy as _
#: http://sk.wikipedia.org/wiki/Administrat%C3%ADvne_%C4%8Dlenenie_Slovenska
DISTRICT_CHOICES = (
('BB', _('Banska Bystrica')),
('BS', _('Banska Stiavnica')),
('BJ', _('Bardejov')),
('BN', _('Banovce nad Bebravou')),
('BR', _('Brezno')),
('BA1', _('Bratislava I')),
('BA2', _('Bratislava II')),
('BA3', _('Bratislava III')),
('BA4', _('Bratislava IV')),
('BA5', _('Bratislava V')),
('BY', _('Bytca')),
('CA', _('Cadca')),
('DT', _('Detva')),
('DK', _('Dolny Kubin')),
('DS', _('Dunajska Streda')),
('GA', _('Galanta')),
('GL', _('Gelnica')),
('HC', _('Hlohovec')),
('HE', _('Humenne')),
('IL', _('Ilava')),
('KK', _('Kezmarok')),
('KN', _('Komarno')),
('KE1', _('Kosice I')),
('KE2', _('Kosice II')),
('KE3', _('Kosice III')),
('KE4', _('Kosice IV')),
('KEO', _('Kosice - okolie')),
('KA', _('Krupina')),
('KM', _('Kysucke Nove Mesto')),
('LV', _('Levice')),
('LE', _('Levoca')),
('LM', _('Liptovsky Mikulas')),
('LC', _('Lucenec')),
('MA', _('Malacky')),
('MT', _('Martin')),
('ML', _('Medzilaborce')),
('MI', _('Michalovce')),
('MY', _('Myjava')),
('NO', _('Namestovo')),
('NR', _('Nitra')),
('NM', _('Nove Mesto nad Vahom')),
('NZ', _('Nove Zamky')),
('PE', _('Partizanske')),
('PK', _('Pezinok')),
('PN', _('Piestany')),
('PT', _('Poltar')),
('PP', _('Poprad')),
('PB', _('Povazska Bystrica')),
('PO', _('Presov')),
('PD', _('Prievidza')),
('PU', _('Puchov')),
('RA', _('Revuca')),
('RS', _('Rimavska Sobota')),
('RV', _('Roznava')),
('RK', _('Ruzomberok')),
('SB', _('Sabinov')),
('SC', _('Senec')),
('SE', _('Senica')),
('SI', _('Skalica')),
('SV', _('Snina')),
('SO', _('Sobrance')),
('SN', _('Spisska Nova Ves')),
('SL', _('Stara Lubovna')),
('SP', _('Stropkov')),
('SK', _('Svidnik')),
('SA', _('Sala')),
('TO', _('Topolcany')),
('TV', _('Trebisov')),
('TN', _('Trencin')),
('TT', _('Trnava')),
('TR', _('Turcianske Teplice')),
('TS', _('Tvrdosin')),
('VK', _('Velky Krtis')),
('VT', _('Vranov nad Toplou')),
('ZM', _('Zlate Moravce')),
('ZV', _('Zvolen')),
('ZC', _('Zarnovica')),
('ZH', _('Ziar nad Hronom')),
('ZA', _('Zilina')),
)
| rodrigolucianocosta/ControleEstoque | rOne/Storage101/django-localflavor/django-localflavor-1.3/localflavor/sk/sk_districts.py | Python | gpl-3.0 | 2,417 |
# Copyright 2012,2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
VERSION = (0, 0, 11, 1, -1)
PREFIX = ["pgi.repository"]
| jackkiej/SickRage | lib/pgi/const.py | Python | gpl-3.0 | 355 |
import warnings
from pymysql.tests import base
import pymysql.cursors
class CursorTest(base.PyMySQLTestCase):
def setUp(self):
super(CursorTest, self).setUp()
conn = self.connections[0]
self.safe_create_table(
conn,
"test", "create table test (data varchar(10))",
)
cursor = conn.cursor()
cursor.execute(
"insert into test (data) values "
"('row1'), ('row2'), ('row3'), ('row4'), ('row5')")
cursor.close()
self.test_connection = pymysql.connect(**self.databases[0])
self.addCleanup(self.test_connection.close)
def test_cleanup_rows_unbuffered(self):
conn = self.test_connection
cursor = conn.cursor(pymysql.cursors.SSCursor)
cursor.execute("select * from test as t1, test as t2")
for counter, row in enumerate(cursor):
if counter > 10:
break
del cursor
self.safe_gc_collect()
c2 = conn.cursor()
c2.execute("select 1")
self.assertEqual(c2.fetchone(), (1,))
self.assertIsNone(c2.fetchone())
def test_cleanup_rows_buffered(self):
conn = self.test_connection
cursor = conn.cursor(pymysql.cursors.Cursor)
cursor.execute("select * from test as t1, test as t2")
for counter, row in enumerate(cursor):
if counter > 10:
break
del cursor
self.safe_gc_collect()
c2 = conn.cursor()
c2.execute("select 1")
self.assertEqual(
c2.fetchone(), (1,)
)
self.assertIsNone(c2.fetchone())
def test_executemany(self):
conn = self.test_connection
cursor = conn.cursor(pymysql.cursors.Cursor)
m = pymysql.cursors.RE_INSERT_VALUES.match("INSERT INTO TEST (ID, NAME) VALUES (%s, %s)")
self.assertIsNotNone(m, 'error parse %s')
self.assertEqual(m.group(3), '', 'group 3 not blank, bug in RE_INSERT_VALUES?')
m = pymysql.cursors.RE_INSERT_VALUES.match("INSERT INTO TEST (ID, NAME) VALUES (%(id)s, %(name)s)")
self.assertIsNotNone(m, 'error parse %(name)s')
self.assertEqual(m.group(3), '', 'group 3 not blank, bug in RE_INSERT_VALUES?')
m = pymysql.cursors.RE_INSERT_VALUES.match("INSERT INTO TEST (ID, NAME) VALUES (%(id_name)s, %(name)s)")
self.assertIsNotNone(m, 'error parse %(id_name)s')
self.assertEqual(m.group(3), '', 'group 3 not blank, bug in RE_INSERT_VALUES?')
m = pymysql.cursors.RE_INSERT_VALUES.match("INSERT INTO TEST (ID, NAME) VALUES (%(id_name)s, %(name)s) ON duplicate update")
self.assertIsNotNone(m, 'error parse %(id_name)s')
self.assertEqual(m.group(3), ' ON duplicate update', 'group 3 not ON duplicate update, bug in RE_INSERT_VALUES?')
        # cursor._executed must be "insert into test (data) values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9)"
# list args
data = range(10)
cursor.executemany("insert into test (data) values (%s)", data)
self.assertTrue(cursor._executed.endswith(b",(7),(8),(9)"), 'execute many with %s not in one query')
# dict args
data_dict = [{'data': i} for i in range(10)]
cursor.executemany("insert into test (data) values (%(data)s)", data_dict)
self.assertTrue(cursor._executed.endswith(b",(7),(8),(9)"), 'execute many with %(data)s not in one query')
# %% in column set
cursor.execute("""\
CREATE TABLE percent_test (
`A%` INTEGER,
`B%` INTEGER)""")
try:
q = "INSERT INTO percent_test (`A%%`, `B%%`) VALUES (%s, %s)"
self.assertIsNotNone(pymysql.cursors.RE_INSERT_VALUES.match(q))
cursor.executemany(q, [(3, 4), (5, 6)])
self.assertTrue(cursor._executed.endswith(b"(3, 4),(5, 6)"), "executemany with %% not in one query")
finally:
cursor.execute("DROP TABLE IF EXISTS percent_test")
| zwChan/VATEC | ~/eb-virt/Lib/site-packages/pymysql/tests/test_cursor.py | Python | apache-2.0 | 4,013 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import array as pyarray
import unittest
from numpy import array
from pyspark.mllib.linalg import Vector, SparseVector, DenseVector, VectorUDT, _convert_to_vector, \
DenseMatrix, SparseMatrix, Vectors, Matrices, MatrixUDT
from pyspark.mllib.random import RandomRDDs
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.stat import Statistics
from pyspark.sql.utils import IllegalArgumentException
from pyspark.testing.mllibutils import MLlibTestCase
class StatTests(MLlibTestCase):
# SPARK-4023
def test_col_with_different_rdds(self):
# numpy
data = RandomRDDs.normalVectorRDD(self.sc, 1000, 10, 10)
summary = Statistics.colStats(data)
self.assertEqual(1000, summary.count())
# array
data = self.sc.parallelize([range(10)] * 10)
summary = Statistics.colStats(data)
self.assertEqual(10, summary.count())
# array
data = self.sc.parallelize([pyarray.array("d", range(10))] * 10)
summary = Statistics.colStats(data)
self.assertEqual(10, summary.count())
def test_col_norms(self):
data = RandomRDDs.normalVectorRDD(self.sc, 1000, 10, 10)
summary = Statistics.colStats(data)
self.assertEqual(10, len(summary.normL1()))
self.assertEqual(10, len(summary.normL2()))
data2 = self.sc.parallelize(range(10)).map(lambda x: Vectors.dense(x))
summary2 = Statistics.colStats(data2)
self.assertEqual(array([45.0]), summary2.normL1())
import math
expectedNormL2 = math.sqrt(sum(map(lambda x: x*x, range(10))))
self.assertTrue(math.fabs(summary2.normL2()[0] - expectedNormL2) < 1e-14)
class ChiSqTestTests(MLlibTestCase):
def test_goodness_of_fit(self):
from numpy import inf
observed = Vectors.dense([4, 6, 5])
pearson = Statistics.chiSqTest(observed)
# Validated against the R command `chisq.test(c(4, 6, 5), p=c(1/3, 1/3, 1/3))`
self.assertEqual(pearson.statistic, 0.4)
self.assertEqual(pearson.degreesOfFreedom, 2)
self.assertAlmostEqual(pearson.pValue, 0.8187, 4)
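# Editorial note (not part of the original test): with 15 observations and uniform
# expected counts E_i = 15 * (1/3) = 5, the Pearson statistic is
#     sum((O_i - E_i)**2 / E_i) = ((4-5)**2 + (6-5)**2 + (5-5)**2) / 5 = 0.4
# with 3 - 1 = 2 degrees of freedom, matching the assertions above.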
# Different expected and observed sum
observed1 = Vectors.dense([21, 38, 43, 80])
expected1 = Vectors.dense([3, 5, 7, 20])
pearson1 = Statistics.chiSqTest(observed1, expected1)
# Results validated against the R command
# `chisq.test(c(21, 38, 43, 80), p=c(3/35, 1/7, 1/5, 4/7))`
self.assertAlmostEqual(pearson1.statistic, 14.1429, 4)
self.assertEqual(pearson1.degreesOfFreedom, 3)
self.assertAlmostEqual(pearson1.pValue, 0.002717, 4)
# Vectors with different sizes
observed3 = Vectors.dense([1.0, 2.0, 3.0])
expected3 = Vectors.dense([1.0, 2.0, 3.0, 4.0])
self.assertRaises(ValueError, Statistics.chiSqTest, observed3, expected3)
# Negative counts in observed
neg_obs = Vectors.dense([1.0, 2.0, 3.0, -4.0])
self.assertRaises(IllegalArgumentException, Statistics.chiSqTest, neg_obs, expected1)
# Count = 0.0 in expected but not observed
zero_expected = Vectors.dense([1.0, 0.0, 3.0])
pearson_inf = Statistics.chiSqTest(observed, zero_expected)
self.assertEqual(pearson_inf.statistic, inf)
self.assertEqual(pearson_inf.degreesOfFreedom, 2)
self.assertEqual(pearson_inf.pValue, 0.0)
# 0.0 in expected and observed simultaneously
zero_observed = Vectors.dense([2.0, 0.0, 1.0])
self.assertRaises(
IllegalArgumentException, Statistics.chiSqTest, zero_observed, zero_expected)
def test_matrix_independence(self):
data = [40.0, 24.0, 29.0, 56.0, 32.0, 42.0, 31.0, 10.0, 0.0, 30.0, 15.0, 12.0]
chi = Statistics.chiSqTest(Matrices.dense(3, 4, data))
# Results validated against R command
# `chisq.test(rbind(c(40, 56, 31, 30),c(24, 32, 10, 15), c(29, 42, 0, 12)))`
self.assertAlmostEqual(chi.statistic, 21.9958, 4)
self.assertEqual(chi.degreesOfFreedom, 6)
self.assertAlmostEqual(chi.pValue, 0.001213, 4)
# Negative counts
neg_counts = Matrices.dense(2, 2, [4.0, 5.0, 3.0, -3.0])
self.assertRaises(IllegalArgumentException, Statistics.chiSqTest, neg_counts)
# Row sum = 0.0
row_zero = Matrices.dense(2, 2, [0.0, 1.0, 0.0, 2.0])
self.assertRaises(IllegalArgumentException, Statistics.chiSqTest, row_zero)
# Column sum = 0.0
col_zero = Matrices.dense(2, 2, [0.0, 0.0, 2.0, 2.0])
self.assertRaises(IllegalArgumentException, Statistics.chiSqTest, col_zero)
def test_chi_sq_pearson(self):
data = [
LabeledPoint(0.0, Vectors.dense([0.5, 10.0])),
LabeledPoint(0.0, Vectors.dense([1.5, 20.0])),
LabeledPoint(1.0, Vectors.dense([1.5, 30.0])),
LabeledPoint(0.0, Vectors.dense([3.5, 30.0])),
LabeledPoint(0.0, Vectors.dense([3.5, 40.0])),
LabeledPoint(1.0, Vectors.dense([3.5, 40.0]))
]
for numParts in [2, 4, 6, 8]:
chi = Statistics.chiSqTest(self.sc.parallelize(data, numParts))
feature1 = chi[0]
self.assertEqual(feature1.statistic, 0.75)
self.assertEqual(feature1.degreesOfFreedom, 2)
self.assertAlmostEqual(feature1.pValue, 0.6873, 4)
feature2 = chi[1]
self.assertEqual(feature2.statistic, 1.5)
self.assertEqual(feature2.degreesOfFreedom, 3)
self.assertAlmostEqual(feature2.pValue, 0.6823, 4)
def test_right_number_of_results(self):
num_cols = 1001
sparse_data = [
LabeledPoint(0.0, Vectors.sparse(num_cols, [(100, 2.0)])),
LabeledPoint(0.1, Vectors.sparse(num_cols, [(200, 1.0)]))
]
chi = Statistics.chiSqTest(self.sc.parallelize(sparse_data))
self.assertEqual(len(chi), num_cols)
self.assertIsNotNone(chi[1000])
class KolmogorovSmirnovTest(MLlibTestCase):
def test_R_implementation_equivalence(self):
data = self.sc.parallelize([
1.1626852897838, -0.585924465893051, 1.78546500331661, -1.33259371048501,
-0.446566766553219, 0.569606122374976, -2.88971761441412, -0.869018343326555,
-0.461702683149641, -0.555540910137444, -0.0201353678515895, -0.150382224136063,
-0.628126755843964, 1.32322085193283, -1.52135057001199, -0.437427868856691,
0.970577579543399, 0.0282226444247749, -0.0857821886527593, 0.389214404984942
])
model = Statistics.kolmogorovSmirnovTest(data, "norm")
self.assertAlmostEqual(model.statistic, 0.189, 3)
self.assertAlmostEqual(model.pValue, 0.422, 3)
model = Statistics.kolmogorovSmirnovTest(data, "norm", 0, 1)
self.assertAlmostEqual(model.statistic, 0.189, 3)
self.assertAlmostEqual(model.pValue, 0.422, 3)
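# Editorial note: "norm" without further arguments tests against the standard normal
# distribution, so the explicit (0, 1) call is expected to produce the same statistic
# and p-value -- which is exactly what the repeated assertions above verify.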
if __name__ == "__main__":
from pyspark.mllib.tests.test_stat import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| pgandhi999/spark | python/pyspark/mllib/tests/test_stat.py | Python | apache-2.0 | 8,008 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage Check Point Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cp_mgmt_service_rpc_facts
short_description: Get service-rpc objects facts on Check Point over Web Services API
description:
- Get service-rpc objects facts on Check Point devices.
- All operations are performed over Web Services API.
- This module handles both operations, get a specific object and get several objects.
For getting a specific object use the parameter 'name'.
version_added: "2.9"
author: "Or Soffer (@chkp-orso)"
options:
name:
description:
- Object name.
This parameter is relevant only for getting a specific object.
type: str
details_level:
description:
- The level of detail for some of the fields in the response can vary from showing only the UID value of the object to a fully detailed
representation of the object.
type: str
choices: ['uid', 'standard', 'full']
limit:
description:
- No more than that many results will be returned.
This parameter is relevant only when retrieving multiple objects.
type: int
offset:
description:
- Skip that many results before beginning to return them.
This parameter is relevant only for getting few objects.
type: int
order:
description:
- Sorts results by the given field. By default the results are sorted in ascending order by name.
This parameter is relevant only when retrieving multiple objects.
type: list
suboptions:
ASC:
description:
- Sorts results by the given field in ascending order.
type: str
choices: ['name']
DESC:
description:
- Sorts results by the given field in descending order.
type: str
choices: ['name']
show_membership:
description:
- Indicates whether to calculate and show the "groups" field for every object in the reply.
type: bool
extends_documentation_fragment: checkpoint_facts
"""
EXAMPLES = """
- name: show-service-rpc
cp_mgmt_service_rpc_facts:
name: nisplus
- name: show-services-rpc
cp_mgmt_service_rpc_facts:
details_level: standard
limit: 50
offset: 0
"""
RETURN = """
ansible_facts:
description: The checkpoint object facts.
returned: always.
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec_for_facts, api_call_facts
def main():
argument_spec = dict(
name=dict(type='str'),
details_level=dict(type='str', choices=['uid', 'standard', 'full']),
limit=dict(type='int'),
offset=dict(type='int'),
order=dict(type='list', options=dict(
ASC=dict(type='str', choices=['name']),
DESC=dict(type='str', choices=['name'])
)),
show_membership=dict(type='bool')
)
argument_spec.update(checkpoint_argument_spec_for_facts)
module = AnsibleModule(argument_spec=argument_spec)
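# Editorial note (not part of the module): both the singular and plural API command
# names are passed to the facts helper, which presumably issues "show service-rpc"
# when 'name' is supplied and "show services-rpc" otherwise (see EXAMPLES above).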
api_call_object = "service-rpc"
api_call_object_plural_version = "services-rpc"
result = api_call_facts(module, api_call_object, api_call_object_plural_version)
module.exit_json(ansible_facts=result)
if __name__ == '__main__':
main()
| anryko/ansible | lib/ansible/modules/network/check_point/cp_mgmt_service_rpc_facts.py | Python | gpl-3.0 | 4,158 |
from typing import NamedTuple
MyTuple = NamedTuple('MyTuple', [('foo', int)])
def func():
return MyTuple(foo=42)
| smmribeiro/intellij-community | python/testData/intentions/PyAnnotateVariableTypeIntentionTest/AnnotationTypingNamedTupleInOtherFile/lib.py | Python | apache-2.0 | 120 |
"""SCons.Scanner.RC
This module implements the dependency scanner for RC (Windows resource
script) files.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Scanner/RC.py 5357 2011/09/09 21:31:03 bdeegan"
import SCons.Node.FS
import SCons.Scanner
import re
def RCScan():
"""Return a prototype Scanner instance for scanning RC source files"""
res_re = r'^(?:\s*#\s*(?:include)|' \
         r'.*?\s+(?:ICON|BITMAP|CURSOR|HTML|FONT|MESSAGETABLE|TYPELIB|REGISTRY|D3DFX)' \
         r'\s*.*?)' \
         r'\s*(<|"| )([^>"\s]+)(?:[>"\s])*$'
resScanner = SCons.Scanner.ClassicCPP( "ResourceScanner",
"$RCSUFFIXES",
"CPPPATH",
res_re )
return resScanner
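# Illustrative note (not part of the SCons source): the pattern above captures the
# referenced file name in its second group, for both preprocessor includes and
# resource statements.  Assuming `pattern` holds the res_re string from RCScan():
#
#   re.match(pattern, '#include "resource.h"').group(2)     # -> 'resource.h'
#   re.match(pattern, 'IDI_APP  ICON  "app.ico"').group(2)  # -> 'app.ico'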
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib64/scons-2.1.0/SCons/Scanner/RC.py | Python | gpl-2.0 | 2,075 |
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from sentry.models import AuthIdentity, AuthProvider, OrganizationMember
from sentry.testutils import AuthProviderTestCase, PermissionTestCase
class OrganizationAuthSettingsPermissionTest(PermissionTestCase):
def setUp(self):
super(OrganizationAuthSettingsPermissionTest, self).setUp()
self.path = reverse('sentry-organization-auth-settings', args=[self.organization.slug])
def test_teamless_owner_cannot_load(self):
with self.feature('organizations:sso'):
self.assert_teamless_owner_cannot_access(self.path)
def test_org_admin_cannot_load(self):
with self.feature('organizations:sso'):
self.assert_org_admin_cannot_access(self.path)
def test_org_owner_can_load(self):
with self.feature('organizations:sso'):
self.assert_org_owner_can_access(self.path)
class OrganizationAuthSettingsTest(AuthProviderTestCase):
def test_renders_with_context(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
path = reverse('sentry-organization-auth-settings', args=[organization.slug])
self.login_as(self.user)
with self.feature('organizations:sso'):
resp = self.client.get(path)
assert resp.status_code == 200
self.assertTemplateUsed(resp, 'sentry/organization-auth-settings.html')
assert resp.context['organization'] == organization
assert 'dummy' in [k for k, v in resp.context['provider_list']]
def test_can_start_auth_flow(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
path = reverse('sentry-organization-auth-settings', args=[organization.slug])
self.login_as(self.user)
with self.feature('organizations:sso'):
resp = self.client.post(path, {'provider': 'dummy'})
assert resp.status_code == 200
assert resp.content == self.provider.TEMPLATE
def test_disable_provider(self):
organization = self.create_organization(name='foo', owner=self.user)
team = self.create_team(organization=organization)
project = self.create_project(team=team)
auth_provider = AuthProvider.objects.create(
organization=organization,
provider='dummy',
)
AuthIdentity.objects.create(
user=self.user,
ident='foo',
auth_provider=auth_provider,
)
om = OrganizationMember.objects.get(
user=self.user,
organization=organization,
)
setattr(om.flags, 'sso:linked', True)
om.save()
path = reverse('sentry-organization-auth-settings', args=[organization.slug])
self.login_as(self.user)
with self.feature('organizations:sso'):
resp = self.client.post(path, {'op': 'disable'})
assert resp.status_code == 302
assert not AuthProvider.objects.filter(organization=organization).exists()
assert not AuthProvider.objects.filter(id=auth_provider.id).exists()
om = OrganizationMember.objects.get(id=om.id)
assert not getattr(om.flags, 'sso:linked')
| jokey2k/sentry | tests/sentry/web/frontend/test_organization_auth_settings.py | Python | bsd-3-clause | 3,466 |
# -*- coding: utf-8 -*-
"""
pygments.lexers._lasso_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Built-in Lasso types, traits, methods, and members.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
BUILTINS = {
'Types': (
'array',
'atbegin',
'boolean',
'bson_iter',
'bson',
'bytes_document_body',
'bytes',
'cache_server_element',
'cache_server',
'capture',
'client_address',
'client_ip',
'component_container',
'component_render_state',
'component',
'curl',
'curltoken',
'currency',
'custom',
'data_document',
'database_registry',
'date',
'dateandtime',
'dbgp_packet',
'dbgp_server',
'debugging_stack',
'decimal',
'delve',
'dir',
'dirdesc',
'dns_response',
'document_base',
'document_body',
'document_header',
'dsinfo',
'duration',
'eacher',
'email_compose',
'email_parse',
'email_pop',
'email_queue_impl_base',
'email_queue_impl',
'email_smtp',
'email_stage_impl_base',
'email_stage_impl',
'fastcgi_each_fcgi_param',
'fastcgi_server',
'fcgi_record',
'fcgi_request',
'file',
'filedesc',
'filemaker_datasource',
'generateforeachkeyed',
'generateforeachunkeyed',
'generateseries',
'hash_map',
'html_atomic_element',
'html_attr',
'html_base',
'html_binary',
'html_br',
'html_cdata',
'html_container_element',
'html_div',
'html_document_body',
'html_document_head',
'html_eol',
'html_fieldset',
'html_form',
'html_h1',
'html_h2',
'html_h3',
'html_h4',
'html_h5',
'html_h6',
'html_hr',
'html_img',
'html_input',
'html_json',
'html_label',
'html_legend',
'html_link',
'html_meta',
'html_object',
'html_option',
'html_raw',
'html_script',
'html_select',
'html_span',
'html_style',
'html_table',
'html_td',
'html_text',
'html_th',
'html_tr',
'http_document_header',
'http_document',
'http_error',
'http_header_field',
'http_server_connection_handler_globals',
'http_server_connection_handler',
'http_server_request_logger_thread',
'http_server_web_connection',
'http_server',
'image',
'include_cache',
'inline_type',
'integer',
'java_jnienv',
'jbyte',
'jbytearray',
'jchar',
'jchararray',
'jfieldid',
'jfloat',
'jint',
'jmethodid',
'jobject',
'jshort',
'json_decode',
'json_encode',
'json_literal',
'json_object',
'keyword',
'lassoapp_compiledsrc_appsource',
'lassoapp_compiledsrc_fileresource',
'lassoapp_content_rep_halt',
'lassoapp_dirsrc_appsource',
'lassoapp_dirsrc_fileresource',
'lassoapp_installer',
'lassoapp_livesrc_appsource',
'lassoapp_livesrc_fileresource',
'lassoapp_long_expiring_bytes',
'lassoapp_manualsrc_appsource',
'lassoapp_zip_file_server',
'lassoapp_zipsrc_appsource',
'lassoapp_zipsrc_fileresource',
'ldap',
'library_thread_loader',
'list_node',
'list',
'locale',
'log_impl_base',
'log_impl',
'magick_image',
'map_node',
'map',
'memberstream',
'memory_session_driver_impl_entry',
'memory_session_driver_impl',
'memory_session_driver',
'mime_reader',
'mongo_client',
'mongo_collection',
'mongo_cursor',
'mustache_ctx',
'mysql_session_driver_impl',
'mysql_session_driver',
'net_named_pipe',
'net_tcp_ssl',
'net_tcp',
'net_udp_packet',
'net_udp',
'null',
'odbc_session_driver_impl',
'odbc_session_driver',
'opaque',
'os_process',
'pair_compare',
'pair',
'pairup',
'pdf_barcode',
'pdf_chunk',
'pdf_color',
'pdf_doc',
'pdf_font',
'pdf_hyphenator',
'pdf_image',
'pdf_list',
'pdf_paragraph',
'pdf_phrase',
'pdf_read',
'pdf_table',
'pdf_text',
'pdf_typebase',
'percent',
'portal_impl',
'queriable_groupby',
'queriable_grouping',
'queriable_groupjoin',
'queriable_join',
'queriable_orderby',
'queriable_orderbydescending',
'queriable_select',
'queriable_selectmany',
'queriable_skip',
'queriable_take',
'queriable_thenby',
'queriable_thenbydescending',
'queriable_where',
'queue',
'raw_document_body',
'regexp',
'repeat',
'scientific',
'security_registry',
'serialization_element',
'serialization_object_identity_compare',
'serialization_reader',
'serialization_writer_ref',
'serialization_writer_standin',
'serialization_writer',
'session_delete_expired_thread',
'set',
'signature',
'sourcefile',
'sqlite_column',
'sqlite_currentrow',
'sqlite_db',
'sqlite_results',
'sqlite_session_driver_impl_entry',
'sqlite_session_driver_impl',
'sqlite_session_driver',
'sqlite_table',
'sqlite3_stmt',
'sqlite3',
'staticarray',
'string',
'sys_process',
'tag',
'text_document',
'tie',
'timeonly',
'trait',
'tree_base',
'tree_node',
'tree_nullnode',
'ucal',
'usgcpu',
'usgvm',
'void',
'web_error_atend',
'web_node_base',
'web_node_content_representation_css_specialized',
'web_node_content_representation_html_specialized',
'web_node_content_representation_js_specialized',
'web_node_content_representation_xhr_container',
'web_node_echo',
'web_node_root',
'web_request_impl',
'web_request',
'web_response_impl',
'web_response',
'web_router',
'websocket_handler',
'worker_pool',
'xml_attr',
'xml_cdatasection',
'xml_characterdata',
'xml_comment',
'xml_document',
'xml_documentfragment',
'xml_documenttype',
'xml_domimplementation',
'xml_element',
'xml_entity',
'xml_entityreference',
'xml_namednodemap_attr',
'xml_namednodemap_ht',
'xml_namednodemap',
'xml_node',
'xml_nodelist',
'xml_notation',
'xml_processinginstruction',
'xml_text',
'xmlstream',
'zip_file_impl',
'zip_file',
'zip_impl',
'zip',
),
'Traits': (
'any',
'formattingbase',
'html_attributed',
'html_element_coreattrs',
'html_element_eventsattrs',
'html_element_i18nattrs',
'lassoapp_capabilities',
'lassoapp_resource',
'lassoapp_source',
'queriable_asstring',
'session_driver',
'trait_array',
'trait_asstring',
'trait_backcontractible',
'trait_backended',
'trait_backexpandable',
'trait_close',
'trait_contractible',
'trait_decompose_assignment',
'trait_doubleended',
'trait_each_sub',
'trait_encodeurl',
'trait_endedfullymutable',
'trait_expandable',
'trait_file',
'trait_finite',
'trait_finiteforeach',
'trait_foreach',
'trait_foreachtextelement',
'trait_frontcontractible',
'trait_frontended',
'trait_frontexpandable',
'trait_fullymutable',
'trait_generator',
'trait_generatorcentric',
'trait_hashable',
'trait_json_serialize',
'trait_keyed',
'trait_keyedfinite',
'trait_keyedforeach',
'trait_keyedmutable',
'trait_list',
'trait_map',
'trait_net',
'trait_pathcomponents',
'trait_positionallykeyed',
'trait_positionallysearchable',
'trait_queriable',
'trait_queriablelambda',
'trait_readbytes',
'trait_readstring',
'trait_scalar',
'trait_searchable',
'trait_serializable',
'trait_setencoding',
'trait_setoperations',
'trait_stack',
'trait_treenode',
'trait_writebytes',
'trait_writestring',
'trait_xml_elementcompat',
'trait_xml_nodecompat',
'web_connection',
'web_node_container',
'web_node_content_css_specialized',
'web_node_content_document',
'web_node_content_html_specialized',
'web_node_content_js_specialized',
'web_node_content_json_specialized',
'web_node_content_representation',
'web_node_content',
'web_node_postable',
'web_node',
),
'Unbound Methods': (
'abort_clear',
'abort_now',
'abort',
'action_param',
'action_params',
'action_statement',
'admin_authorization',
'admin_currentgroups',
'admin_currentuserid',
'admin_currentusername',
'admin_getpref',
'admin_initialize',
'admin_lassoservicepath',
'admin_removepref',
'admin_setpref',
'admin_userexists',
'all',
'auth_admin',
'auth_check',
'auth_custom',
'auth_group',
'auth_prompt',
'auth_user',
'bom_utf16be',
'bom_utf16le',
'bom_utf32be',
'bom_utf32le',
'bom_utf8',
'bw',
'capture_nearestloopabort',
'capture_nearestloopcontinue',
'capture_nearestloopcount',
'checked',
'cipher_decrypt_private',
'cipher_decrypt_public',
'cipher_decrypt',
'cipher_digest',
'cipher_encrypt_private',
'cipher_encrypt_public',
'cipher_encrypt',
'cipher_generate_key',
'cipher_hmac',
'cipher_keylength',
'cipher_list',
'cipher_open',
'cipher_seal',
'cipher_sign',
'cipher_verify',
'client_addr',
'client_authorization',
'client_browser',
'client_contentlength',
'client_contenttype',
'client_cookielist',
'client_cookies',
'client_encoding',
'client_formmethod',
'client_getargs',
'client_getparam',
'client_getparams',
'client_headers',
'client_integertoip',
'client_iptointeger',
'client_password',
'client_postargs',
'client_postparam',
'client_postparams',
'client_type',
'client_url',
'client_username',
'cn',
'column_name',
'column_names',
'column_type',
'column',
'compress',
'content_addheader',
'content_body',
'content_encoding',
'content_header',
'content_replaceheader',
'content_type',
'cookie_set',
'cookie',
'curl_easy_cleanup',
'curl_easy_duphandle',
'curl_easy_getinfo',
'curl_easy_init',
'curl_easy_reset',
'curl_easy_setopt',
'curl_easy_strerror',
'curl_getdate',
'curl_http_version_1_0',
'curl_http_version_1_1',
'curl_http_version_none',
'curl_ipresolve_v4',
'curl_ipresolve_v6',
'curl_ipresolve_whatever',
'curl_multi_perform',
'curl_multi_result',
'curl_netrc_ignored',
'curl_netrc_optional',
'curl_netrc_required',
'curl_version_asynchdns',
'curl_version_debug',
'curl_version_gssnegotiate',
'curl_version_idn',
'curl_version_info',
'curl_version_ipv6',
'curl_version_kerberos4',
'curl_version_largefile',
'curl_version_libz',
'curl_version_ntlm',
'curl_version_spnego',
'curl_version_ssl',
'curl_version',
'curlauth_any',
'curlauth_anysafe',
'curlauth_basic',
'curlauth_digest',
'curlauth_gssnegotiate',
'curlauth_none',
'curlauth_ntlm',
'curle_aborted_by_callback',
'curle_bad_calling_order',
'curle_bad_content_encoding',
'curle_bad_download_resume',
'curle_bad_function_argument',
'curle_bad_password_entered',
'curle_couldnt_connect',
'curle_couldnt_resolve_host',
'curle_couldnt_resolve_proxy',
'curle_failed_init',
'curle_file_couldnt_read_file',
'curle_filesize_exceeded',
'curle_ftp_access_denied',
'curle_ftp_cant_get_host',
'curle_ftp_cant_reconnect',
'curle_ftp_couldnt_get_size',
'curle_ftp_couldnt_retr_file',
'curle_ftp_couldnt_set_ascii',
'curle_ftp_couldnt_set_binary',
'curle_ftp_couldnt_use_rest',
'curle_ftp_port_failed',
'curle_ftp_quote_error',
'curle_ftp_ssl_failed',
'curle_ftp_user_password_incorrect',
'curle_ftp_weird_227_format',
'curle_ftp_weird_pass_reply',
'curle_ftp_weird_pasv_reply',
'curle_ftp_weird_server_reply',
'curle_ftp_weird_user_reply',
'curle_ftp_write_error',
'curle_function_not_found',
'curle_got_nothing',
'curle_http_post_error',
'curle_http_range_error',
'curle_http_returned_error',
'curle_interface_failed',
'curle_ldap_cannot_bind',
'curle_ldap_invalid_url',
'curle_ldap_search_failed',
'curle_library_not_found',
'curle_login_denied',
'curle_malformat_user',
'curle_obsolete',
'curle_ok',
'curle_operation_timeouted',
'curle_out_of_memory',
'curle_partial_file',
'curle_read_error',
'curle_recv_error',
'curle_send_error',
'curle_send_fail_rewind',
'curle_share_in_use',
'curle_ssl_cacert',
'curle_ssl_certproblem',
'curle_ssl_cipher',
'curle_ssl_connect_error',
'curle_ssl_engine_initfailed',
'curle_ssl_engine_notfound',
'curle_ssl_engine_setfailed',
'curle_ssl_peer_certificate',
'curle_telnet_option_syntax',
'curle_too_many_redirects',
'curle_unknown_telnet_option',
'curle_unsupported_protocol',
'curle_url_malformat_user',
'curle_url_malformat',
'curle_write_error',
'curlftpauth_default',
'curlftpauth_ssl',
'curlftpauth_tls',
'curlftpssl_all',
'curlftpssl_control',
'curlftpssl_last',
'curlftpssl_none',
'curlftpssl_try',
'curlinfo_connect_time',
'curlinfo_content_length_download',
'curlinfo_content_length_upload',
'curlinfo_content_type',
'curlinfo_effective_url',
'curlinfo_filetime',
'curlinfo_header_size',
'curlinfo_http_connectcode',
'curlinfo_httpauth_avail',
'curlinfo_namelookup_time',
'curlinfo_num_connects',
'curlinfo_os_errno',
'curlinfo_pretransfer_time',
'curlinfo_proxyauth_avail',
'curlinfo_redirect_count',
'curlinfo_redirect_time',
'curlinfo_request_size',
'curlinfo_response_code',
'curlinfo_size_download',
'curlinfo_size_upload',
'curlinfo_speed_download',
'curlinfo_speed_upload',
'curlinfo_ssl_engines',
'curlinfo_ssl_verifyresult',
'curlinfo_starttransfer_time',
'curlinfo_total_time',
'curlmsg_done',
'curlopt_autoreferer',
'curlopt_buffersize',
'curlopt_cainfo',
'curlopt_capath',
'curlopt_connecttimeout',
'curlopt_cookie',
'curlopt_cookiefile',
'curlopt_cookiejar',
'curlopt_cookiesession',
'curlopt_crlf',
'curlopt_customrequest',
'curlopt_dns_use_global_cache',
'curlopt_egdsocket',
'curlopt_encoding',
'curlopt_failonerror',
'curlopt_filetime',
'curlopt_followlocation',
'curlopt_forbid_reuse',
'curlopt_fresh_connect',
'curlopt_ftp_account',
'curlopt_ftp_create_missing_dirs',
'curlopt_ftp_response_timeout',
'curlopt_ftp_ssl',
'curlopt_ftp_use_eprt',
'curlopt_ftp_use_epsv',
'curlopt_ftpappend',
'curlopt_ftplistonly',
'curlopt_ftpport',
'curlopt_ftpsslauth',
'curlopt_header',
'curlopt_http_version',
'curlopt_http200aliases',
'curlopt_httpauth',
'curlopt_httpget',
'curlopt_httpheader',
'curlopt_httppost',
'curlopt_httpproxytunnel',
'curlopt_infilesize_large',
'curlopt_infilesize',
'curlopt_interface',
'curlopt_ipresolve',
'curlopt_krb4level',
'curlopt_low_speed_limit',
'curlopt_low_speed_time',
'curlopt_mail_from',
'curlopt_mail_rcpt',
'curlopt_maxconnects',
'curlopt_maxfilesize_large',
'curlopt_maxfilesize',
'curlopt_maxredirs',
'curlopt_netrc_file',
'curlopt_netrc',
'curlopt_nobody',
'curlopt_noprogress',
'curlopt_port',
'curlopt_post',
'curlopt_postfields',
'curlopt_postfieldsize_large',
'curlopt_postfieldsize',
'curlopt_postquote',
'curlopt_prequote',
'curlopt_proxy',
'curlopt_proxyauth',
'curlopt_proxyport',
'curlopt_proxytype',
'curlopt_proxyuserpwd',
'curlopt_put',
'curlopt_quote',
'curlopt_random_file',
'curlopt_range',
'curlopt_readdata',
'curlopt_referer',
'curlopt_resume_from_large',
'curlopt_resume_from',
'curlopt_ssl_cipher_list',
'curlopt_ssl_verifyhost',
'curlopt_ssl_verifypeer',
'curlopt_sslcert',
'curlopt_sslcerttype',
'curlopt_sslengine_default',
'curlopt_sslengine',
'curlopt_sslkey',
'curlopt_sslkeypasswd',
'curlopt_sslkeytype',
'curlopt_sslversion',
'curlopt_tcp_nodelay',
'curlopt_timecondition',
'curlopt_timeout',
'curlopt_timevalue',
'curlopt_transfertext',
'curlopt_unrestricted_auth',
'curlopt_upload',
'curlopt_url',
'curlopt_use_ssl',
'curlopt_useragent',
'curlopt_userpwd',
'curlopt_verbose',
'curlopt_writedata',
'curlproxy_http',
'curlproxy_socks4',
'curlproxy_socks5',
'database_adddefaultsqlitehost',
'database_database',
'database_initialize',
'database_name',
'database_qs',
'database_table_database_tables',
'database_table_datasource_databases',
'database_table_datasource_hosts',
'database_table_datasources',
'database_table_table_fields',
'database_util_cleanpath',
'dbgp_stop_stack_name',
'debugging_break',
'debugging_breakpoint_get',
'debugging_breakpoint_list',
'debugging_breakpoint_remove',
'debugging_breakpoint_set',
'debugging_breakpoint_update',
'debugging_context_locals',
'debugging_context_self',
'debugging_context_vars',
'debugging_detach',
'debugging_enabled',
'debugging_get_context',
'debugging_get_stack',
'debugging_run',
'debugging_step_in',
'debugging_step_out',
'debugging_step_over',
'debugging_stop',
'debugging_terminate',
'decimal_random',
'decompress',
'decrypt_blowfish',
'define_atbegin',
'define_atend',
'dns_default',
'dns_lookup',
'document',
'email_attachment_mime_type',
'email_batch',
'email_digestchallenge',
'email_digestresponse',
'email_extract',
'email_findemails',
'email_fix_address_list',
'email_fix_address',
'email_fs_error_clean',
'email_immediate',
'email_initialize',
'email_merge',
'email_mxlookup',
'email_pop_priv_extract',
'email_pop_priv_quote',
'email_pop_priv_substring',
'email_queue',
'email_result',
'email_safeemail',
'email_send',
'email_status',
'email_token',
'email_translatebreakstocrlf',
'encode_qheader',
'encoding_iso88591',
'encoding_utf8',
'encrypt_blowfish',
'encrypt_crammd5',
'encrypt_hmac',
'encrypt_md5',
'eol',
'eq',
'error_code_aborted',
'error_code_dividebyzero',
'error_code_filenotfound',
'error_code_invalidparameter',
'error_code_methodnotfound',
'error_code_networkerror',
'error_code_noerror',
'error_code_resnotfound',
'error_code_runtimeassertion',
'error_code',
'error_msg_aborted',
'error_msg_dividebyzero',
'error_msg_filenotfound',
'error_msg_invalidparameter',
'error_msg_methodnotfound',
'error_msg_networkerror',
'error_msg_noerror',
'error_msg_resnotfound',
'error_msg_runtimeassertion',
'error_msg',
'error_obj',
'error_pop',
'error_push',
'error_reset',
'error_stack',
'escape_tag',
'evdns_resolve_ipv4',
'evdns_resolve_ipv6',
'evdns_resolve_reverse_ipv6',
'evdns_resolve_reverse',
'ew',
'fail_if',
'fail_ifnot',
'fail_now',
'fail',
'failure_clear',
'fastcgi_createfcgirequest',
'fastcgi_handlecon',
'fastcgi_handlereq',
'fastcgi_initialize',
'fastcgi_initiate_request',
'fcgi_abort_request',
'fcgi_authorize',
'fcgi_begin_request',
'fcgi_bodychunksize',
'fcgi_cant_mpx_conn',
'fcgi_data',
'fcgi_end_request',
'fcgi_filter',
'fcgi_get_values_result',
'fcgi_get_values',
'fcgi_keep_conn',
'fcgi_makeendrequestbody',
'fcgi_makestdoutbody',
'fcgi_max_conns',
'fcgi_max_reqs',
'fcgi_mpxs_conns',
'fcgi_null_request_id',
'fcgi_overloaded',
'fcgi_params',
'fcgi_read_timeout_seconds',
'fcgi_readparam',
'fcgi_request_complete',
'fcgi_responder',
'fcgi_stderr',
'fcgi_stdin',
'fcgi_stdout',
'fcgi_unknown_role',
'fcgi_unknown_type',
'fcgi_version_1',
'fcgi_x_stdin',
'field_name',
'field_names',
'field',
'file_copybuffersize',
'file_defaultencoding',
'file_forceroot',
'file_modechar',
'file_modeline',
'file_stderr',
'file_stdin',
'file_stdout',
'file_tempfile',
'filemakerds_initialize',
'filemakerds',
'found_count',
'ft',
'ftp_deletefile',
'ftp_getdata',
'ftp_getfile',
'ftp_getlisting',
'ftp_putdata',
'ftp_putfile',
'full',
'generateforeach',
'gt',
'gte',
'handle_failure',
'handle',
'hash_primes',
'html_comment',
'http_char_colon',
'http_char_cr',
'http_char_htab',
'http_char_lf',
'http_char_question',
'http_char_space',
'http_default_files',
'http_read_headers',
'http_read_timeout_secs',
'http_server_apps_path',
'http_server_request_logger',
'if_empty',
'if_false',
'if_null',
'if_true',
'include_cache_compare',
'include_currentpath',
'include_filepath',
'include_localpath',
'include_once',
'include_path',
'include_raw',
'include_url',
'include',
'includes',
'inline_colinfo_name_pos',
'inline_colinfo_type_pos',
'inline_colinfo_valuelist_pos',
'inline_columninfo_pos',
'inline_foundcount_pos',
'inline_namedget',
'inline_namedput',
'inline_resultrows_pos',
'inline_scopeget',
'inline_scopepop',
'inline_scopepush',
'inline',
'integer_bitor',
'integer_random',
'io_dir_dt_blk',
'io_dir_dt_chr',
'io_dir_dt_dir',
'io_dir_dt_fifo',
'io_dir_dt_lnk',
'io_dir_dt_reg',
'io_dir_dt_sock',
'io_dir_dt_unknown',
'io_dir_dt_wht',
'io_file_access',
'io_file_chdir',
'io_file_chmod',
'io_file_chown',
'io_file_dirname',
'io_file_f_dupfd',
'io_file_f_getfd',
'io_file_f_getfl',
'io_file_f_getlk',
'io_file_f_rdlck',
'io_file_f_setfd',
'io_file_f_setfl',
'io_file_f_setlk',
'io_file_f_setlkw',
'io_file_f_test',
'io_file_f_tlock',
'io_file_f_ulock',
'io_file_f_unlck',
'io_file_f_wrlck',
'io_file_fd_cloexec',
'io_file_fioasync',
'io_file_fioclex',
'io_file_fiodtype',
'io_file_fiogetown',
'io_file_fionbio',
'io_file_fionclex',
'io_file_fionread',
'io_file_fiosetown',
'io_file_getcwd',
'io_file_lchown',
'io_file_link',
'io_file_lockf',
'io_file_lstat_atime',
'io_file_lstat_mode',
'io_file_lstat_mtime',
'io_file_lstat_size',
'io_file_mkdir',
'io_file_mkfifo',
'io_file_mkstemp',
'io_file_o_append',
'io_file_o_async',
'io_file_o_creat',
'io_file_o_excl',
'io_file_o_exlock',
'io_file_o_fsync',
'io_file_o_nofollow',
'io_file_o_nonblock',
'io_file_o_rdonly',
'io_file_o_rdwr',
'io_file_o_shlock',
'io_file_o_sync',
'io_file_o_trunc',
'io_file_o_wronly',
'io_file_pipe',
'io_file_readlink',
'io_file_realpath',
'io_file_remove',
'io_file_rename',
'io_file_rmdir',
'io_file_s_ifblk',
'io_file_s_ifchr',
'io_file_s_ifdir',
'io_file_s_ififo',
'io_file_s_iflnk',
'io_file_s_ifmt',
'io_file_s_ifreg',
'io_file_s_ifsock',
'io_file_s_irgrp',
'io_file_s_iroth',
'io_file_s_irusr',
'io_file_s_irwxg',
'io_file_s_irwxo',
'io_file_s_irwxu',
'io_file_s_isgid',
'io_file_s_isuid',
'io_file_s_isvtx',
'io_file_s_iwgrp',
'io_file_s_iwoth',
'io_file_s_iwusr',
'io_file_s_ixgrp',
'io_file_s_ixoth',
'io_file_s_ixusr',
'io_file_seek_cur',
'io_file_seek_end',
'io_file_seek_set',
'io_file_stat_atime',
'io_file_stat_mode',
'io_file_stat_mtime',
'io_file_stat_size',
'io_file_stderr',
'io_file_stdin',
'io_file_stdout',
'io_file_symlink',
'io_file_tempnam',
'io_file_truncate',
'io_file_umask',
'io_file_unlink',
'io_net_accept',
'io_net_af_inet',
'io_net_af_inet6',
'io_net_af_unix',
'io_net_bind',
'io_net_connect',
'io_net_getpeername',
'io_net_getsockname',
'io_net_ipproto_ip',
'io_net_ipproto_udp',
'io_net_listen',
'io_net_msg_oob',
'io_net_msg_peek',
'io_net_msg_waitall',
'io_net_recv',
'io_net_recvfrom',
'io_net_send',
'io_net_sendto',
'io_net_shut_rd',
'io_net_shut_rdwr',
'io_net_shut_wr',
'io_net_shutdown',
'io_net_so_acceptconn',
'io_net_so_broadcast',
'io_net_so_debug',
'io_net_so_dontroute',
'io_net_so_error',
'io_net_so_keepalive',
'io_net_so_linger',
'io_net_so_oobinline',
'io_net_so_rcvbuf',
'io_net_so_rcvlowat',
'io_net_so_rcvtimeo',
'io_net_so_reuseaddr',
'io_net_so_sndbuf',
'io_net_so_sndlowat',
'io_net_so_sndtimeo',
'io_net_so_timestamp',
'io_net_so_type',
'io_net_so_useloopback',
'io_net_sock_dgram',
'io_net_sock_raw',
'io_net_sock_rdm',
'io_net_sock_seqpacket',
'io_net_sock_stream',
'io_net_socket',
'io_net_sol_socket',
'io_net_ssl_accept',
'io_net_ssl_begin',
'io_net_ssl_connect',
'io_net_ssl_end',
'io_net_ssl_error',
'io_net_ssl_errorstring',
'io_net_ssl_funcerrorstring',
'io_net_ssl_liberrorstring',
'io_net_ssl_read',
'io_net_ssl_reasonerrorstring',
'io_net_ssl_setacceptstate',
'io_net_ssl_setconnectstate',
'io_net_ssl_setverifylocations',
'io_net_ssl_shutdown',
'io_net_ssl_usecertificatechainfile',
'io_net_ssl_useprivatekeyfile',
'io_net_ssl_write',
'java_jvm_create',
'java_jvm_getenv',
'jdbc_initialize',
'json_back_slash',
'json_back_space',
'json_close_array',
'json_close_object',
'json_colon',
'json_comma',
'json_consume_array',
'json_consume_object',
'json_consume_string',
'json_consume_token',
'json_cr',
'json_debug',
'json_deserialize',
'json_e_lower',
'json_e_upper',
'json_f_lower',
'json_form_feed',
'json_forward_slash',
'json_lf',
'json_n_lower',
'json_negative',
'json_open_array',
'json_open_object',
'json_period',
'json_quote_double',
'json_rpccall',
'json_serialize',
'json_t_lower',
'json_tab',
'json_white_space',
'keycolumn_name',
'keycolumn_value',
'keyfield_name',
'keyfield_value',
'lasso_currentaction',
'lasso_errorreporting',
'lasso_executiontimelimit',
'lasso_methodexists',
'lasso_tagexists',
'lasso_uniqueid',
'lasso_version',
'lassoapp_current_app',
'lassoapp_current_include',
'lassoapp_do_with_include',
'lassoapp_exists',
'lassoapp_find_missing_file',
'lassoapp_format_mod_date',
'lassoapp_get_capabilities_name',
'lassoapp_include_current',
'lassoapp_include',
'lassoapp_initialize_db',
'lassoapp_initialize',
'lassoapp_invoke_resource',
'lassoapp_issourcefileextension',
'lassoapp_link',
'lassoapp_load_module',
'lassoapp_mime_get',
'lassoapp_mime_type_appcache',
'lassoapp_mime_type_css',
'lassoapp_mime_type_csv',
'lassoapp_mime_type_doc',
'lassoapp_mime_type_docx',
'lassoapp_mime_type_eof',
'lassoapp_mime_type_eot',
'lassoapp_mime_type_gif',
'lassoapp_mime_type_html',
'lassoapp_mime_type_ico',
'lassoapp_mime_type_jpg',
'lassoapp_mime_type_js',
'lassoapp_mime_type_lasso',
'lassoapp_mime_type_map',
'lassoapp_mime_type_pdf',
'lassoapp_mime_type_png',
'lassoapp_mime_type_ppt',
'lassoapp_mime_type_rss',
'lassoapp_mime_type_svg',
'lassoapp_mime_type_swf',
'lassoapp_mime_type_tif',
'lassoapp_mime_type_ttf',
'lassoapp_mime_type_txt',
'lassoapp_mime_type_woff',
'lassoapp_mime_type_xaml',
'lassoapp_mime_type_xap',
'lassoapp_mime_type_xbap',
'lassoapp_mime_type_xhr',
'lassoapp_mime_type_xml',
'lassoapp_mime_type_zip',
'lassoapp_path_to_method_name',
'lassoapp_settingsdb',
'layout_name',
'lcapi_datasourceadd',
'lcapi_datasourcecloseconnection',
'lcapi_datasourcedelete',
'lcapi_datasourceduplicate',
'lcapi_datasourceexecsql',
'lcapi_datasourcefindall',
'lcapi_datasourceimage',
'lcapi_datasourceinfo',
'lcapi_datasourceinit',
'lcapi_datasourcematchesname',
'lcapi_datasourcenames',
'lcapi_datasourcenothing',
'lcapi_datasourceopand',
'lcapi_datasourceopany',
'lcapi_datasourceopbw',
'lcapi_datasourceopct',
'lcapi_datasourceopeq',
'lcapi_datasourceopew',
'lcapi_datasourceopft',
'lcapi_datasourceopgt',
'lcapi_datasourceopgteq',
'lcapi_datasourceopin',
'lcapi_datasourceoplt',
'lcapi_datasourceoplteq',
'lcapi_datasourceopnbw',
'lcapi_datasourceopnct',
'lcapi_datasourceopneq',
'lcapi_datasourceopnew',
'lcapi_datasourceopnin',
'lcapi_datasourceopno',
'lcapi_datasourceopnot',
'lcapi_datasourceopnrx',
'lcapi_datasourceopor',
'lcapi_datasourceoprx',
'lcapi_datasourcepreparesql',
'lcapi_datasourceprotectionnone',
'lcapi_datasourceprotectionreadonly',
'lcapi_datasourcerandom',
'lcapi_datasourceschemanames',
'lcapi_datasourcescripts',
'lcapi_datasourcesearch',
'lcapi_datasourcesortascending',
'lcapi_datasourcesortcustom',
'lcapi_datasourcesortdescending',
'lcapi_datasourcetablenames',
'lcapi_datasourceterm',
'lcapi_datasourcetickle',
'lcapi_datasourcetypeblob',
'lcapi_datasourcetypeboolean',
'lcapi_datasourcetypedate',
'lcapi_datasourcetypedecimal',
'lcapi_datasourcetypeinteger',
'lcapi_datasourcetypestring',
'lcapi_datasourceunpreparesql',
'lcapi_datasourceupdate',
'lcapi_fourchartointeger',
'lcapi_listdatasources',
'lcapi_loadmodule',
'lcapi_loadmodules',
'lcapi_updatedatasourceslist',
'ldap_scope_base',
'ldap_scope_onelevel',
'ldap_scope_subtree',
'library_once',
'library',
'ljapi_initialize',
'locale_availablelocales',
'locale_canada',
'locale_canadafrench',
'locale_china',
'locale_chinese',
'locale_default',
'locale_english',
'locale_format_style_date_time',
'locale_format_style_default',
'locale_format_style_full',
'locale_format_style_long',
'locale_format_style_medium',
'locale_format_style_none',
'locale_format_style_short',
'locale_format',
'locale_france',
'locale_french',
'locale_german',
'locale_germany',
'locale_isocountries',
'locale_isolanguages',
'locale_italian',
'locale_italy',
'locale_japan',
'locale_japanese',
'locale_korea',
'locale_korean',
'locale_prc',
'locale_setdefault',
'locale_simplifiedchinese',
'locale_taiwan',
'locale_traditionalchinese',
'locale_uk',
'locale_us',
'log_always',
'log_critical',
'log_deprecated',
'log_destination_console',
'log_destination_database',
'log_destination_file',
'log_detail',
'log_initialize',
'log_level_critical',
'log_level_deprecated',
'log_level_detail',
'log_level_sql',
'log_level_warning',
'log_max_file_size',
'log_setdestination',
'log_sql',
'log_trim_file_size',
'log_warning',
'log',
'loop_abort',
'loop_continue',
'loop_count',
'loop_key_pop',
'loop_key_push',
'loop_key',
'loop_pop',
'loop_push',
'loop_value_pop',
'loop_value_push',
'loop_value',
'loop',
'lt',
'lte',
'main_thread_only',
'max',
'maxrecords_value',
'median',
'method_name',
'micros',
'millis',
'min',
'minimal',
'mongo_insert_continue_on_error',
'mongo_insert_no_validate',
'mongo_insert_none',
'mongo_query_await_data',
'mongo_query_exhaust',
'mongo_query_no_cursor_timeout',
'mongo_query_none',
'mongo_query_oplog_replay',
'mongo_query_partial',
'mongo_query_slave_ok',
'mongo_query_tailable_cursor',
'mongo_remove_none',
'mongo_remove_single_remove',
'mongo_update_multi_update',
'mongo_update_no_validate',
'mongo_update_none',
'mongo_update_upsert',
'mustache_compile_file',
'mustache_compile_string',
'mustache_include',
'mysqlds',
'namespace_global',
'namespace_import',
'namespace_using',
'nbw',
'ncn',
'neq',
'net_connectinprogress',
'net_connectok',
'net_typessl',
'net_typessltcp',
'net_typessludp',
'net_typetcp',
'net_typeudp',
'net_waitread',
'net_waittimeout',
'net_waitwrite',
'new',
'none',
'nrx',
'nslookup',
'odbc_session_driver_mssql',
'odbc',
'output_none',
'output',
'pdf_package',
'pdf_rectangle',
'pdf_serve',
'pi',
'portal',
'postgresql',
'process',
'protect_now',
'protect',
'queriable_average',
'queriable_defaultcompare',
'queriable_do',
'queriable_internal_combinebindings',
'queriable_max',
'queriable_min',
'queriable_qsort',
'queriable_reversecompare',
'queriable_sum',
'random_seed',
'range',
'records_array',
'records_map',
'records',
'redirect_url',
'referer_url',
'referrer_url',
'register_thread',
'register',
'response_filepath',
'response_localpath',
'response_path',
'response_realm',
'response_root',
'resultset_count',
'resultset',
'resultsets',
'rows_array',
'rows_impl',
'rows',
'rx',
'schema_name',
'security_database',
'security_default_realm',
'security_initialize',
'security_table_groups',
'security_table_ug_map',
'security_table_users',
'selected',
'series',
'server_admin',
'server_ip',
'server_name',
'server_port',
'server_protocol',
'server_push',
'server_signature',
'server_software',
'session_abort',
'session_addvar',
'session_decorate',
'session_deleteexpired',
'session_end',
'session_getdefaultdriver',
'session_id',
'session_initialize',
'session_removevar',
'session_result',
'session_setdefaultdriver',
'session_start',
'shown_count',
'shown_first',
'shown_last',
'site_id',
'site_name',
'skiprecords_value',
'sleep',
'split_thread',
'sqlite_abort',
'sqlite_auth',
'sqlite_blob',
'sqlite_busy',
'sqlite_cantopen',
'sqlite_constraint',
'sqlite_corrupt',
'sqlite_createdb',
'sqlite_done',
'sqlite_empty',
'sqlite_error',
'sqlite_float',
'sqlite_format',
'sqlite_full',
'sqlite_integer',
'sqlite_internal',
'sqlite_interrupt',
'sqlite_ioerr',
'sqlite_locked',
'sqlite_mismatch',
'sqlite_misuse',
'sqlite_nolfs',
'sqlite_nomem',
'sqlite_notadb',
'sqlite_notfound',
'sqlite_null',
'sqlite_ok',
'sqlite_perm',
'sqlite_protocol',
'sqlite_range',
'sqlite_readonly',
'sqlite_row',
'sqlite_schema',
'sqlite_setsleepmillis',
'sqlite_setsleeptries',
'sqlite_text',
'sqlite_toobig',
'sqliteconnector',
'staticarray_join',
'stdout',
'stdoutnl',
'string_validcharset',
'suspend',
'sys_appspath',
'sys_chroot',
'sys_clock',
'sys_clockspersec',
'sys_credits',
'sys_databasespath',
'sys_detach_exec',
'sys_difftime',
'sys_dll_ext',
'sys_drand48',
'sys_environ',
'sys_eol',
'sys_erand48',
'sys_errno',
'sys_exec_pid_to_os_pid',
'sys_exec',
'sys_exit',
'sys_fork',
'sys_garbagecollect',
'sys_getbytessincegc',
'sys_getchar',
'sys_getegid',
'sys_getenv',
'sys_geteuid',
'sys_getgid',
'sys_getgrnam',
'sys_getheapfreebytes',
'sys_getheapsize',
'sys_getlogin',
'sys_getpid',
'sys_getppid',
'sys_getpwnam',
'sys_getpwuid',
'sys_getstartclock',
'sys_getthreadcount',
'sys_getuid',
'sys_growheapby',
'sys_homepath',
'sys_is_full_path',
'sys_is_windows',
'sys_isfullpath',
'sys_iswindows',
'sys_iterate',
'sys_jrand48',
'sys_kill_exec',
'sys_kill',
'sys_lcong48',
'sys_librariespath',
'sys_listtraits',
'sys_listtypes',
'sys_listunboundmethods',
'sys_loadlibrary',
'sys_lrand48',
'sys_masterhomepath',
'sys_mrand48',
'sys_nrand48',
'sys_pid_exec',
'sys_pointersize',
'sys_rand',
'sys_random',
'sys_seed48',
'sys_setenv',
'sys_setgid',
'sys_setsid',
'sys_setuid',
'sys_sigabrt',
'sys_sigalrm',
'sys_sigbus',
'sys_sigchld',
'sys_sigcont',
'sys_sigfpe',
'sys_sighup',
'sys_sigill',
'sys_sigint',
'sys_sigkill',
'sys_sigpipe',
'sys_sigprof',
'sys_sigquit',
'sys_sigsegv',
'sys_sigstop',
'sys_sigsys',
'sys_sigterm',
'sys_sigtrap',
'sys_sigtstp',
'sys_sigttin',
'sys_sigttou',
'sys_sigurg',
'sys_sigusr1',
'sys_sigusr2',
'sys_sigvtalrm',
'sys_sigxcpu',
'sys_sigxfsz',
'sys_srand',
'sys_srand48',
'sys_srandom',
'sys_strerror',
'sys_supportpath',
'sys_test_exec',
'sys_time',
'sys_uname',
'sys_unsetenv',
'sys_usercapimodulepath',
'sys_userstartuppath',
'sys_version',
'sys_wait_exec',
'sys_waitpid',
'sys_wcontinued',
'sys_while',
'sys_wnohang',
'sys_wuntraced',
'table_name',
'tag_exists',
'tag_name',
'thread_var_get',
'thread_var_pop',
'thread_var_push',
'threadvar_find',
'threadvar_get',
'threadvar_set_asrt',
'threadvar_set',
'timer',
'token_value',
'treemap',
'u_lb_alphabetic',
'u_lb_ambiguous',
'u_lb_break_after',
'u_lb_break_before',
'u_lb_break_both',
'u_lb_break_symbols',
'u_lb_carriage_return',
'u_lb_close_punctuation',
'u_lb_combining_mark',
'u_lb_complex_context',
'u_lb_contingent_break',
'u_lb_exclamation',
'u_lb_glue',
'u_lb_h2',
'u_lb_h3',
'u_lb_hyphen',
'u_lb_ideographic',
'u_lb_infix_numeric',
'u_lb_inseparable',
'u_lb_jl',
'u_lb_jt',
'u_lb_jv',
'u_lb_line_feed',
'u_lb_mandatory_break',
'u_lb_next_line',
'u_lb_nonstarter',
'u_lb_numeric',
'u_lb_open_punctuation',
'u_lb_postfix_numeric',
'u_lb_prefix_numeric',
'u_lb_quotation',
'u_lb_space',
'u_lb_surrogate',
'u_lb_unknown',
'u_lb_word_joiner',
'u_lb_zwspace',
'u_nt_decimal',
'u_nt_digit',
'u_nt_none',
'u_nt_numeric',
'u_sb_aterm',
'u_sb_close',
'u_sb_format',
'u_sb_lower',
'u_sb_numeric',
'u_sb_oletter',
'u_sb_other',
'u_sb_sep',
'u_sb_sp',
'u_sb_sterm',
'u_sb_upper',
'u_wb_aletter',
'u_wb_extendnumlet',
'u_wb_format',
'u_wb_katakana',
'u_wb_midletter',
'u_wb_midnum',
'u_wb_numeric',
'u_wb_other',
'ucal_ampm',
'ucal_dayofmonth',
'ucal_dayofweek',
'ucal_dayofweekinmonth',
'ucal_dayofyear',
'ucal_daysinfirstweek',
'ucal_dowlocal',
'ucal_dstoffset',
'ucal_era',
'ucal_extendedyear',
'ucal_firstdayofweek',
'ucal_hour',
'ucal_hourofday',
'ucal_julianday',
'ucal_lenient',
'ucal_listtimezones',
'ucal_millisecond',
'ucal_millisecondsinday',
'ucal_minute',
'ucal_month',
'ucal_second',
'ucal_weekofmonth',
'ucal_weekofyear',
'ucal_year',
'ucal_yearwoy',
'ucal_zoneoffset',
'uchar_age',
'uchar_alphabetic',
'uchar_ascii_hex_digit',
'uchar_bidi_class',
'uchar_bidi_control',
'uchar_bidi_mirrored',
'uchar_bidi_mirroring_glyph',
'uchar_block',
'uchar_canonical_combining_class',
'uchar_case_folding',
'uchar_case_sensitive',
'uchar_dash',
'uchar_decomposition_type',
'uchar_default_ignorable_code_point',
'uchar_deprecated',
'uchar_diacritic',
'uchar_east_asian_width',
'uchar_extender',
'uchar_full_composition_exclusion',
'uchar_general_category_mask',
'uchar_general_category',
'uchar_grapheme_base',
'uchar_grapheme_cluster_break',
'uchar_grapheme_extend',
'uchar_grapheme_link',
'uchar_hangul_syllable_type',
'uchar_hex_digit',
'uchar_hyphen',
'uchar_id_continue',
'uchar_ideographic',
'uchar_ids_binary_operator',
'uchar_ids_trinary_operator',
'uchar_iso_comment',
'uchar_join_control',
'uchar_joining_group',
'uchar_joining_type',
'uchar_lead_canonical_combining_class',
'uchar_line_break',
'uchar_logical_order_exception',
'uchar_lowercase_mapping',
'uchar_lowercase',
'uchar_math',
'uchar_name',
'uchar_nfc_inert',
'uchar_nfc_quick_check',
'uchar_nfd_inert',
'uchar_nfd_quick_check',
'uchar_nfkc_inert',
'uchar_nfkc_quick_check',
'uchar_nfkd_inert',
'uchar_nfkd_quick_check',
'uchar_noncharacter_code_point',
'uchar_numeric_type',
'uchar_numeric_value',
'uchar_pattern_syntax',
'uchar_pattern_white_space',
'uchar_posix_alnum',
'uchar_posix_blank',
'uchar_posix_graph',
'uchar_posix_print',
'uchar_posix_xdigit',
'uchar_quotation_mark',
'uchar_radical',
'uchar_s_term',
'uchar_script',
'uchar_segment_starter',
'uchar_sentence_break',
'uchar_simple_case_folding',
'uchar_simple_lowercase_mapping',
'uchar_simple_titlecase_mapping',
'uchar_simple_uppercase_mapping',
'uchar_soft_dotted',
'uchar_terminal_punctuation',
'uchar_titlecase_mapping',
'uchar_trail_canonical_combining_class',
'uchar_unicode_1_name',
'uchar_unified_ideograph',
'uchar_uppercase_mapping',
'uchar_uppercase',
'uchar_variation_selector',
'uchar_white_space',
'uchar_word_break',
'uchar_xid_continue',
'uncompress',
'usage',
'uuid_compare',
'uuid_copy',
'uuid_generate_random',
'uuid_generate_time',
'uuid_generate',
'uuid_is_null',
'uuid_parse',
'uuid_unparse_lower',
'uuid_unparse_upper',
'uuid_unparse',
'value_list',
'value_listitem',
'valuelistitem',
'var_keys',
'var_values',
'wap_isenabled',
'wap_maxbuttons',
'wap_maxcolumns',
'wap_maxhorzpixels',
'wap_maxrows',
'wap_maxvertpixels',
'web_handlefcgirequest',
'web_node_content_representation_css',
'web_node_content_representation_html',
'web_node_content_representation_js',
'web_node_content_representation_xhr',
'web_node_forpath',
'web_nodes_initialize',
'web_nodes_normalizeextension',
'web_nodes_processcontentnode',
'web_nodes_requesthandler',
'web_response_nodesentry',
'web_router_database',
'web_router_initialize',
'websocket_handler_timeout',
'wexitstatus',
'wifcontinued',
'wifexited',
'wifsignaled',
'wifstopped',
'wstopsig',
'wtermsig',
'xml_transform',
'xml',
'zip_add_dir',
'zip_add',
'zip_checkcons',
'zip_close',
'zip_cm_bzip2',
'zip_cm_default',
'zip_cm_deflate',
'zip_cm_deflate64',
'zip_cm_implode',
'zip_cm_pkware_implode',
'zip_cm_reduce_1',
'zip_cm_reduce_2',
'zip_cm_reduce_3',
'zip_cm_reduce_4',
'zip_cm_shrink',
'zip_cm_store',
'zip_create',
'zip_delete',
'zip_em_3des_112',
'zip_em_3des_168',
'zip_em_aes_128',
'zip_em_aes_192',
'zip_em_aes_256',
'zip_em_des',
'zip_em_none',
'zip_em_rc2_old',
'zip_em_rc2',
'zip_em_rc4',
'zip_em_trad_pkware',
'zip_em_unknown',
'zip_er_changed',
'zip_er_close',
'zip_er_compnotsupp',
'zip_er_crc',
'zip_er_deleted',
'zip_er_eof',
'zip_er_exists',
'zip_er_incons',
'zip_er_internal',
'zip_er_inval',
'zip_er_memory',
'zip_er_multidisk',
'zip_er_noent',
'zip_er_nozip',
'zip_er_ok',
'zip_er_open',
'zip_er_read',
'zip_er_remove',
'zip_er_rename',
'zip_er_seek',
'zip_er_tmpopen',
'zip_er_write',
'zip_er_zipclosed',
'zip_er_zlib',
'zip_error_get_sys_type',
'zip_error_get',
'zip_error_to_str',
'zip_et_none',
'zip_et_sys',
'zip_et_zlib',
'zip_excl',
'zip_fclose',
'zip_file_error_get',
'zip_file_strerror',
'zip_fl_compressed',
'zip_fl_nocase',
'zip_fl_nodir',
'zip_fl_unchanged',
'zip_fopen_index',
'zip_fopen',
'zip_fread',
'zip_get_archive_comment',
'zip_get_file_comment',
'zip_get_name',
'zip_get_num_files',
'zip_name_locate',
'zip_open',
'zip_rename',
'zip_replace',
'zip_set_archive_comment',
'zip_set_file_comment',
'zip_stat_index',
'zip_stat',
'zip_strerror',
'zip_unchange_all',
'zip_unchange_archive',
'zip_unchange',
'zlib_version',
),
'Lasso 8 Tags': (
'__char',
'__sync_timestamp__',
'_admin_addgroup',
'_admin_adduser',
'_admin_defaultconnector',
'_admin_defaultconnectornames',
'_admin_defaultdatabase',
'_admin_defaultfield',
'_admin_defaultgroup',
'_admin_defaulthost',
'_admin_defaulttable',
'_admin_defaultuser',
'_admin_deleteconnector',
'_admin_deletedatabase',
'_admin_deletefield',
'_admin_deletegroup',
'_admin_deletehost',
'_admin_deletetable',
'_admin_deleteuser',
'_admin_duplicategroup',
'_admin_internaldatabase',
'_admin_listconnectors',
'_admin_listdatabases',
'_admin_listfields',
'_admin_listgroups',
'_admin_listhosts',
'_admin_listtables',
'_admin_listusers',
'_admin_refreshconnector',
'_admin_refreshsecurity',
'_admin_servicepath',
'_admin_updateconnector',
'_admin_updatedatabase',
'_admin_updatefield',
'_admin_updategroup',
'_admin_updatehost',
'_admin_updatetable',
'_admin_updateuser',
'_chartfx_activation_string',
'_chartfx_getchallengestring',
'_chop_args',
'_chop_mimes',
'_client_addr_old',
'_client_address_old',
'_client_ip_old',
'_database_names',
'_datasource_reload',
'_date_current',
'_date_format',
'_date_msec',
'_date_parse',
'_execution_timelimit',
'_file_chmod',
'_initialize',
'_jdbc_acceptsurl',
'_jdbc_debug',
'_jdbc_deletehost',
'_jdbc_driverclasses',
'_jdbc_driverinfo',
'_jdbc_metainfo',
'_jdbc_propertyinfo',
'_jdbc_setdriver',
'_lasso_param',
'_log_helper',
'_proc_noparam',
'_proc_withparam',
'_recursion_limit',
'_request_param',
'_security_binaryexpiration',
'_security_flushcaches',
'_security_isserialized',
'_security_serialexpiration',
'_srand',
'_strict_literals',
'_substring',
'_xmlrpc_exconverter',
'_xmlrpc_inconverter',
'_xmlrpc_xmlinconverter',
'abort',
'action_addinfo',
'action_addrecord',
'action_param',
'action_params',
'action_setfoundcount',
'action_setrecordid',
'action_settotalcount',
'action_statement',
'admin_allowedfileroots',
'admin_changeuser',
'admin_createuser',
'admin_currentgroups',
'admin_currentuserid',
'admin_currentusername',
'admin_getpref',
'admin_groupassignuser',
'admin_grouplistusers',
'admin_groupremoveuser',
'admin_lassoservicepath',
'admin_listgroups',
'admin_refreshlicensing',
'admin_refreshsecurity',
'admin_reloaddatasource',
'admin_removepref',
'admin_setpref',
'admin_userexists',
'admin_userlistgroups',
'all',
'and',
'array',
'array_iterator',
'auth',
'auth_admin',
'auth_auth',
'auth_custom',
'auth_group',
'auth_prompt',
'auth_user',
'base64',
'bean',
'bigint',
'bom_utf16be',
'bom_utf16le',
'bom_utf32be',
'bom_utf32le',
'bom_utf8',
'boolean',
'bw',
'bytes',
'cache',
'cache_delete',
'cache_empty',
'cache_exists',
'cache_fetch',
'cache_internal',
'cache_maintenance',
'cache_object',
'cache_preferences',
'cache_store',
'case',
'chartfx',
'chartfx_records',
'chartfx_serve',
'checked',
'choice_list',
'choice_listitem',
'choicelistitem',
'cipher_decrypt',
'cipher_digest',
'cipher_encrypt',
'cipher_hmac',
'cipher_keylength',
'cipher_list',
'click_text',
'client_addr',
'client_address',
'client_authorization',
'client_browser',
'client_contentlength',
'client_contenttype',
'client_cookielist',
'client_cookies',
'client_encoding',
'client_formmethod',
'client_getargs',
'client_getparams',
'client_headers',
'client_ip',
'client_ipfrominteger',
'client_iptointeger',
'client_password',
'client_postargs',
'client_postparams',
'client_type',
'client_url',
'client_username',
'cn',
'column',
'column_name',
'column_names',
'compare_beginswith',
'compare_contains',
'compare_endswith',
'compare_equalto',
'compare_greaterthan',
'compare_greaterthanorequals',
'compare_greaterthanorequls',
'compare_lessthan',
'compare_lessthanorequals',
'compare_notbeginswith',
'compare_notcontains',
'compare_notendswith',
'compare_notequalto',
'compare_notregexp',
'compare_regexp',
'compare_strictequalto',
'compare_strictnotequalto',
'compiler_removecacheddoc',
'compiler_setdefaultparserflags',
'compress',
'content_body',
'content_encoding',
'content_header',
'content_type',
'cookie',
'cookie_set',
'curl_ftp_getfile',
'curl_ftp_getlisting',
'curl_ftp_putfile',
'curl_include_url',
'currency',
'database_changecolumn',
'database_changefield',
'database_createcolumn',
'database_createfield',
'database_createtable',
'database_fmcontainer',
'database_hostinfo',
'database_inline',
'database_name',
'database_nameitem',
'database_names',
'database_realname',
'database_removecolumn',
'database_removefield',
'database_removetable',
'database_repeating',
'database_repeating_valueitem',
'database_repeatingvalueitem',
'database_schemanameitem',
'database_schemanames',
'database_tablecolumn',
'database_tablenameitem',
'database_tablenames',
'datasource_name',
'datasource_register',
'date',
'date__date_current',
'date__date_format',
'date__date_msec',
'date__date_parse',
'date_add',
'date_date',
'date_difference',
'date_duration',
'date_format',
'date_getcurrentdate',
'date_getday',
'date_getdayofweek',
'date_gethour',
'date_getlocaltimezone',
'date_getminute',
'date_getmonth',
'date_getsecond',
'date_gettime',
'date_getyear',
'date_gmttolocal',
'date_localtogmt',
'date_maximum',
'date_minimum',
'date_msec',
'date_setformat',
'date_subtract',
'db_layoutnameitem',
'db_layoutnames',
'db_nameitem',
'db_names',
'db_tablenameitem',
'db_tablenames',
'dbi_column_names',
'dbi_field_names',
'decimal',
'decimal_setglobaldefaultprecision',
'decode_base64',
'decode_bheader',
'decode_hex',
'decode_html',
'decode_json',
'decode_qheader',
'decode_quotedprintable',
'decode_quotedprintablebytes',
'decode_url',
'decode_xml',
'decompress',
'decrypt_blowfish',
'decrypt_blowfish2',
'default',
'define_atbegin',
'define_atend',
'define_constant',
'define_prototype',
'define_tag',
'define_tagp',
'define_type',
'define_typep',
'deserialize',
'directory_directorynameitem',
'directory_lister',
'directory_nameitem',
'directorynameitem',
'dns_default',
'dns_lookup',
'dns_response',
'duration',
'else',
'email_batch',
'email_compose',
'email_digestchallenge',
'email_digestresponse',
'email_extract',
'email_findemails',
'email_immediate',
'email_merge',
'email_mxerror',
'email_mxlookup',
'email_parse',
'email_pop',
'email_queue',
'email_result',
'email_safeemail',
'email_send',
'email_smtp',
'email_status',
'email_token',
'email_translatebreakstocrlf',
'encode_base64',
'encode_bheader',
'encode_break',
'encode_breaks',
'encode_crc32',
'encode_hex',
'encode_html',
'encode_htmltoxml',
'encode_json',
'encode_qheader',
'encode_quotedprintable',
'encode_quotedprintablebytes',
'encode_set',
'encode_smart',
'encode_sql',
'encode_sql92',
'encode_stricturl',
'encode_url',
'encode_xml',
'encrypt_blowfish',
'encrypt_blowfish2',
'encrypt_crammd5',
'encrypt_hmac',
'encrypt_md5',
'eq',
'error_adderror',
'error_code',
'error_code_aborted',
'error_code_assert',
'error_code_bof',
'error_code_connectioninvalid',
'error_code_couldnotclosefile',
'error_code_couldnotcreateoropenfile',
'error_code_couldnotdeletefile',
'error_code_couldnotdisposememory',
'error_code_couldnotlockmemory',
'error_code_couldnotreadfromfile',
'error_code_couldnotunlockmemory',
'error_code_couldnotwritetofile',
'error_code_criterianotmet',
'error_code_datasourceerror',
'error_code_directoryfull',
'error_code_diskfull',
'error_code_dividebyzero',
'error_code_eof',
'error_code_failure',
'error_code_fieldrestriction',
'error_code_file',
'error_code_filealreadyexists',
'error_code_filecorrupt',
'error_code_fileinvalid',
'error_code_fileinvalidaccessmode',
'error_code_fileisclosed',
'error_code_fileisopen',
'error_code_filelocked',
'error_code_filenotfound',
'error_code_fileunlocked',
'error_code_httpfilenotfound',
'error_code_illegalinstruction',
'error_code_illegaluseoffrozeninstance',
'error_code_invaliddatabase',
'error_code_invalidfilename',
'error_code_invalidmemoryobject',
'error_code_invalidparameter',
'error_code_invalidpassword',
'error_code_invalidpathname',
'error_code_invalidusername',
'error_code_ioerror',
'error_code_loopaborted',
'error_code_memory',
'error_code_network',
'error_code_nilpointer',
'error_code_noerr',
'error_code_nopermission',
'error_code_outofmemory',
'error_code_outofstackspace',
'error_code_overflow',
'error_code_postconditionfailed',
'error_code_preconditionfailed',
'error_code_resnotfound',
'error_code_resource',
'error_code_streamreaderror',
'error_code_streamwriteerror',
'error_code_syntaxerror',
'error_code_tagnotfound',
'error_code_unknownerror',
'error_code_varnotfound',
'error_code_volumedoesnotexist',
'error_code_webactionnotsupported',
'error_code_webadderror',
'error_code_webdeleteerror',
'error_code_webmodulenotfound',
'error_code_webnosuchobject',
'error_code_webrepeatingrelatedfield',
'error_code_webrequiredfieldmissing',
'error_code_webtimeout',
'error_code_webupdateerror',
'error_columnrestriction',
'error_currenterror',
'error_databaseconnectionunavailable',
'error_databasetimeout',
'error_deleteerror',
'error_fieldrestriction',
'error_filenotfound',
'error_invaliddatabase',
'error_invalidpassword',
'error_invalidusername',
'error_modulenotfound',
'error_msg',
'error_msg_aborted',
'error_msg_assert',
'error_msg_bof',
'error_msg_connectioninvalid',
'error_msg_couldnotclosefile',
'error_msg_couldnotcreateoropenfile',
'error_msg_couldnotdeletefile',
'error_msg_couldnotdisposememory',
'error_msg_couldnotlockmemory',
'error_msg_couldnotreadfromfile',
'error_msg_couldnotunlockmemory',
'error_msg_couldnotwritetofile',
'error_msg_criterianotmet',
'error_msg_datasourceerror',
'error_msg_directoryfull',
'error_msg_diskfull',
'error_msg_dividebyzero',
'error_msg_eof',
'error_msg_failure',
'error_msg_fieldrestriction',
'error_msg_file',
'error_msg_filealreadyexists',
'error_msg_filecorrupt',
'error_msg_fileinvalid',
'error_msg_fileinvalidaccessmode',
'error_msg_fileisclosed',
'error_msg_fileisopen',
'error_msg_filelocked',
'error_msg_filenotfound',
'error_msg_fileunlocked',
'error_msg_httpfilenotfound',
'error_msg_illegalinstruction',
'error_msg_illegaluseoffrozeninstance',
'error_msg_invaliddatabase',
'error_msg_invalidfilename',
'error_msg_invalidmemoryobject',
'error_msg_invalidparameter',
'error_msg_invalidpassword',
'error_msg_invalidpathname',
'error_msg_invalidusername',
'error_msg_ioerror',
'error_msg_loopaborted',
'error_msg_memory',
'error_msg_network',
'error_msg_nilpointer',
'error_msg_noerr',
'error_msg_nopermission',
'error_msg_outofmemory',
'error_msg_outofstackspace',
'error_msg_overflow',
'error_msg_postconditionfailed',
'error_msg_preconditionfailed',
'error_msg_resnotfound',
'error_msg_resource',
'error_msg_streamreaderror',
'error_msg_streamwriteerror',
'error_msg_syntaxerror',
'error_msg_tagnotfound',
'error_msg_unknownerror',
'error_msg_varnotfound',
'error_msg_volumedoesnotexist',
'error_msg_webactionnotsupported',
'error_msg_webadderror',
'error_msg_webdeleteerror',
'error_msg_webmodulenotfound',
'error_msg_webnosuchobject',
'error_msg_webrepeatingrelatedfield',
'error_msg_webrequiredfieldmissing',
'error_msg_webtimeout',
'error_msg_webupdateerror',
'error_noerror',
'error_nopermission',
'error_norecordsfound',
'error_outofmemory',
'error_pop',
'error_push',
'error_reqcolumnmissing',
'error_reqfieldmissing',
'error_requiredcolumnmissing',
'error_requiredfieldmissing',
'error_reset',
'error_seterrorcode',
'error_seterrormessage',
'error_updateerror',
'euro',
'event_schedule',
'ew',
'fail',
'fail_if',
'false',
'field',
'field_name',
'field_names',
'file',
'file_autoresolvefullpaths',
'file_chmod',
'file_control',
'file_copy',
'file_create',
'file_creationdate',
'file_currenterror',
'file_delete',
'file_exists',
'file_getlinecount',
'file_getsize',
'file_isdirectory',
'file_listdirectory',
'file_moddate',
'file_modechar',
'file_modeline',
'file_move',
'file_openread',
'file_openreadwrite',
'file_openwrite',
'file_openwriteappend',
'file_openwritetruncate',
'file_probeeol',
'file_processuploads',
'file_read',
'file_readline',
'file_rename',
'file_serve',
'file_setsize',
'file_stream',
'file_streamcopy',
'file_uploads',
'file_waitread',
'file_waittimeout',
'file_waitwrite',
'file_write',
'find_soap_ops',
'form_param',
'found_count',
'ft',
'ftp_getfile',
'ftp_getlisting',
'ftp_putfile',
'full',
'global',
'global_defined',
'global_remove',
'global_reset',
'globals',
'gt',
'gte',
'handle',
'handle_error',
'header',
'html_comment',
'http_getfile',
'ical_alarm',
'ical_attribute',
'ical_calendar',
'ical_daylight',
'ical_event',
'ical_freebusy',
'ical_item',
'ical_journal',
'ical_parse',
'ical_standard',
'ical_timezone',
'ical_todo',
'if',
'if_empty',
'if_false',
'if_null',
'if_true',
'image',
'image_url',
'img',
'include',
'include_cgi',
'include_currentpath',
'include_once',
'include_raw',
'include_url',
'inline',
'integer',
'iterate',
'iterator',
'java',
'java_bean',
'json_records',
'json_rpccall',
'keycolumn_name',
'keycolumn_value',
'keyfield_name',
'keyfield_value',
'lasso_comment',
'lasso_currentaction',
'lasso_datasourceis',
'lasso_datasourceis4d',
'lasso_datasourceisfilemaker',
'lasso_datasourceisfilemaker7',
'lasso_datasourceisfilemaker9',
'lasso_datasourceisfilemakersa',
'lasso_datasourceisjdbc',
'lasso_datasourceislassomysql',
'lasso_datasourceismysql',
'lasso_datasourceisodbc',
'lasso_datasourceisopenbase',
'lasso_datasourceisoracle',
'lasso_datasourceispostgresql',
'lasso_datasourceisspotlight',
'lasso_datasourceissqlite',
'lasso_datasourceissqlserver',
'lasso_datasourcemodulename',
'lasso_datatype',
'lasso_disableondemand',
'lasso_errorreporting',
'lasso_executiontimelimit',
'lasso_parser',
'lasso_process',
'lasso_sessionid',
'lasso_siteid',
'lasso_siteisrunning',
'lasso_sitename',
'lasso_siterestart',
'lasso_sitestart',
'lasso_sitestop',
'lasso_tagexists',
'lasso_tagmodulename',
'lasso_uniqueid',
'lasso_updatecheck',
'lasso_uptime',
'lasso_version',
'lassoapp_create',
'lassoapp_dump',
'lassoapp_flattendir',
'lassoapp_getappdata',
'lassoapp_link',
'lassoapp_list',
'lassoapp_process',
'lassoapp_unitize',
'layout_name',
'ldap',
'ldap_scope_base',
'ldap_scope_onelevel',
'ldap_scope_subtree',
'ldml',
'ldml_ldml',
'library',
'library_once',
'link',
'link_currentaction',
'link_currentactionparams',
'link_currentactionurl',
'link_currentgroup',
'link_currentgroupparams',
'link_currentgroupurl',
'link_currentrecord',
'link_currentrecordparams',
'link_currentrecordurl',
'link_currentsearch',
'link_currentsearchparams',
'link_currentsearchurl',
'link_detail',
'link_detailparams',
'link_detailurl',
'link_firstgroup',
'link_firstgroupparams',
'link_firstgroupurl',
'link_firstrecord',
'link_firstrecordparams',
'link_firstrecordurl',
'link_lastgroup',
'link_lastgroupparams',
'link_lastgroupurl',
'link_lastrecord',
'link_lastrecordparams',
'link_lastrecordurl',
'link_nextgroup',
'link_nextgroupparams',
'link_nextgroupurl',
'link_nextrecord',
'link_nextrecordparams',
'link_nextrecordurl',
'link_params',
'link_prevgroup',
'link_prevgroupparams',
'link_prevgroupurl',
'link_prevrecord',
'link_prevrecordparams',
'link_prevrecordurl',
'link_setformat',
'link_url',
'list',
'list_additem',
'list_fromlist',
'list_fromstring',
'list_getitem',
'list_itemcount',
'list_iterator',
'list_removeitem',
'list_replaceitem',
'list_reverseiterator',
'list_tostring',
'literal',
'ljax_end',
'ljax_hastarget',
'ljax_include',
'ljax_start',
'ljax_target',
'local',
'local_defined',
'local_remove',
'local_reset',
'locale_format',
'locals',
'log',
'log_always',
'log_critical',
'log_deprecated',
'log_destination_console',
'log_destination_database',
'log_destination_file',
'log_detail',
'log_level_critical',
'log_level_deprecated',
'log_level_detail',
'log_level_sql',
'log_level_warning',
'log_setdestination',
'log_sql',
'log_warning',
'logicalop_value',
'logicaloperator_value',
'loop',
'loop_abort',
'loop_continue',
'loop_count',
'lt',
'lte',
'magick_image',
'map',
'map_iterator',
'match_comparator',
'match_notrange',
'match_notregexp',
'match_range',
'match_regexp',
'math_abs',
'math_acos',
'math_add',
'math_asin',
'math_atan',
'math_atan2',
'math_ceil',
'math_converteuro',
'math_cos',
'math_div',
'math_exp',
'math_floor',
'math_internal_rand',
'math_internal_randmax',
'math_internal_srand',
'math_ln',
'math_log',
'math_log10',
'math_max',
'math_min',
'math_mod',
'math_mult',
'math_pow',
'math_random',
'math_range',
'math_rint',
'math_roman',
'math_round',
'math_sin',
'math_sqrt',
'math_sub',
'math_tan',
'maxrecords_value',
'memory_session_driver',
'mime_type',
'minimal',
'misc__srand',
'misc_randomnumber',
'misc_roman',
'misc_valid_creditcard',
'mysql_session_driver',
'named_param',
'namespace_current',
'namespace_delimiter',
'namespace_exists',
'namespace_file_fullpathexists',
'namespace_global',
'namespace_import',
'namespace_load',
'namespace_page',
'namespace_unload',
'namespace_using',
'neq',
'net',
'net_connectinprogress',
'net_connectok',
'net_typessl',
'net_typessltcp',
'net_typessludp',
'net_typetcp',
'net_typeudp',
'net_waitread',
'net_waittimeout',
'net_waitwrite',
'no_default_output',
'none',
'noprocess',
'not',
'nrx',
'nslookup',
'null',
'object',
'once',
'oneoff',
'op_logicalvalue',
'operator_logicalvalue',
'option',
'or',
'os_process',
'output',
'output_none',
'pair',
'params_up',
'pdf_barcode',
'pdf_color',
'pdf_doc',
'pdf_font',
'pdf_image',
'pdf_list',
'pdf_read',
'pdf_serve',
'pdf_table',
'pdf_text',
'percent',
'portal',
'postcondition',
'precondition',
'prettyprintingnsmap',
'prettyprintingtypemap',
'priorityqueue',
'private',
'proc_convert',
'proc_convertbody',
'proc_convertone',
'proc_extract',
'proc_extractone',
'proc_find',
'proc_first',
'proc_foreach',
'proc_get',
'proc_join',
'proc_lasso',
'proc_last',
'proc_map_entry',
'proc_null',
'proc_regexp',
'proc_xml',
'proc_xslt',
'process',
'protect',
'queue',
'rand',
'randomnumber',
'raw',
'recid_value',
'record_count',
'recordcount',
'recordid_value',
'records',
'records_array',
'records_map',
'redirect_url',
'reference',
'referer',
'referer_url',
'referrer',
'referrer_url',
'regexp',
'repeating',
'repeating_valueitem',
'repeatingvalueitem',
'repetition',
'req_column',
'req_field',
'required_column',
'required_field',
'response_fileexists',
'response_filepath',
'response_localpath',
'response_path',
'response_realm',
'resultset',
'resultset_count',
'return',
'return_value',
'reverseiterator',
'roman',
'row_count',
'rows',
'rows_array',
'run_children',
'rx',
'schema_name',
'scientific',
'search_args',
'search_arguments',
'search_columnitem',
'search_fielditem',
'search_operatoritem',
'search_opitem',
'search_valueitem',
'searchfielditem',
'searchoperatoritem',
'searchopitem',
'searchvalueitem',
'select',
'selected',
'self',
'serialize',
'series',
'server_date',
'server_day',
'server_ip',
'server_name',
'server_port',
'server_push',
'server_siteisrunning',
'server_sitestart',
'server_sitestop',
'server_time',
'session_abort',
'session_addoutputfilter',
'session_addvar',
'session_addvariable',
'session_deleteexpired',
'session_driver',
'session_end',
'session_id',
'session_removevar',
'session_removevariable',
'session_result',
'session_setdriver',
'session_start',
'set',
'set_iterator',
'set_reverseiterator',
'shown_count',
'shown_first',
'shown_last',
'site_atbegin',
'site_id',
'site_name',
'site_restart',
'skiprecords_value',
'sleep',
'soap_convertpartstopairs',
'soap_definetag',
'soap_info',
'soap_lastrequest',
'soap_lastresponse',
'soap_stub',
'sort_args',
'sort_arguments',
'sort_columnitem',
'sort_fielditem',
'sort_orderitem',
'sortcolumnitem',
'sortfielditem',
'sortorderitem',
'sqlite_createdb',
'sqlite_session_driver',
'sqlite_setsleepmillis',
'sqlite_setsleeptries',
'srand',
'stack',
'stock_quote',
'string',
'string_charfromname',
'string_concatenate',
'string_countfields',
'string_endswith',
'string_extract',
'string_findposition',
'string_findregexp',
'string_fordigit',
'string_getfield',
'string_getunicodeversion',
'string_insert',
'string_isalpha',
'string_isalphanumeric',
'string_isdigit',
'string_ishexdigit',
'string_islower',
'string_isnumeric',
'string_ispunctuation',
'string_isspace',
'string_isupper',
'string_length',
'string_lowercase',
'string_remove',
'string_removeleading',
'string_removetrailing',
'string_replace',
'string_replaceregexp',
'string_todecimal',
'string_tointeger',
'string_uppercase',
'string_validcharset',
'table_name',
'table_realname',
'tag',
'tag_name',
'tags',
'tags_find',
'tags_list',
'tcp_close',
'tcp_open',
'tcp_send',
'tcp_tcp_close',
'tcp_tcp_open',
'tcp_tcp_send',
'thread_abort',
'thread_atomic',
'thread_event',
'thread_exists',
'thread_getcurrentid',
'thread_getpriority',
'thread_info',
'thread_list',
'thread_lock',
'thread_pipe',
'thread_priority_default',
'thread_priority_high',
'thread_priority_low',
'thread_rwlock',
'thread_semaphore',
'thread_setpriority',
'token_value',
'total_records',
'treemap',
'treemap_iterator',
'true',
'url_rewrite',
'valid_creditcard',
'valid_date',
'valid_email',
'valid_url',
'value_list',
'value_listitem',
'valuelistitem',
'var',
'var_defined',
'var_remove',
'var_reset',
'var_set',
'variable',
'variable_defined',
'variable_set',
'variables',
'variant_count',
'vars',
'wap_isenabled',
'wap_maxbuttons',
'wap_maxcolumns',
'wap_maxhorzpixels',
'wap_maxrows',
'wap_maxvertpixels',
'while',
'wsdl_extract',
'wsdl_getbinding',
'wsdl_getbindingforoperation',
'wsdl_getbindingoperations',
'wsdl_getmessagenamed',
'wsdl_getmessageparts',
'wsdl_getmessagetriofromporttype',
'wsdl_getopbodystyle',
'wsdl_getopbodyuse',
'wsdl_getoperation',
'wsdl_getoplocation',
'wsdl_getopmessagetypes',
'wsdl_getopsoapaction',
'wsdl_getportaddress',
'wsdl_getportsforservice',
'wsdl_getporttype',
'wsdl_getporttypeoperation',
'wsdl_getservicedocumentation',
'wsdl_getservices',
'wsdl_gettargetnamespace',
'wsdl_issoapoperation',
'wsdl_listoperations',
'wsdl_maketest',
'xml',
'xml_extract',
'xml_rpc',
'xml_rpccall',
'xml_rw',
'xml_serve',
'xml_transform',
'xml_xml',
'xml_xmlstream',
'xmlstream',
'xsd_attribute',
'xsd_blankarraybase',
'xsd_blankbase',
'xsd_buildtype',
'xsd_cache',
'xsd_checkcardinality',
'xsd_continueall',
'xsd_continueannotation',
'xsd_continueany',
'xsd_continueanyattribute',
'xsd_continueattribute',
'xsd_continueattributegroup',
'xsd_continuechoice',
'xsd_continuecomplexcontent',
'xsd_continuecomplextype',
'xsd_continuedocumentation',
'xsd_continueextension',
'xsd_continuegroup',
'xsd_continuekey',
'xsd_continuelist',
'xsd_continuerestriction',
'xsd_continuesequence',
'xsd_continuesimplecontent',
'xsd_continuesimpletype',
'xsd_continueunion',
'xsd_deserialize',
'xsd_fullyqualifyname',
'xsd_generate',
'xsd_generateblankfromtype',
'xsd_generateblanksimpletype',
'xsd_generatetype',
'xsd_getschematype',
'xsd_issimpletype',
'xsd_loadschema',
'xsd_lookupnamespaceuri',
'xsd_lookuptype',
'xsd_processany',
'xsd_processattribute',
'xsd_processattributegroup',
'xsd_processcomplextype',
'xsd_processelement',
'xsd_processgroup',
'xsd_processimport',
'xsd_processinclude',
'xsd_processschema',
'xsd_processsimpletype',
'xsd_ref',
'xsd_type',
)
}
MEMBERS = {
'Member Methods': (
'abort',
'abs',
'accept_charset',
'accept',
'acceptconnections',
'acceptdeserializedelement',
'acceptnossl',
'acceptpost',
'accesskey',
'acos',
'acosh',
'action',
'actionparams',
'active_tick',
'add',
'addatend',
'addattachment',
'addbarcode',
'addchapter',
'addcheckbox',
'addcolumninfo',
'addcombobox',
'addcomment',
'addcomponent',
'addcomponents',
'addcss',
'adddatabasetable',
'adddatasource',
'adddatasourcedatabase',
'adddatasourcehost',
'adddir',
'adddirpath',
'addendjs',
'addendjstext',
'adderror',
'addfavicon',
'addfile',
'addgroup',
'addheader',
'addhiddenfield',
'addhtmlpart',
'addimage',
'addjavascript',
'addjs',
'addjstext',
'addlist',
'addmathfunctions',
'addmember',
'addoneheaderline',
'addpage',
'addparagraph',
'addpart',
'addpasswordfield',
'addphrase',
'addpostdispatch',
'addpredispatch',
'addradiobutton',
'addradiogroup',
'addresetbutton',
'addrow',
'addsection',
'addselectlist',
'addset',
'addsubmitbutton',
'addsubnode',
'addtable',
'addtask',
'addtext',
'addtextarea',
'addtextfield',
'addtextpart',
'addtobuffer',
'addtrait',
'adduser',
'addusertogroup',
'addwarning',
'addzip',
'allocobject',
'am',
'ampm',
'annotate',
'answer',
'apop',
'append',
'appendarray',
'appendarraybegin',
'appendarrayend',
'appendbool',
'appendbytes',
'appendchar',
'appendchild',
'appendcolon',
'appendcomma',
'appenddata',
'appenddatetime',
'appenddbpointer',
'appenddecimal',
'appenddocument',
'appendimagetolist',
'appendinteger',
'appendnowutc',
'appendnull',
'appendoid',
'appendregex',
'appendreplacement',
'appendstring',
'appendtail',
'appendtime',
'applyheatcolors',
'appmessage',
'appname',
'appprefix',
'appstatus',
'arc',
'archive',
'arguments',
'argumentvalue',
'asarray',
'asarraystring',
'asasync',
'asbytes',
'ascopy',
'ascopydeep',
'asdecimal',
'asgenerator',
'asin',
'asinh',
'asinteger',
'askeyedgenerator',
'aslazystring',
'aslist',
'asraw',
'asstaticarray',
'asstring',
'asstringhex',
'asstringoct',
'asxml',
'atan',
'atan2',
'atanh',
'atend',
'atends',
'atime',
'attributecount',
'attributes',
'attrs',
'auth',
'authenticate',
'authorize',
'autocollectbuffer',
'average',
'back',
'basename',
'basepaths',
'baseuri',
'bcc',
'beginssl',
'beginswith',
'begintls',
'bestcharset',
'bind_blob',
'bind_double',
'bind_int',
'bind_null',
'bind_parameter_index',
'bind_text',
'bind',
'bindcount',
'bindone',
'bindparam',
'bitand',
'bitclear',
'bitflip',
'bitformat',
'bitnot',
'bitor',
'bitset',
'bitshiftleft',
'bitshiftright',
'bittest',
'bitxor',
'blur',
'body',
'bodybytes',
'boundary',
'bptoxml',
'bptypetostr',
'bucketnumber',
'buff',
'buildquery',
'businessdaysbetween',
'by',
'bytes',
'cachedappprefix',
'cachedroot',
'callboolean',
'callbooleanmethod',
'callbytemethod',
'callcharmethod',
'calldoublemethod',
'calledname',
'callfirst',
'callfloat',
'callfloatmethod',
'callint',
'callintmethod',
'calllongmethod',
'callnonvirtualbooleanmethod',
'callnonvirtualbytemethod',
'callnonvirtualcharmethod',
'callnonvirtualdoublemethod',
'callnonvirtualfloatmethod',
'callnonvirtualintmethod',
'callnonvirtuallongmethod',
'callnonvirtualobjectmethod',
'callnonvirtualshortmethod',
'callnonvirtualvoidmethod',
'callobject',
'callobjectmethod',
'callshortmethod',
'callsite_col',
'callsite_file',
'callsite_line',
'callstack',
'callstaticboolean',
'callstaticbooleanmethod',
'callstaticbytemethod',
'callstaticcharmethod',
'callstaticdoublemethod',
'callstaticfloatmethod',
'callstaticint',
'callstaticintmethod',
'callstaticlongmethod',
'callstaticobject',
'callstaticobjectmethod',
'callstaticshortmethod',
'callstaticstring',
'callstaticvoidmethod',
'callstring',
'callvoid',
'callvoidmethod',
'cancel',
'cap',
'capa',
'capabilities',
'capi',
'cbrt',
'cc',
'ceil',
'chardigitvalue',
'charname',
'charset',
'chartype',
'checkdebugging',
'checked',
'checkuser',
'childnodes',
'chk',
'chmod',
'choosecolumntype',
'chown',
'chunked',
'circle',
'class',
'classid',
'clear',
'clonenode',
'close',
'closepath',
'closeprepared',
'closewrite',
'code',
'codebase',
'codetype',
'colmap',
'colorspace',
'column_blob',
'column_count',
'column_decltype',
'column_double',
'column_int64',
'column_name',
'column_text',
'column_type',
'command',
'comments',
'compare',
'comparecodepointorder',
'componentdelimiter',
'components',
'composite',
'compress',
'concat',
'condtoint',
'configureds',
'configuredskeys',
'connect',
'connection',
'connectionhandler',
'connhandler',
'consume_domain',
'consume_label',
'consume_message',
'consume_rdata',
'consume_string',
'contains',
'content_disposition',
'content_transfer_encoding',
'content_type',
'content',
'contentlength',
'contents',
'contenttype',
'continuation',
'continuationpacket',
'continuationpoint',
'continuationstack',
'continue',
'contrast',
'conventionaltop',
'convert',
'cookie',
'cookies',
'cookiesarray',
'cookiesary',
'copyto',
'cos',
'cosh',
'count',
'countkeys',
'country',
'countusersbygroup',
'crc',
'create',
'createattribute',
'createattributens',
'createcdatasection',
'createcomment',
'createdocument',
'createdocumentfragment',
'createdocumenttype',
'createelement',
'createelementns',
'createentityreference',
'createindex',
'createprocessinginstruction',
'createtable',
'createtextnode',
'criteria',
'crop',
'csscontent',
'curl',
'current',
'currentfile',
'curveto',
'd',
'data',
'databasecolumnnames',
'databasecolumns',
'databasemap',
'databasename',
'datasourcecolumnnames',
'datasourcecolumns',
'datasourcemap',
'date',
'day',
'dayofmonth',
'dayofweek',
'dayofweekinmonth',
'dayofyear',
'days',
'daysbetween',
'db',
'dbtablestable',
'debug',
'declare',
'decodebase64',
'decodehex',
'decodehtml',
'decodeqp',
'decodeurl',
'decodexml',
'decompose',
'decomposeassignment',
'defaultcontentrepresentation',
'defer',
'deg2rad',
'dele',
'delete',
'deletedata',
'deleteglobalref',
'deletelocalref',
'delim',
'depth',
'dereferencepointer',
'describe',
'description',
'deserialize',
'detach',
'detectcharset',
'didinclude',
'difference',
'digit',
'dir',
'displaycountry',
'displaylanguage',
'displayname',
'displayscript',
'displayvariant',
'div',
'dns_response',
'do',
'doatbegins',
'doatends',
'doccomment',
'doclose',
'doctype',
'document',
'documentelement',
'documentroot',
'domainbody',
'done',
'dosessions',
'dowithclose',
'dowlocal',
'download',
'drawtext',
'drop',
'dropindex',
'dsdbtable',
'dshoststable',
'dsinfo',
'dst',
'dstable',
'dstoffset',
'dtdid',
'dup',
'dup2',
'each',
'eachbyte',
'eachcharacter',
'eachchild',
'eachcomponent',
'eachdir',
'eachdirpath',
'eachdirpathrecursive',
'eachentry',
'eachfile',
'eachfilename',
'eachfilepath',
'eachfilepathrecursive',
'eachkey',
'eachline',
'eachlinebreak',
'eachmatch',
'eachnode',
'eachpair',
'eachpath',
'eachpathrecursive',
'eachrow',
'eachsub',
'eachword',
'eachwordbreak',
'element',
'eligiblepath',
'eligiblepaths',
'encodebase64',
'encodehex',
'encodehtml',
'encodehtmltoxml',
'encodemd5',
'encodepassword',
'encodeqp',
'encodesql',
'encodesql92',
'encodeurl',
'encodevalue',
'encodexml',
'encoding',
'enctype',
'end',
'endjs',
'endssl',
'endswith',
'endtls',
'enhance',
'ensurestopped',
'entities',
'entry',
'env',
'equals',
'era',
'erf',
'erfc',
'err',
'errcode',
'errmsg',
'error',
'errors',
'errstack',
'escape_member',
'establisherrorstate',
'exceptioncheck',
'exceptionclear',
'exceptiondescribe',
'exceptionoccurred',
'exchange',
'execinits',
'execinstalls',
'execute',
'executelazy',
'executenow',
'exists',
'exit',
'exitcode',
'exp',
'expire',
'expireminutes',
'expiresminutes',
'expm1',
'export16bits',
'export32bits',
'export64bits',
'export8bits',
'exportas',
'exportbytes',
'exportfdf',
'exportpointerbits',
'exportsigned16bits',
'exportsigned32bits',
'exportsigned64bits',
'exportsigned8bits',
'exportstring',
'expose',
'extendedyear',
'extensiondelimiter',
'extensions',
'extract',
'extractfast',
'extractfastone',
'extractimage',
'extractone',
'f',
'fabs',
'fail',
'failnoconnectionhandler',
'family',
'fatalerror',
'fcgireq',
'fchdir',
'fchmod',
'fchown',
'fd',
'features',
'fetchdata',
'fieldnames',
'fieldposition',
'fieldstable',
'fieldtype',
'fieldvalue',
'file',
'filename',
'filenames',
'filequeue',
'fileuploads',
'fileuploadsary',
'filterinputcolumn',
'finalize',
'find',
'findall',
'findandmodify',
'findbucket',
'findcase',
'findclass',
'findcount',
'finddescendant',
'findfirst',
'findinclude',
'findinctx',
'findindex',
'findlast',
'findpattern',
'findposition',
'findsymbols',
'first',
'firstchild',
'firstcomponent',
'firstdayofweek',
'firstnode',
'fixformat',
'flags',
'fliph',
'flipv',
'floor',
'flush',
'foldcase',
'foo',
'for',
'forcedrowid',
'foreach',
'foreachaccept',
'foreachbyte',
'foreachcharacter',
'foreachchild',
'foreachday',
'foreachentry',
'foreachfile',
'foreachfilename',
'foreachkey',
'foreachline',
'foreachlinebreak',
'foreachmatch',
'foreachnode',
'foreachpair',
'foreachpathcomponent',
'foreachrow',
'foreachspool',
'foreachsub',
'foreachwordbreak',
'form',
'format',
'formatas',
'formatcontextelement',
'formatcontextelements',
'formatnumber',
'free',
'frexp',
'from',
'fromname',
'fromport',
'fromreflectedfield',
'fromreflectedmethod',
'front',
'fsync',
'ftpdeletefile',
'ftpgetlisting',
'ftruncate',
'fullpath',
'fx',
'gamma',
'gatewayinterface',
'gen',
'generatechecksum',
'get',
'getabswidth',
'getalignment',
'getappsource',
'getarraylength',
'getattr',
'getattribute',
'getattributenamespace',
'getattributenode',
'getattributenodens',
'getattributens',
'getbarheight',
'getbarmultiplier',
'getbarwidth',
'getbaseline',
'getbold',
'getbooleanarrayelements',
'getbooleanarrayregion',
'getbooleanfield',
'getbordercolor',
'getborderwidth',
'getbytearrayelements',
'getbytearrayregion',
'getbytefield',
'getchararrayelements',
'getchararrayregion',
'getcharfield',
'getclass',
'getcode',
'getcolor',
'getcolumn',
'getcolumncount',
'getcolumns',
'getdatabasebyalias',
'getdatabasebyid',
'getdatabasebyname',
'getdatabasehost',
'getdatabasetable',
'getdatabasetablebyalias',
'getdatabasetablebyid',
'getdatabasetablepart',
'getdatasource',
'getdatasourcedatabase',
'getdatasourcedatabasebyid',
'getdatasourcehost',
'getdatasourceid',
'getdatasourcename',
'getdefaultstorage',
'getdoublearrayelements',
'getdoublearrayregion',
'getdoublefield',
'getelementbyid',
'getelementsbytagname',
'getelementsbytagnamens',
'getencoding',
'getface',
'getfield',
'getfieldid',
'getfile',
'getfloatarrayelements',
'getfloatarrayregion',
'getfloatfield',
'getfont',
'getformat',
'getfullfontname',
'getgroup',
'getgroupid',
'getheader',
'getheaders',
'gethostdatabase',
'gethtmlattr',
'gethtmlattrstring',
'getinclude',
'getintarrayelements',
'getintarrayregion',
'getintfield',
'getisocomment',
'getitalic',
'getlasterror',
'getlcapitype',
'getlibrary',
'getlongarrayelements',
'getlongarrayregion',
'getlongfield',
'getmargins',
'getmethodid',
'getmode',
'getnameditem',
'getnameditemns',
'getnode',
'getnumericvalue',
'getobjectarrayelement',
'getobjectclass',
'getobjectfield',
'getpadding',
'getpagenumber',
'getparts',
'getprefs',
'getpropertyvalue',
'getprowcount',
'getpsfontname',
'getrange',
'getrowcount',
'getset',
'getshortarrayelements',
'getshortarrayregion',
'getshortfield',
'getsize',
'getsortfieldspart',
'getspacing',
'getstaticbooleanfield',
'getstaticbytefield',
'getstaticcharfield',
'getstaticdoublefield',
'getstaticfieldid',
'getstaticfloatfield',
'getstaticintfield',
'getstaticlongfield',
'getstaticmethodid',
'getstaticobjectfield',
'getstaticshortfield',
'getstatus',
'getstringchars',
'getstringlength',
'getstyle',
'getsupportedencodings',
'gettablebyid',
'gettext',
'gettextalignment',
'gettextsize',
'gettrigger',
'gettype',
'getunderline',
'getuniquealiasname',
'getuser',
'getuserbykey',
'getuserid',
'getversion',
'getzipfilebytes',
'givenblock',
'gmt',
'gotconnection',
'gotfileupload',
'groupby',
'groupcolumns',
'groupcount',
'groupjoin',
'handlebreakpointget',
'handlebreakpointlist',
'handlebreakpointremove',
'handlebreakpointset',
'handlebreakpointupdate',
'handlecontextget',
'handlecontextnames',
'handlecontinuation',
'handledefinitionbody',
'handledefinitionhead',
'handledefinitionresource',
'handledevconnection',
'handleevalexpired',
'handlefeatureget',
'handlefeatureset',
'handlelassoappcontent',
'handlelassoappresponse',
'handlenested',
'handlenormalconnection',
'handlepop',
'handleresource',
'handlesource',
'handlestackget',
'handlestderr',
'handlestdin',
'handlestdout',
'handshake',
'hasattribute',
'hasattributens',
'hasattributes',
'hasbinaryproperty',
'haschildnodes',
'hasexpired',
'hasfeature',
'hasfield',
'hash',
'hashtmlattr',
'hasmethod',
'hastable',
'hastrailingcomponent',
'hasvalue',
'head',
'header',
'headerbytes',
'headers',
'headersarray',
'headersmap',
'height',
'histogram',
'home',
'host',
'hostcolumnnames',
'hostcolumnnames2',
'hostcolumns',
'hostcolumns2',
'hostdatasource',
'hostextra',
'hostid',
'hostisdynamic',
'hostmap',
'hostmap2',
'hostname',
'hostpassword',
'hostport',
'hostschema',
'hosttableencoding',
'hosttonet16',
'hosttonet32',
'hosttonet64',
'hostusername',
'hour',
'hourofampm',
'hourofday',
'hoursbetween',
'href',
'hreflang',
'htmlcontent',
'htmlizestacktrace',
'htmlizestacktracelink',
'httpaccept',
'httpacceptencoding',
'httpacceptlanguage',
'httpauthorization',
'httpcachecontrol',
'httpconnection',
'httpcookie',
'httpequiv',
'httphost',
'httpreferer',
'httpreferrer',
'httpuseragent',
'hypot',
'id',
'idealinmemory',
'idle',
'idmap',
'ifempty',
'ifkey',
'ifnotempty',
'ifnotkey',
'ignorecase',
'ilogb',
'imgptr',
'implementation',
'import16bits',
'import32bits',
'import64bits',
'import8bits',
'importas',
'importbytes',
'importfdf',
'importnode',
'importpointer',
'importstring',
'in',
'include',
'includebytes',
'includelibrary',
'includelibraryonce',
'includeonce',
'includes',
'includestack',
'indaylighttime',
'index',
'init',
'initialize',
'initrequest',
'inits',
'inneroncompare',
'input',
'inputcolumns',
'inputtype',
'insert',
'insertback',
'insertbefore',
'insertdata',
'insertfirst',
'insertfrom',
'insertfront',
'insertinternal',
'insertlast',
'insertpage',
'install',
'installs',
'integer',
'internalsubset',
'interrupt',
'intersection',
'inttocond',
'invoke',
'invokeautocollect',
'invokeuntil',
'invokewhile',
'ioctl',
'isa',
'isalive',
'isallof',
'isalnum',
'isalpha',
'isanyof',
'isbase',
'isblank',
'iscntrl',
'isdigit',
'isdir',
'isempty',
'isemptyelement',
'isfirststep',
'isfullpath',
'isgraph',
'ishttps',
'isidle',
'isinstanceof',
'islink',
'islower',
'ismultipart',
'isnan',
'isnota',
'isnotempty',
'isnothing',
'iso3country',
'iso3language',
'isopen',
'isprint',
'ispunct',
'issameobject',
'isset',
'issourcefile',
'isspace',
'isssl',
'issupported',
'istitle',
'istruetype',
'istype',
'isualphabetic',
'isulowercase',
'isupper',
'isuuppercase',
'isuwhitespace',
'isvalid',
'iswhitespace',
'isxdigit',
'isxhr',
'item',
'j0',
'j1',
'javascript',
'jbarcode',
'jcolor',
'jfont',
'jimage',
'jlist',
'jn',
'jobjectisa',
'join',
'jread',
'jscontent',
'jsonfornode',
'jsonhtml',
'jsonisleaf',
'jsonlabel',
'jtable',
'jtext',
'julianday',
'kernel',
'key',
'keycolumns',
'keys',
'keywords',
'kill',
'label',
'lang',
'language',
'last_insert_rowid',
'last',
'lastaccessdate',
'lastaccesstime',
'lastchild',
'lastcomponent',
'lasterror',
'lastinsertid',
'lastnode',
'lastpoint',
'lasttouched',
'lazyvalue',
'ldexp',
'leaveopen',
'left',
'length',
'lgamma',
'line',
'linediffers',
'linkto',
'linktype',
'list',
'listactivedatasources',
'listalldatabases',
'listalltables',
'listdatabasetables',
'listdatasourcedatabases',
'listdatasourcehosts',
'listdatasources',
'listen',
'listgroups',
'listgroupsbyuser',
'listhostdatabases',
'listhosts',
'listmethods',
'listnode',
'listusers',
'listusersbygroup',
'loadcerts',
'loaddatasourcehostinfo',
'loaddatasourceinfo',
'loadlibrary',
'localaddress',
'localname',
'locals',
'lock',
'log',
'log10',
'log1p',
'logb',
'lookupnamespace',
'lop',
'lowagiefont',
'lowercase',
'makecolor',
'makecolumnlist',
'makecolumnmap',
'makecookieyumyum',
'makefullpath',
'makeinheritedcopy',
'makenonrelative',
'makeurl',
'map',
'marker',
'matches',
'matchesstart',
'matchposition',
'matchstring',
'matchtriggers',
'max',
'maxinmemory',
'maxlength',
'maxrows',
'maxworkers',
'maybeslash',
'maybevalue',
'md5hex',
'media',
'members',
'merge',
'meta',
'method',
'methodname',
'millisecond',
'millisecondsinday',
'mime_boundary',
'mime_contenttype',
'mime_hdrs',
'mime',
'mimes',
'min',
'minute',
'minutesbetween',
'moddatestr',
'mode',
'modf',
'modificationdate',
'modificationtime',
'modulate',
'monitorenter',
'monitorexit',
'month',
'moveto',
'movetoattribute',
'movetoattributenamespace',
'movetoelement',
'movetofirstattribute',
'movetonextattribute',
'msg',
'mtime',
'multiple',
'n',
'name',
'named',
'namespaceuri',
'needinitialization',
'net',
'nettohost16',
'nettohost32',
'nettohost64',
'new',
'newbooleanarray',
'newbytearray',
'newchararray',
'newdoublearray',
'newfloatarray',
'newglobalref',
'newintarray',
'newlongarray',
'newobject',
'newobjectarray',
'newshortarray',
'newstring',
'next',
'nextafter',
'nextnode',
'nextprime',
'nextprune',
'nextprunedelta',
'nextsibling',
'nodeforpath',
'nodelist',
'nodename',
'nodetype',
'nodevalue',
'noop',
'normalize',
'notationname',
'notations',
'novaluelists',
'numsets',
'object',
'objects',
'objecttype',
'onclick',
'oncompare',
'oncomparestrict',
'onconvert',
'oncreate',
'ondblclick',
'onkeydown',
'onkeypress',
'onkeyup',
'onmousedown',
'onmousemove',
'onmouseout',
'onmouseover',
'onmouseup',
'onreset',
'onsubmit',
'ontop',
'open',
'openappend',
'openread',
'opentruncate',
'openwith',
'openwrite',
'openwriteonly',
'orderby',
'orderbydescending',
'out',
'output',
'outputencoding',
'ownerdocument',
'ownerelement',
'padleading',
'padtrailing',
'padzero',
'pagecount',
'pagerotation',
'pagesize',
'param',
'paramdescs',
'params',
'parent',
'parentdir',
'parentnode',
'parse_body',
'parse_boundary',
'parse_charset',
'parse_content_disposition',
'parse_content_transfer_encoding',
'parse_content_type',
'parse_hdrs',
'parse_mode',
'parse_msg',
'parse_parts',
'parse_rawhdrs',
'parse',
'parseas',
'parsedocument',
'parsenumber',
'parseoneheaderline',
'pass',
'path',
'pathinfo',
'pathtouri',
'pathtranslated',
'pause',
'payload',
'pdifference',
'perform',
'performonce',
'perms',
'pid',
'pixel',
'pm',
'polldbg',
'pollide',
'pop_capa',
'pop_cmd',
'pop_debug',
'pop_err',
'pop_get',
'pop_ids',
'pop_index',
'pop_log',
'pop_mode',
'pop_net',
'pop_res',
'pop_server',
'pop_timeout',
'pop_token',
'pop',
'popctx',
'popinclude',
'populate',
'port',
'position',
'postdispatch',
'postparam',
'postparams',
'postparamsary',
'poststring',
'pow',
'predispatch',
'prefix',
'preflight',
'prepare',
'prepared',
'pretty',
'prev',
'previoussibling',
'printsimplemsg',
'private_compare',
'private_find',
'private_findlast',
'private_merge',
'private_rebalanceforinsert',
'private_rebalanceforremove',
'private_replaceall',
'private_replacefirst',
'private_rotateleft',
'private_rotateright',
'private_setrange',
'private_split',
'probemimetype',
'provides',
'proxying',
'prune',
'publicid',
'pullhttpheader',
'pullmimepost',
'pulloneheaderline',
'pullpost',
'pullrawpost',
'pullrawpostchunks',
'pullrequest',
'pullrequestline',
'push',
'pushctx',
'pushinclude',
'qdarray',
'qdcount',
'queryparam',
'queryparams',
'queryparamsary',
'querystring',
'queue_maintenance',
'queue_messages',
'queue_status',
'queue',
'quit',
'r',
'raw',
'rawcontent',
'rawdiff',
'rawheader',
'rawheaders',
'rawinvokable',
'read',
'readattributevalue',
'readbytes',
'readbytesfully',
'readdestinations',
'readerror',
'readidobjects',
'readline',
'readmessage',
'readnumber',
'readobject',
'readobjecttcp',
'readpacket',
'readsomebytes',
'readstring',
'ready',
'realdoc',
'realpath',
'receivefd',
'recipients',
'recover',
'rect',
'rectype',
'red',
'redirectto',
'referrals',
'refid',
'refobj',
'refresh',
'rel',
'remainder',
'remoteaddr',
'remoteaddress',
'remoteport',
'remove',
'removeall',
'removeattribute',
'removeattributenode',
'removeattributens',
'removeback',
'removechild',
'removedatabasetable',
'removedatasource',
'removedatasourcedatabase',
'removedatasourcehost',
'removefield',
'removefirst',
'removefront',
'removegroup',
'removelast',
'removeleading',
'removenameditem',
'removenameditemns',
'removenode',
'removesubnode',
'removetrailing',
'removeuser',
'removeuserfromallgroups',
'removeuserfromgroup',
'rename',
'renderbytes',
'renderdocumentbytes',
'renderstring',
'replace',
'replaceall',
'replacechild',
'replacedata',
'replacefirst',
'replaceheader',
'replacepattern',
'representnode',
'representnoderesult',
'reqid',
'requestid',
'requestmethod',
'requestparams',
'requesturi',
'requires',
'reserve',
'reset',
'resize',
'resolutionh',
'resolutionv',
'resolvelinks',
'resourcedata',
'resourceinvokable',
'resourcename',
'resources',
'respond',
'restart',
'restname',
'result',
'results',
'resume',
'retr',
'retrieve',
'returncolumns',
'returntype',
'rev',
'reverse',
'rewind',
'right',
'rint',
'roll',
'root',
'rootmap',
'rotate',
'route',
'rowsfound',
'rset',
'rule',
'rules',
'run',
'running',
'runonce',
's',
'sa',
'safeexport8bits',
'sameas',
'save',
'savedata',
'scalb',
'scale',
'scanfordatasource',
'scantasks',
'scanworkers',
'schemaname',
'scheme',
'script',
'scriptextensions',
'scriptfilename',
'scriptname',
'scripttype',
'scripturi',
'scripturl',
'scrubkeywords',
'search',
'searchinbucket',
'searchurl',
'second',
'secondsbetween',
'seek',
'select',
'selected',
'selectmany',
'self',
'send',
'sendchunk',
'sendfd',
'sendfile',
'sendpacket',
'sendresponse',
'separator',
'serializationelements',
'serialize',
'serveraddr',
'serveradmin',
'servername',
'serverport',
'serverprotocol',
'serversignature',
'serversoftware',
'sessionsdump',
'sessionsmap',
'set',
'setalignment',
'setattr',
'setattribute',
'setattributenode',
'setattributenodens',
'setattributens',
'setbarheight',
'setbarmultiplier',
'setbarwidth',
'setbaseline',
'setbold',
'setbooleanarrayregion',
'setbooleanfield',
'setbordercolor',
'setborderwidth',
'setbytearrayregion',
'setbytefield',
'setchararrayregion',
'setcharfield',
'setcode',
'setcolor',
'setcolorspace',
'setcookie',
'setcwd',
'setdefaultstorage',
'setdestination',
'setdoublearrayregion',
'setdoublefield',
'setencoding',
'setface',
'setfieldvalue',
'setfindpattern',
'setfloatarrayregion',
'setfloatfield',
'setfont',
'setformat',
'setgeneratechecksum',
'setheaders',
'sethtmlattr',
'setignorecase',
'setinput',
'setintarrayregion',
'setintfield',
'setitalic',
'setlinewidth',
'setlongarrayregion',
'setlongfield',
'setmarker',
'setmaxfilesize',
'setmode',
'setname',
'setnameditem',
'setnameditemns',
'setobjectarrayelement',
'setobjectfield',
'setpadding',
'setpagenumber',
'setpagerange',
'setposition',
'setrange',
'setreplacepattern',
'setshortarrayregion',
'setshortfield',
'setshowchecksum',
'setsize',
'setspacing',
'setstaticbooleanfield',
'setstaticbytefield',
'setstaticcharfield',
'setstaticdoublefield',
'setstaticfloatfield',
'setstaticintfield',
'setstaticlongfield',
'setstaticobjectfield',
'setstaticshortfield',
'setstatus',
'settextalignment',
'settextsize',
'settimezone',
'settrait',
'setunderline',
'sharpen',
'shouldabort',
'shouldclose',
'showchecksum',
'showcode39startstop',
'showeanguardbars',
'shutdownrd',
'shutdownrdwr',
'shutdownwr',
'sin',
'sinh',
'size',
'skip',
'skiprows',
'sort',
'sortcolumns',
'source',
'sourcecolumn',
'sourcefile',
'sourceline',
'specified',
'split',
'splitconnection',
'splitdebuggingthread',
'splitextension',
'splittext',
'splitthread',
'splittoprivatedev',
'splituppath',
'sql',
'sqlite3',
'sqrt',
'src',
'srcpath',
'sslerrfail',
'stack',
'standby',
'start',
'startone',
'startup',
'stat',
'statement',
'statementonly',
'stats',
'status',
'statuscode',
'statusmsg',
'stdin',
'step',
'stls',
'stop',
'stoprunning',
'storedata',
'stripfirstcomponent',
'striplastcomponent',
'style',
'styletype',
'sub',
'subject',
'subnode',
'subnodes',
'substringdata',
'subtract',
'subtraits',
'sum',
'supportscontentrepresentation',
'swapbytes',
'systemid',
't',
'tabindex',
'table',
'tablecolumnnames',
'tablecolumns',
'tablehascolumn',
'tableizestacktrace',
'tableizestacktracelink',
'tablemap',
'tablename',
'tables',
'tabs',
'tabstr',
'tag',
'tagname',
'take',
'tan',
'tanh',
'target',
'tasks',
'tb',
'tell',
'testexitcode',
'testlock',
'textwidth',
'thenby',
'thenbydescending',
'threadreaddesc',
'throw',
'thrownew',
'time',
'timezone',
'title',
'titlecase',
'to',
'token',
'tolower',
'top',
'toreflectedfield',
'toreflectedmethod',
'total_changes',
'totitle',
'touch',
'toupper',
'toxmlstring',
'trace',
'trackingid',
'trait',
'transform',
'trigger',
'trim',
'trunk',
'tryfinderrorfile',
'trylock',
'tryreadobject',
'type',
'typename',
'uidl',
'uncompress',
'unescape',
'union',
'uniqueid',
'unlock',
'unspool',
'up',
'update',
'updategroup',
'upload',
'uppercase',
'url',
'used',
'usemap',
'user',
'usercolumns',
'valid',
'validate',
'validatesessionstable',
'value',
'values',
'valuetype',
'variant',
'version',
'wait',
'waitforcompletion',
'warnings',
'week',
'weekofmonth',
'weekofyear',
'where',
'width',
'workers',
'workinginputcolumns',
'workingkeycolumns',
'workingkeyfield_name',
'workingreturncolumns',
'workingsortcolumns',
'write',
'writebodybytes',
'writebytes',
'writeheader',
'writeheaderbytes',
'writeheaderline',
'writeid',
'writemessage',
'writeobject',
'writeobjecttcp',
'writestring',
'wroteheaders',
'xhtml',
'xmllang',
'y0',
'y1',
'year',
'yearwoy',
'yn',
'z',
'zip',
'zipfile',
'zipfilename',
'zipname',
'zips',
'zoneoffset',
),
'Lasso 8 Member Tags': (
'accept',
'add',
'addattachment',
'addattribute',
'addbarcode',
'addchapter',
'addcheckbox',
'addchild',
'addcombobox',
'addcomment',
'addcontent',
'addhiddenfield',
'addhtmlpart',
'addimage',
'addjavascript',
'addlist',
'addnamespace',
'addnextsibling',
'addpage',
'addparagraph',
'addparenttype',
'addpart',
'addpasswordfield',
'addphrase',
'addprevsibling',
'addradiobutton',
'addradiogroup',
'addresetbutton',
'addsection',
'addselectlist',
'addsibling',
'addsubmitbutton',
'addtable',
'addtext',
'addtextarea',
'addtextfield',
'addtextpart',
'alarms',
'annotate',
'answer',
'append',
'appendreplacement',
'appendtail',
'arc',
'asasync',
'astype',
'atbegin',
'atbottom',
'atend',
'atfarleft',
'atfarright',
'attop',
'attributecount',
'attributes',
'authenticate',
'authorize',
'backward',
'baseuri',
'bcc',
'beanproperties',
'beginswith',
'bind',
'bitand',
'bitclear',
'bitflip',
'bitformat',
'bitnot',
'bitor',
'bitset',
'bitshiftleft',
'bitshiftright',
'bittest',
'bitxor',
'blur',
'body',
'boundary',
'bytes',
'call',
'cancel',
'capabilities',
'cc',
'chardigitvalue',
'charname',
'charset',
'chartype',
'children',
'circle',
'close',
'closepath',
'closewrite',
'code',
'colorspace',
'command',
'comments',
'compare',
'comparecodepointorder',
'compile',
'composite',
'connect',
'contains',
'content_disposition',
'content_transfer_encoding',
'content_type',
'contents',
'contrast',
'convert',
'crop',
'curveto',
'data',
'date',
'day',
'daylights',
'dayofweek',
'dayofyear',
'decrement',
'delete',
'depth',
'describe',
'description',
'deserialize',
'detach',
'detachreference',
'difference',
'digit',
'document',
'down',
'drawtext',
'dst',
'dump',
'endswith',
'enhance',
'equals',
'errors',
'eval',
'events',
'execute',
'export16bits',
'export32bits',
'export64bits',
'export8bits',
'exportfdf',
'exportstring',
'extract',
'extractone',
'fieldnames',
'fieldtype',
'fieldvalue',
'file',
'find',
'findindex',
'findnamespace',
'findnamespacebyhref',
'findpattern',
'findposition',
'first',
'firstchild',
'fliph',
'flipv',
'flush',
'foldcase',
'foreach',
'format',
'forward',
'freebusies',
'freezetype',
'freezevalue',
'from',
'fulltype',
'generatechecksum',
'get',
'getabswidth',
'getalignment',
'getattribute',
'getattributenamespace',
'getbarheight',
'getbarmultiplier',
'getbarwidth',
'getbaseline',
'getbordercolor',
'getborderwidth',
'getcode',
'getcolor',
'getcolumncount',
'getencoding',
'getface',
'getfont',
'getformat',
'getfullfontname',
'getheaders',
'getmargins',
'getmethod',
'getnumericvalue',
'getpadding',
'getpagenumber',
'getparams',
'getproperty',
'getpsfontname',
'getrange',
'getrowcount',
'getsize',
'getspacing',
'getsupportedencodings',
'gettextalignment',
'gettextsize',
'gettype',
'gmt',
'groupcount',
'hasattribute',
'haschildren',
'hasvalue',
'header',
'headers',
'height',
'histogram',
'hosttonet16',
'hosttonet32',
'hour',
'id',
'ignorecase',
'import16bits',
'import32bits',
'import64bits',
'import8bits',
'importfdf',
'importstring',
'increment',
'input',
'insert',
'insertatcurrent',
'insertfirst',
'insertfrom',
'insertlast',
'insertpage',
'integer',
'intersection',
'invoke',
'isa',
'isalnum',
'isalpha',
'isbase',
'iscntrl',
'isdigit',
'isemptyelement',
'islower',
'isopen',
'isprint',
'isspace',
'istitle',
'istruetype',
'isualphabetic',
'isulowercase',
'isupper',
'isuuppercase',
'isuwhitespace',
'iswhitespace',
'iterator',
'javascript',
'join',
'journals',
'key',
'keys',
'last',
'lastchild',
'lasterror',
'left',
'length',
'line',
'listen',
'localaddress',
'localname',
'lock',
'lookupnamespace',
'lowercase',
'marker',
'matches',
'matchesstart',
'matchposition',
'matchstring',
'merge',
'millisecond',
'minute',
'mode',
'modulate',
'month',
'moveto',
'movetoattributenamespace',
'movetoelement',
'movetofirstattribute',
'movetonextattribute',
'name',
'namespaces',
'namespaceuri',
'nettohost16',
'nettohost32',
'newchild',
'next',
'nextsibling',
'nodetype',
'open',
'output',
'padleading',
'padtrailing',
'pagecount',
'pagesize',
'paraminfo',
'params',
'parent',
'path',
'pixel',
'position',
'prefix',
'previoussibling',
'properties',
'rawheaders',
'read',
'readattributevalue',
'readerror',
'readfrom',
'readline',
'readlock',
'readstring',
'readunlock',
'recipients',
'rect',
'refcount',
'referrals',
'remoteaddress',
'remove',
'removeall',
'removeattribute',
'removechild',
'removecurrent',
'removefirst',
'removelast',
'removeleading',
'removenamespace',
'removetrailing',
'render',
'replace',
'replaceall',
'replacefirst',
'replacepattern',
'replacewith',
'reserve',
'reset',
'resolutionh',
'resolutionv',
'response',
'results',
'retrieve',
'returntype',
'reverse',
'reverseiterator',
'right',
'rotate',
'run',
'save',
'scale',
'search',
'second',
'send',
'serialize',
'set',
'setalignment',
'setbarheight',
'setbarmultiplier',
'setbarwidth',
'setbaseline',
'setblocking',
'setbordercolor',
'setborderwidth',
'setbytes',
'setcode',
'setcolor',
'setcolorspace',
'setdatatype',
'setencoding',
'setface',
'setfieldvalue',
'setfont',
'setformat',
'setgeneratechecksum',
'setheight',
'setlassodata',
'setlinewidth',
'setmarker',
'setmode',
'setname',
'setpadding',
'setpagenumber',
'setpagerange',
'setposition',
'setproperty',
'setrange',
'setshowchecksum',
'setsize',
'setspacing',
'settemplate',
'settemplatestr',
'settextalignment',
'settextdata',
'settextsize',
'settype',
'setunderline',
'setwidth',
'setxmldata',
'sharpen',
'showchecksum',
'showcode39startstop',
'showeanguardbars',
'signal',
'signalall',
'size',
'smooth',
'sort',
'sortwith',
'split',
'standards',
'steal',
'subject',
'substring',
'subtract',
'swapbytes',
'textwidth',
'time',
'timezones',
'titlecase',
'to',
'todos',
'tolower',
'totitle',
'toupper',
'transform',
'trim',
'type',
'unescape',
'union',
'uniqueid',
'unlock',
'unserialize',
'up',
'uppercase',
'value',
'values',
'valuetype',
'wait',
'waskeyword',
'week',
'width',
'write',
'writelock',
'writeto',
'writeunlock',
'xmllang',
'xmlschematype',
'year',
)
}
GarySparrow/mFlaskWeb | venv/Lib/site-packages/pygments/lexers/_lasso_builtins.py | Python | mit | 134321
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
lastchange.py -- Chromium revision fetching utility.
"""
import re
import optparse
import os
import subprocess
import sys
_GIT_SVN_ID_REGEX = re.compile(r'.*git-svn-id:\s*([^@]*)@([0-9]+)', re.DOTALL)
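# Matches trailer lines such as (same example as quoted further below):
#   git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85528 0039d316...
# capturing the URL before '@' and the numeric revision after it.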
class VersionInfo(object):
def __init__(self, url, revision):
self.url = url
self.revision = revision
def FetchSVNRevision(directory, svn_url_regex):
"""
Fetch the Subversion branch and revision for a given directory.
Errors are swallowed.
Returns:
A VersionInfo object or None on error.
"""
try:
proc = subprocess.Popen(['svn', 'info'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=directory,
shell=(sys.platform=='win32'))
except OSError:
# command is apparently either not installed or not executable.
return None
if not proc:
return None
attrs = {}
for line in proc.stdout:
line = line.strip()
if not line:
continue
key, val = line.split(': ', 1)
attrs[key] = val
try:
match = svn_url_regex.search(attrs['URL'])
if match:
url = match.group(2)
else:
url = ''
revision = attrs['Revision']
except KeyError:
return None
return VersionInfo(url, revision)
def RunGitCommand(directory, command):
"""
Launches git subcommand.
Errors are swallowed.
Returns:
A process object or None.
"""
command = ['git'] + command
# Force shell usage under cygwin. This is a workaround for
# mysterious loss of cwd while invoking cygwin's git.
# We can't just pass shell=True to Popen, as under win32 this will
# cause CMD to be used, while we explicitly want a cygwin shell.
if sys.platform == 'cygwin':
command = ['sh', '-c', ' '.join(command)]
try:
proc = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=directory,
shell=(sys.platform=='win32'))
return proc
except OSError:
return None
def FetchGitRevision(directory):
"""
Fetch the Git hash for a given directory.
Errors are swallowed.
Returns:
A VersionInfo object or None on error.
"""
hsh = ''
proc = RunGitCommand(directory, ['rev-parse', 'HEAD'])
if proc:
output = proc.communicate()[0].strip()
if proc.returncode == 0 and output:
hsh = output
if not hsh:
return None
pos = ''
proc = RunGitCommand(directory, ['show', '-s', '--format=%B', 'HEAD'])
if proc:
output = proc.communicate()[0]
if proc.returncode == 0 and output:
for line in reversed(output.splitlines()):
if line.startswith('Cr-Commit-Position:'):
pos = line.rsplit()[-1].strip()
if not pos:
return VersionInfo('git', hsh)
return VersionInfo('git', '%s-%s' % (hsh, pos))
def FetchGitSVNURLAndRevision(directory, svn_url_regex):
"""
Fetch the Subversion URL and revision through Git.
Errors are swallowed.
Returns:
A tuple containing the Subversion URL and revision.
"""
proc = RunGitCommand(directory, ['log', '-1', '--format=%b'])
if proc:
output = proc.communicate()[0].strip()
if proc.returncode == 0 and output:
# Extract the latest SVN revision and the SVN URL.
# The target line is the last "git-svn-id: ..." line like this:
# git-svn-id: svn://svn.chromium.org/chrome/trunk/src@85528 0039d316....
match = _GIT_SVN_ID_REGEX.search(output)
if match:
revision = match.group(2)
url_match = svn_url_regex.search(match.group(1))
if url_match:
url = url_match.group(2)
else:
url = ''
return url, revision
return None, None
def FetchGitSVNRevision(directory, svn_url_regex):
"""
Fetch the Git-SVN identifier for the local tree.
Errors are swallowed.
"""
url, revision = FetchGitSVNURLAndRevision(directory, svn_url_regex)
if url and revision:
return VersionInfo(url, revision)
return None
def FetchVersionInfo(default_lastchange, directory=None,
directory_regex_prior_to_src_url='chrome|blink|svn'):
"""
  Returns a VersionInfo object (URL/branch and revision) describing the last
  change, obtained from whichever revision control system is available.
"""
svn_url_regex = re.compile(
r'.*/(' + directory_regex_prior_to_src_url + r')(/.*)')
version_info = (FetchSVNRevision(directory, svn_url_regex) or
FetchGitSVNRevision(directory, svn_url_regex) or
FetchGitRevision(directory))
if not version_info:
if default_lastchange and os.path.exists(default_lastchange):
revision = open(default_lastchange, 'r').read().strip()
version_info = VersionInfo(None, revision)
else:
version_info = VersionInfo(None, None)
return version_info
def GetHeaderGuard(path):
"""
Returns the header #define guard for the given file path.
This treats everything after the last instance of "src/" as being a
relevant part of the guard. If there is no "src/", then the entire path
is used.
"""
src_index = path.rfind('src/')
if src_index != -1:
guard = path[src_index + 4:]
else:
guard = path
guard = guard.upper()
return guard.replace('/', '_').replace('.', '_').replace('\\', '_') + '_'
def GetHeaderContents(path, define, version):
"""
  Returns the contents the header file should have to indicate the given
  revision. Note that the #define is specified as a string, even though it is
  currently always an SVN revision number, in case we need to move to git hashes.
"""
header_guard = GetHeaderGuard(path)
header_contents = """/* Generated by lastchange.py, do not edit.*/
#ifndef %(header_guard)s
#define %(header_guard)s
#define %(define)s "%(version)s"
#endif // %(header_guard)s
"""
header_contents = header_contents % { 'header_guard': header_guard,
'define': define,
'version': version }
return header_contents
def WriteIfChanged(file_name, contents):
"""
Writes the specified contents to the specified file_name
iff the contents are different than the current contents.
"""
try:
old_contents = open(file_name, 'r').read()
except EnvironmentError:
pass
else:
if contents == old_contents:
return
os.unlink(file_name)
open(file_name, 'w').write(contents)
def main(argv=None):
if argv is None:
argv = sys.argv
parser = optparse.OptionParser(usage="lastchange.py [options]")
parser.add_option("-d", "--default-lastchange", metavar="FILE",
help="Default last change input FILE.")
parser.add_option("-m", "--version-macro",
help="Name of C #define when using --header. Defaults to " +
"LAST_CHANGE.",
default="LAST_CHANGE")
parser.add_option("-o", "--output", metavar="FILE",
help="Write last change to FILE. " +
"Can be combined with --header to write both files.")
parser.add_option("", "--header", metavar="FILE",
help="Write last change to FILE as a C/C++ header. " +
"Can be combined with --output to write both files.")
parser.add_option("--revision-only", action='store_true',
help="Just print the SVN revision number. Overrides any " +
"file-output-related options.")
parser.add_option("-s", "--source-dir", metavar="DIR",
help="Use repository in the given directory.")
opts, args = parser.parse_args(argv[1:])
out_file = opts.output
header = opts.header
while len(args) and out_file is None:
if out_file is None:
out_file = args.pop(0)
if args:
sys.stderr.write('Unexpected arguments: %r\n\n' % args)
parser.print_help()
sys.exit(2)
if opts.source_dir:
src_dir = opts.source_dir
else:
src_dir = os.path.dirname(os.path.abspath(__file__))
version_info = FetchVersionInfo(opts.default_lastchange, src_dir)
  if version_info.revision is None:
version_info.revision = '0'
if opts.revision_only:
print version_info.revision
else:
contents = "LASTCHANGE=%s\n" % version_info.revision
if not out_file and not opts.header:
sys.stdout.write(contents)
else:
if out_file:
WriteIfChanged(out_file, contents)
if header:
WriteIfChanged(header,
GetHeaderContents(header, opts.version_macro,
version_info.revision))
return 0
if __name__ == '__main__':
sys.exit(main())
s20121035/rk3288_android5.1_repo | external/chromium_org/build/util/lastchange.py | Python | gpl-3.0 | 8872
#!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_volume
version_added: "2.2"
short_description: Manage storage volumes (standard and thin)
description:
- Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays.
extends_documentation_fragment:
- netapp.eseries
options:
state:
description:
- Whether the specified volume should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the volume to manage
required: true
storage_pool_name:
description:
- "Required only when requested state is 'present'. The name of the storage pool the volume should exist on."
required: true
size_unit:
description:
- The unit used to interpret the size parameter
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
size:
description:
- "Required only when state = 'present'. The size of the volume in (size_unit)."
required: true
segment_size_kb:
description:
- The segment size of the new volume
        default: 128
thin_provision:
description:
- Whether the volume should be thin provisioned. Thin volumes can only be created on disk pools (raidDiskPool).
type: bool
default: 'no'
thin_volume_repo_size:
description:
- Initial size of the thin volume repository volume (in size_unit)
required: True
thin_volume_max_repo_size:
description:
- Maximum size that the thin volume repository volume will automatically expand to
default: same as size (in size_unit)
ssd_cache_enabled:
description:
- Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined)
- The default value is to ignore existing SSD cache setting.
type: bool
data_assurance_enabled:
description:
- If data assurance should be enabled for the volume
type: bool
default: 'no'
# TODO: doc thin volume parameters
author: Kevin Hulquest (@hulquest)
'''
EXAMPLES = '''
- name: No thin volume
netapp_e_volume:
ssid: "{{ ssid }}"
name: NewThinVolumeByAnsible
state: absent
log_path: /tmp/volume.log
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
when: check_volume
- name: No fat volume
netapp_e_volume:
ssid: "{{ ssid }}"
name: NewVolumeByAnsible
state: absent
log_path: /tmp/volume.log
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
when: check_volume
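# Illustrative sketch only (not part of the original examples): a minimal
# 'state: present' task assembled from the documented options above. The pool
# name and size values are hypothetical.
- name: Create standard volume
  netapp_e_volume:
    ssid: "{{ ssid }}"
    name: NewVolumeByAnsible
    state: present
    storage_pool_name: ansible_pool
    size: 10
    size_unit: gb
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ netapp_api_validate_certs }}"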
'''
RETURN = '''
---
msg:
description: State of volume
type: string
returned: always
sample: "Standard volume [workload_vol_1] has been created."
'''
import json
import logging
import time
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
def ifilter(predicate, iterable):
# python 2, 3 generic filtering.
if predicate is None:
predicate = bool
for x in iterable:
if predicate(x):
yield x
class NetAppESeriesVolume(object):
def __init__(self):
self._size_unit_map = dict(
bytes=1,
b=1,
kb=1024,
mb=1024 ** 2,
gb=1024 ** 3,
tb=1024 ** 4,
pb=1024 ** 5,
eb=1024 ** 6,
zb=1024 ** 7,
yb=1024 ** 8
)
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
storage_pool_name=dict(type='str'),
size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
type='str'),
size=dict(type='int'),
segment_size_kb=dict(default=128, choices=[8, 16, 32, 64, 128, 256, 512], type='int'),
ssd_cache_enabled=dict(type='bool'), # no default, leave existing setting alone
data_assurance_enabled=dict(default=False, type='bool'),
thin_provision=dict(default=False, type='bool'),
thin_volume_repo_size=dict(type='int'),
thin_volume_max_repo_size=dict(type='int'),
# TODO: add cache, owning controller support, thin expansion policy, etc
log_path=dict(type='str'),
))
self.module = AnsibleModule(argument_spec=argument_spec,
required_if=[
('state', 'present', ['storage_pool_name', 'size']),
('thin_provision', 'true', ['thin_volume_repo_size'])
],
supports_check_mode=True)
p = self.module.params
log_path = p['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
self.debug = self._logger.debug
if log_path:
logging.basicConfig(level=logging.DEBUG, filename=log_path)
self.state = p['state']
self.ssid = p['ssid']
self.name = p['name']
self.storage_pool_name = p['storage_pool_name']
self.size_unit = p['size_unit']
self.size = p['size']
self.segment_size_kb = p['segment_size_kb']
self.ssd_cache_enabled = p['ssd_cache_enabled']
self.data_assurance_enabled = p['data_assurance_enabled']
self.thin_provision = p['thin_provision']
self.thin_volume_repo_size = p['thin_volume_repo_size']
self.thin_volume_max_repo_size = p['thin_volume_max_repo_size']
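        # per the documented default, fall back to the requested volume size when
        # no maximum repository size was given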
if not self.thin_volume_max_repo_size:
self.thin_volume_max_repo_size = self.size
self.validate_certs = p['validate_certs']
try:
self.api_usr = p['api_username']
self.api_pwd = p['api_password']
self.api_url = p['api_url']
except KeyError:
self.module.fail_json(msg="You must pass in api_username "
"and api_password and api_url to the module.")
def get_volume(self, volume_name):
self.debug('fetching volumes')
# fetch the list of volume objects and look for one with a matching name (we'll need to merge volumes and thin-volumes)
try:
(rc, volumes) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
headers=dict(Accept="application/json"), url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs)
except Exception as err:
self.module.fail_json(
msg="Failed to obtain list of standard/thick volumes. Array Id [%s]. Error[%s]." % (self.ssid,
to_native(err)))
try:
self.debug('fetching thin-volumes')
(rc, thinvols) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
headers=dict(Accept="application/json"), url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs)
except Exception as err:
self.module.fail_json(
msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]." % (self.ssid, to_native(err)))
volumes.extend(thinvols)
self.debug("searching for volume '%s'", volume_name)
volume_detail = next(ifilter(lambda a: a['name'] == volume_name, volumes), None)
if volume_detail:
self.debug('found')
else:
self.debug('not found')
return volume_detail
def get_storage_pool(self, storage_pool_name):
self.debug("fetching storage pools")
# map the storage pool name to its id
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
headers=dict(Accept="application/json"), url_username=self.api_usr,
url_password=self.api_pwd, validate_certs=self.validate_certs)
except Exception as err:
self.module.fail_json(
msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]." % (self.ssid, to_native(err)))
self.debug("searching for storage pool '%s'", storage_pool_name)
pool_detail = next(ifilter(lambda a: a['name'] == storage_pool_name, resp), None)
if pool_detail:
self.debug('found')
else:
self.debug('not found')
return pool_detail
def create_volume(self, pool_id, name, size_unit, size, segment_size_kb, data_assurance_enabled):
volume_add_req = dict(
name=name,
poolId=pool_id,
sizeUnit=size_unit,
size=size,
segSize=segment_size_kb,
dataAssuranceEnabled=data_assurance_enabled,
)
self.debug("creating volume '%s'", name)
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
data=json.dumps(volume_add_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs,
timeout=120)
except Exception as err:
self.module.fail_json(
msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid,
to_native(err)))
def create_thin_volume(self, pool_id, name, size_unit, size, thin_volume_repo_size,
thin_volume_max_repo_size, data_assurance_enabled):
thin_volume_add_req = dict(
name=name,
poolId=pool_id,
sizeUnit=size_unit,
virtualSize=size,
repositorySize=thin_volume_repo_size,
maximumRepositorySize=thin_volume_max_repo_size,
dataAssuranceEnabled=data_assurance_enabled,
)
self.debug("creating thin-volume '%s'", name)
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
data=json.dumps(thin_volume_add_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs,
timeout=120)
except Exception as err:
self.module.fail_json(
msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
self.ssid,
to_native(err)))
def delete_volume(self):
# delete the volume
self.debug("deleting volume '%s'", self.volume_detail['name'])
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/%s/%s" % (self.ssid, self.volume_resource_name,
self.volume_detail['id']),
method='DELETE', url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs, timeout=120)
except Exception as err:
self.module.fail_json(
msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid,
to_native(err)))
@property
def volume_resource_name(self):
if self.volume_detail['thinProvisioned']:
return 'thin-volumes'
else:
return 'volumes'
@property
def volume_properties_changed(self):
return self.volume_ssdcache_setting_changed # or with other props here when extended
# TODO: add support for r/w cache settings, owning controller, scan settings, expansion policy, growth alert threshold
@property
def volume_ssdcache_setting_changed(self):
# None means ignore existing setting
if self.ssd_cache_enabled is not None and self.ssd_cache_enabled != self.volume_detail['flashCached']:
self.debug("flash cache setting changed")
return True
def update_volume_properties(self):
update_volume_req = dict()
# conditionally add values so we ignore unspecified props
if self.volume_ssdcache_setting_changed:
update_volume_req['flashCache'] = self.ssd_cache_enabled
self.debug("updating volume properties...")
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/%s/%s/" % (self.ssid, self.volume_resource_name,
self.volume_detail['id']),
data=json.dumps(update_volume_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
timeout=120)
except Exception as err:
self.module.fail_json(
msg="Failed to update volume properties. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
self.ssid,
to_native(err)))
@property
def volume_needs_expansion(self):
current_size_bytes = int(self.volume_detail['capacity'])
requested_size_bytes = self.size * self._size_unit_map[self.size_unit]
# TODO: check requested/current repo volume size for thin-volumes as well
# TODO: do we need to build any kind of slop factor in here?
return requested_size_bytes > current_size_bytes
def expand_volume(self):
is_thin = self.volume_detail['thinProvisioned']
if is_thin:
# TODO: support manual repo expansion as well
self.debug('expanding thin volume')
thin_volume_expand_req = dict(
newVirtualSize=self.size,
sizeUnit=self.size_unit
)
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes/%s/expand" % (self.ssid,
self.volume_detail[
'id']),
data=json.dumps(thin_volume_expand_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs, timeout=120)
except Exception as err:
self.module.fail_json(
msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
self.ssid,
to_native(err)))
# TODO: check return code
else:
self.debug('expanding volume')
volume_expand_req = dict(
expansionSize=self.size,
sizeUnit=self.size_unit
)
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
self.volume_detail['id']),
data=json.dumps(volume_expand_req), headers=HEADERS, method='POST',
url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
timeout=120)
except Exception as err:
self.module.fail_json(
msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
self.ssid,
to_native(err)))
self.debug('polling for completion...')
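            # poll the volume expansion endpoint until it reports action == 'none',
            # which marks completion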
while True:
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
self.volume_detail[
'id']),
method='GET', url_username=self.api_usr, url_password=self.api_pwd,
validate_certs=self.validate_certs)
except Exception as err:
self.module.fail_json(
msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]. Error[%s]." % (
self.name, self.ssid, to_native(err)))
action = resp['action']
percent_complete = resp['percentComplete']
self.debug('expand action %s, %s complete...', action, percent_complete)
if action == 'none':
self.debug('expand complete')
break
else:
time.sleep(5)
def apply(self):
changed = False
volume_exists = False
msg = None
self.volume_detail = self.get_volume(self.name)
if self.volume_detail:
volume_exists = True
if self.state == 'absent':
self.debug("CHANGED: volume exists, but requested state is 'absent'")
changed = True
elif self.state == 'present':
# check requested volume size, see if expansion is necessary
if self.volume_needs_expansion:
self.debug("CHANGED: requested volume size %s%s is larger than current size %sb",
self.size, self.size_unit, self.volume_detail['capacity'])
changed = True
if self.volume_properties_changed:
self.debug("CHANGED: one or more volume properties have changed")
changed = True
else:
if self.state == 'present':
self.debug("CHANGED: volume does not exist, but requested state is 'present'")
changed = True
if changed:
if self.module.check_mode:
self.debug('skipping changes due to check mode')
else:
if self.state == 'present':
if not volume_exists:
pool_detail = self.get_storage_pool(self.storage_pool_name)
if not pool_detail:
self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name)
if self.thin_provision and not pool_detail['diskPool']:
self.module.fail_json(
msg='Thin provisioned volumes can only be located on disk pools (not volume groups)')
pool_id = pool_detail['id']
if not self.thin_provision:
self.create_volume(pool_id, self.name, self.size_unit, self.size, self.segment_size_kb,
self.data_assurance_enabled)
msg = "Standard volume [%s] has been created." % (self.name)
else:
self.create_thin_volume(pool_id, self.name, self.size_unit, self.size,
self.thin_volume_repo_size, self.thin_volume_max_repo_size,
self.data_assurance_enabled)
msg = "Thin volume [%s] has been created." % (self.name)
else: # volume exists but differs, modify...
if self.volume_needs_expansion:
self.expand_volume()
msg = "Volume [%s] has been expanded." % (self.name)
# this stuff always needs to run on present (since props can't be set on creation)
if self.volume_properties_changed:
self.update_volume_properties()
                        msg = "Properties of volume [%s] have been updated." % (self.name)
elif self.state == 'absent':
self.delete_volume()
msg = "Volume [%s] has been deleted." % (self.name)
else:
self.debug("exiting with no changes")
if self.state == 'absent':
msg = "Volume [%s] did not exist." % (self.name)
else:
msg = "Volume [%s] already exists." % (self.name)
self.module.exit_json(msg=msg, changed=changed)
def main():
v = NetAppESeriesVolume()
try:
v.apply()
except Exception as e:
v.debug("Exception in apply(): \n%s", format_exc())
v.module.fail_json(msg="Module failed. Error [%s]." % to_native(e))
if __name__ == '__main__':
main()
| hryamzik/ansible | lib/ansible/modules/storage/netapp/netapp_e_volume.py | Python | gpl-3.0 | 22,936 |
# -*- coding: utf-8 -*-
"""
E2E tests for the LMS.
"""
from .helpers import UniqueCourseTest
from ..pages.studio.auto_auth import AutoAuthPage
from ..pages.lms.courseware import CoursewarePage
from ..pages.lms.annotation_component import AnnotationComponentPage
from ..fixtures.course import CourseFixture, XBlockFixtureDesc
from textwrap import dedent
def _correctness(choice, target):
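    """Return the grading class for an option: the target region is 'correct',
    an adjacent (off-by-one) region is 'partially-correct', anything else is
    'incorrect'."""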
if choice == target:
return "correct"
elif abs(choice - target) == 1:
return "partially-correct"
else:
return "incorrect"
class AnnotatableProblemTest(UniqueCourseTest):
"""
Tests for annotation components.
"""
USERNAME = "STAFF_TESTER"
EMAIL = "[email protected]"
DATA_TEMPLATE = dedent("""\
<annotatable>
<instructions>Instruction text</instructions>
<p>{}</p>
</annotatable>
""")
ANNOTATION_TEMPLATE = dedent("""\
Before {0}.
<annotation title="region {0}" body="Comment {0}" highlight="yellow" problem="{0}">
Region Contents {0}
</annotation>
After {0}.
""")
PROBLEM_TEMPLATE = dedent("""\
<problem max_attempts="1" weight="">
<annotationresponse>
<annotationinput>
<title>Question {number}</title>
<text>Region Contents {number}</text>
<comment>What number is this region?</comment>
<comment_prompt>Type your response below:</comment_prompt>
<tag_prompt>What number is this region?</tag_prompt>
<options>
{options}
</options>
</annotationinput>
</annotationresponse>
<solution>
This problem is checking region {number}
</solution>
</problem>
""")
OPTION_TEMPLATE = """<option choice="{correctness}">{number}</option>"""
def setUp(self):
super(AnnotatableProblemTest, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Install a course with two annotations and two annotations problems.
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.annotation_count = 2
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Annotation Vertical').add_children(
XBlockFixtureDesc('annotatable', 'Test Annotation Module',
data=self.DATA_TEMPLATE.format("\n".join(
self.ANNOTATION_TEMPLATE.format(i) for i in xrange(self.annotation_count)
))),
XBlockFixtureDesc('problem', 'Test Annotation Problem 0',
data=self.PROBLEM_TEMPLATE.format(number=0, options="\n".join(
self.OPTION_TEMPLATE.format(
number=k,
correctness=_correctness(k, 0))
for k in xrange(self.annotation_count)
))),
XBlockFixtureDesc('problem', 'Test Annotation Problem 1',
data=self.PROBLEM_TEMPLATE.format(number=1, options="\n".join(
self.OPTION_TEMPLATE.format(
number=k,
correctness=_correctness(k, 1))
for k in xrange(self.annotation_count)
)))
)
)
)
).install()
# Auto-auth register for the course.
AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
course_id=self.course_id, staff=False).visit()
def _goto_annotation_component_page(self):
"""
Open annotation component page with assertion.
"""
self.courseware_page.visit()
annotation_component_page = AnnotationComponentPage(self.browser)
self.assertEqual(
            annotation_component_page.component_name, 'TEST ANNOTATION MODULE'
)
return annotation_component_page
def test_annotation_component(self):
"""
Test annotation components links to annotation problems.
"""
annotation_component_page = self._goto_annotation_component_page()
for i in xrange(self.annotation_count):
annotation_component_page.click_reply_annotation(i)
self.assertTrue(annotation_component_page.check_scroll_to_problem())
annotation_component_page.answer_problem()
self.assertTrue(annotation_component_page.check_feedback())
annotation_component_page.click_return_to_annotation()
self.assertTrue(annotation_component_page.check_scroll_to_annotation())
| ahmadiga/min_edx | common/test/acceptance/tests/test_annotatable.py | Python | agpl-3.0 | 5,304 |
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from nose.tools import assert_raises
import gevent
from zerorpc import zmq
import zerorpc
from testutils import teardown, random_ipc_endpoint
def test_close_server_hbchan():
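    # Tests in this module wire an XREP server and an XREQ client together through
    # ChannelMultiplexer and wrap both channels in HeartBeatOnChannel(freq=1); most
    # of them then check that closing or dropping one side is seen by the other as
    # zerorpc.LostRemote.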
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.XREP)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.XREQ)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=1)
client_hbchan.emit('openthat', None)
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=1)
server_hbchan.recv()
gevent.sleep(3)
print 'CLOSE SERVER SOCKET!!!'
server_hbchan.close()
with assert_raises(zerorpc.LostRemote):
client_hbchan.recv()
print 'CLIENT LOST SERVER :)'
client_hbchan.close()
server.close()
client.close()
def test_close_client_hbchan():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.XREP)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.XREQ)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=1)
client_hbchan.emit('openthat', None)
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=1)
server_hbchan.recv()
gevent.sleep(3)
print 'CLOSE CLIENT SOCKET!!!'
client_hbchan.close()
with assert_raises(zerorpc.LostRemote):
server_hbchan.recv()
print 'SERVER LOST CLIENT :)'
server_hbchan.close()
server.close()
client.close()
def test_heartbeat_can_open_channel_server_close():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.XREP)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.XREQ)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=1)
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=1)
gevent.sleep(3)
print 'CLOSE SERVER SOCKET!!!'
server_hbchan.close()
with assert_raises(zerorpc.LostRemote):
client_hbchan.recv()
print 'CLIENT LOST SERVER :)'
client_hbchan.close()
server.close()
client.close()
def test_heartbeat_can_open_channel_client_close():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.XREP)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.XREQ)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=1)
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=1)
gevent.sleep(3)
print 'CLOSE CLIENT SOCKET!!!'
client_hbchan.close()
client.close()
with assert_raises(zerorpc.LostRemote):
server_hbchan.recv()
print 'SERVER LOST CLIENT :)'
server_hbchan.close()
server.close()
def test_do_some_req_rep():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.XREP)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.XREQ)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=1)
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=1)
def client_do():
for x in xrange(20):
client_hbchan.emit('add', (x, x * x))
event = client_hbchan.recv()
assert event.name == 'OK'
assert event.args == (x + x * x,)
client_hbchan.close()
client_task = gevent.spawn(client_do)
def server_do():
for x in xrange(20):
event = server_hbchan.recv()
assert event.name == 'add'
server_hbchan.emit('OK', (sum(event.args),))
server_hbchan.close()
server_task = gevent.spawn(server_do)
server_task.get()
client_task.get()
client.close()
server.close()
def test_do_some_req_rep_lost_server():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.XREP)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.XREQ)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
def client_do():
print 'running'
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=1)
for x in xrange(10):
client_hbchan.emit('add', (x, x * x))
event = client_hbchan.recv()
assert event.name == 'OK'
assert event.args == (x + x * x,)
client_hbchan.emit('add', (x, x * x))
with assert_raises(zerorpc.LostRemote):
event = client_hbchan.recv()
client_hbchan.close()
client_task = gevent.spawn(client_do)
def server_do():
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=1)
for x in xrange(10):
event = server_hbchan.recv()
assert event.name == 'add'
server_hbchan.emit('OK', (sum(event.args),))
server_hbchan.close()
server_task = gevent.spawn(server_do)
server_task.get()
client_task.get()
client.close()
server.close()
def test_do_some_req_rep_lost_client():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.XREP)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.XREQ)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
def client_do():
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=1)
for x in xrange(10):
client_hbchan.emit('add', (x, x * x))
event = client_hbchan.recv()
assert event.name == 'OK'
assert event.args == (x + x * x,)
client_hbchan.close()
client_task = gevent.spawn(client_do)
def server_do():
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=1)
for x in xrange(10):
event = server_hbchan.recv()
assert event.name == 'add'
server_hbchan.emit('OK', (sum(event.args),))
with assert_raises(zerorpc.LostRemote):
event = server_hbchan.recv()
server_hbchan.close()
server_task = gevent.spawn(server_do)
server_task.get()
client_task.get()
client.close()
server.close()
def test_do_some_req_rep_client_timeout():
endpoint = random_ipc_endpoint()
server_events = zerorpc.Events(zmq.XREP)
server_events.bind(endpoint)
server = zerorpc.ChannelMultiplexer(server_events)
client_events = zerorpc.Events(zmq.XREQ)
client_events.connect(endpoint)
client = zerorpc.ChannelMultiplexer(client_events, ignore_broadcast=True)
def client_do():
client_channel = client.channel()
client_hbchan = zerorpc.HeartBeatOnChannel(client_channel, freq=1)
with assert_raises(zerorpc.TimeoutExpired):
for x in xrange(10):
client_hbchan.emit('sleep', (x,))
event = client_hbchan.recv(timeout=3)
assert event.name == 'OK'
assert event.args == (x,)
client_hbchan.close()
client_task = gevent.spawn(client_do)
def server_do():
event = server.recv()
server_channel = server.channel(event)
server_hbchan = zerorpc.HeartBeatOnChannel(server_channel, freq=1)
with assert_raises(zerorpc.LostRemote):
for x in xrange(20):
event = server_hbchan.recv()
assert event.name == 'sleep'
gevent.sleep(event.args[0])
server_hbchan.emit('OK', event.args)
server_hbchan.close()
server_task = gevent.spawn(server_do)
server_task.get()
client_task.get()
client.close()
server.close()
| adieu/zerorpc-python | tests/test_heartbeat.py | Python | mit | 10,552 |
"""
TODO: add a docstring.
"""
from pystache import TemplateSpec
class UnicodeInput(TemplateSpec):
template_encoding = 'utf8'
def age(self):
return 156
| zzeleznick/zDjango | venv/lib/python2.7/site-packages/pystache/tests/examples/unicode_input.py | Python | mit | 174 |
def foo():
a = 1
b = 2
<caret>
x = 42 | asedunov/intellij-community | python/testData/copyPaste/UseExistingIndentWhenCaretAtFirstColumn.dst.py | Python | apache-2.0 | 45 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes, os
import webnotes.model.doc
from webnotes.modules import scrub, get_module_path, lower_case_files_for, scrub_dt_dn
from webnotes.plugins import get_plugin_path
def export_doc(doc):
export_to_files([[doc.doctype, doc.name]])
def export_to_files(record_list=None, record_module=None, verbose=0, plugin=None, create_init=None):
"""
	Export record_list to files. record_list is a list of [doctype, docname] pairs.
"""
if webnotes.flags.in_import:
return
module_doclist =[]
if record_list:
for record in record_list:
write_document_file(webnotes.model.doc.get(record[0], record[1]),
record_module, plugin=plugin, create_init=create_init)
def write_document_file(doclist, record_module=None, plugin=None, create_init=None):
from webnotes.modules.utils import pprint_doclist
doclist = [filter_fields(d.fields) for d in doclist]
module = record_module or get_module_name(doclist)
if create_init is None:
create_init = doclist[0]['doctype'] in lower_case_files_for
# create folder
folder = create_folder(module, doclist[0]['doctype'], doclist[0]['name'], create_init, plugin=plugin)
# write the data file
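	# scrub() the file name only for doctypes listed in lower_case_files_for; other doctypes keep the original name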
fname = (doclist[0]['doctype'] in lower_case_files_for and scrub(doclist[0]['name'])) or doclist[0]['name']
with open(os.path.join(folder, fname +'.txt'),'w+') as txtfile:
txtfile.write(pprint_doclist(doclist))
def filter_fields(doc):
from webnotes.model.doctype import get
from webnotes.model import default_fields
doctypelist = get(doc.doctype, False)
valid_fields = [d.fieldname for d in doctypelist.get({"parent":doc.doctype,
"doctype":"DocField"})]
to_remove = []
for key in doc:
if (not key in default_fields) and (not key in valid_fields):
to_remove.append(key)
elif doc[key]==None:
to_remove.append(key)
for key in to_remove:
del doc[key]
return doc
def get_module_name(doclist):
if doclist[0]['doctype'] == 'Module Def':
module = doclist[0]['name']
elif doclist[0]['doctype']=='Control Panel':
module = 'Core'
elif doclist[0]['doctype']=="Workflow":
module = webnotes.conn.get_value("DocType", doclist[0]["document_type"], "module")
else:
module = doclist[0]['module']
return module
def create_folder(module, dt, dn, create_init, plugin=None):
if plugin:
module_path = os.path.join(get_plugin_path(plugin), scrub(module))
else:
module_path = get_module_path(module)
dt, dn = scrub_dt_dn(dt, dn)
# create folder
folder = os.path.join(module_path, dt, dn)
webnotes.create_folder(folder)
# create init_py_files
if create_init:
create_init_py(module_path, dt, dn)
return folder
def create_init_py(module_path, dt, dn):
def create_if_not_exists(path):
initpy = os.path.join(path, '__init__.py')
if not os.path.exists(initpy):
open(initpy, 'w').close()
create_if_not_exists(os.path.join(module_path))
create_if_not_exists(os.path.join(module_path, dt))
create_if_not_exists(os.path.join(module_path, dt, dn))
| gangadhar-kadam/nassimlib | webnotes/modules/export_file.py | Python | mit | 3,110 |
from mongoengine.common import _import_class
from mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db
__all__ = ("switch_db", "switch_collection", "no_dereference",
"no_sub_classes", "query_counter")
class switch_db(object):
""" switch_db alias context manager.
Example ::
# Register connections
register_connection('default', 'mongoenginetest')
register_connection('testdb-1', 'mongoenginetest2')
class Group(Document):
name = StringField()
Group(name="test").save() # Saves in the default db
with switch_db(Group, 'testdb-1') as Group:
Group(name="hello testdb!").save() # Saves in testdb-1
"""
def __init__(self, cls, db_alias):
""" Construct the switch_db context manager
:param cls: the class to change the registered db
:param db_alias: the name of the specific database to use
"""
self.cls = cls
self.collection = cls._get_collection()
self.db_alias = db_alias
self.ori_db_alias = cls._meta.get("db_alias", DEFAULT_CONNECTION_NAME)
def __enter__(self):
""" change the db_alias and clear the cached collection """
self.cls._meta["db_alias"] = self.db_alias
self.cls._collection = None
return self.cls
def __exit__(self, t, value, traceback):
""" Reset the db_alias and collection """
self.cls._meta["db_alias"] = self.ori_db_alias
self.cls._collection = self.collection
class switch_collection(object):
""" switch_collection alias context manager.
Example ::
class Group(Document):
name = StringField()
Group(name="test").save() # Saves in the default db
with switch_collection(Group, 'group1') as Group:
Group(name="hello testdb!").save() # Saves in group1 collection
"""
def __init__(self, cls, collection_name):
""" Construct the switch_collection context manager
:param cls: the class to change the registered db
:param collection_name: the name of the collection to use
"""
self.cls = cls
self.ori_collection = cls._get_collection()
self.ori_get_collection_name = cls._get_collection_name
self.collection_name = collection_name
def __enter__(self):
""" change the _get_collection_name and clear the cached collection """
@classmethod
def _get_collection_name(cls):
return self.collection_name
self.cls._get_collection_name = _get_collection_name
self.cls._collection = None
return self.cls
def __exit__(self, t, value, traceback):
""" Reset the collection """
self.cls._collection = self.ori_collection
self.cls._get_collection_name = self.ori_get_collection_name
class no_dereference(object):
""" no_dereference context manager.
Turns off all dereferencing in Documents for the duration of the context
manager::
with no_dereference(Group) as Group:
Group.objects.find()
"""
def __init__(self, cls):
""" Construct the no_dereference context manager.
:param cls: the class to turn dereferencing off on
"""
self.cls = cls
ReferenceField = _import_class('ReferenceField')
GenericReferenceField = _import_class('GenericReferenceField')
ComplexBaseField = _import_class('ComplexBaseField')
self.deref_fields = [k for k, v in self.cls._fields.iteritems()
if isinstance(v, (ReferenceField,
GenericReferenceField,
ComplexBaseField))]
def __enter__(self):
""" change the objects default and _auto_dereference values"""
for field in self.deref_fields:
self.cls._fields[field]._auto_dereference = False
return self.cls
def __exit__(self, t, value, traceback):
""" Reset the default and _auto_dereference values"""
for field in self.deref_fields:
self.cls._fields[field]._auto_dereference = True
return self.cls
class no_sub_classes(object):
""" no_sub_classes context manager.
Only returns instances of this class and no sub (inherited) classes::
with no_sub_classes(Group) as Group:
Group.objects.find()
"""
def __init__(self, cls):
""" Construct the no_sub_classes context manager.
:param cls: the class to turn querying sub classes on
"""
self.cls = cls
def __enter__(self):
""" change the objects default and _auto_dereference values"""
self.cls._all_subclasses = self.cls._subclasses
self.cls._subclasses = (self.cls,)
return self.cls
def __exit__(self, t, value, traceback):
""" Reset the default and _auto_dereference values"""
self.cls._subclasses = self.cls._all_subclasses
delattr(self.cls, '_all_subclasses')
return self.cls
class query_counter(object):
""" Query_counter context manager to get the number of queries. """
def __init__(self):
""" Construct the query_counter. """
self.counter = 0
self.db = get_db()
def __enter__(self):
""" On every with block we need to drop the profile collection. """
self.db.set_profiling_level(0)
self.db.system.profile.drop()
self.db.set_profiling_level(2)
return self
def __exit__(self, t, value, traceback):
""" Reset the profiling level. """
self.db.set_profiling_level(0)
def __eq__(self, value):
""" == Compare querycounter. """
counter = self._get_count()
return value == counter
def __ne__(self, value):
""" != Compare querycounter. """
return not self.__eq__(value)
def __lt__(self, value):
""" < Compare querycounter. """
return self._get_count() < value
def __le__(self, value):
""" <= Compare querycounter. """
return self._get_count() <= value
def __gt__(self, value):
""" > Compare querycounter. """
return self._get_count() > value
def __ge__(self, value):
""" >= Compare querycounter. """
return self._get_count() >= value
def __int__(self):
""" int representation. """
return self._get_count()
def __repr__(self):
""" repr query_counter as the number of queries. """
return u"%s" % self._get_count()
def _get_count(self):
""" Get the number of queries. """
ignore_query = {"ns": {"$ne": "%s.system.indexes" % self.db.name}}
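        # The find() on system.profile below is itself recorded by the profiler,
        # so self.counter compensates for the lookups issued by earlier calls to
        # _get_count().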
count = self.db.system.profile.find(ignore_query).count() - self.counter
self.counter += 1
return count
| elioth010/lugama | venv/lib/python2.7/site-packages/mongoengine/context_managers.py | Python | gpl-2.0 | 6,871 |
class MyClass(object):
def method(se<caret>lf):
pass
| jwren/intellij-community | python/testData/intentions/paramTypeInDocstringNotSuggestedForSelf.py | Python | apache-2.0 | 65 |
# test for-else statement
# test optimised range with simple else
for i in range(2):
print(i)
else:
print('else')
# test optimised range with break over else
for i in range(2):
print(i)
break
else:
print('else')
# test nested optimised range with continue in the else
for i in range(4):
print(i)
for j in range(4):
pass
else:
continue
break
# test optimised range with non-constant end value
N = 2
for i in range(N):
print(i)
else:
print('else')
# test generic iterator with simple else
for i in [0, 1]:
print(i)
else:
print('else')
# test generic iterator with break over else
for i in [0, 1]:
print(i)
break
else:
print('else')
| AriZuu/micropython | tests/basics/for_else.py | Python | mit | 716 |
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True)
| joostvdg/jenkins-job-builder | setup.py | Python | apache-2.0 | 686 |
import pytest
from utils import TestEnvironment, http, HTTP_OK, COLOR, CRLF
from httpie import ExitStatus
from httpie.output.formatters.colors import get_lexer
class TestVerboseFlag:
def test_verbose(self, httpbin):
r = http('--verbose',
'GET', httpbin.url + '/get', 'test-header:__test__')
assert HTTP_OK in r
assert r.count('__test__') == 2
def test_verbose_form(self, httpbin):
# https://github.com/jkbrzt/httpie/issues/53
r = http('--verbose', '--form', 'POST', httpbin.url + '/post',
'A=B', 'C=D')
assert HTTP_OK in r
assert 'A=B&C=D' in r
def test_verbose_json(self, httpbin):
r = http('--verbose',
'POST', httpbin.url + '/post', 'foo=bar', 'baz=bar')
assert HTTP_OK in r
assert '"baz": "bar"' in r
class TestColors:
@pytest.mark.parametrize('mime', [
'application/json',
'application/json+foo',
'application/foo+json',
'application/json-foo',
'application/x-json',
'foo/json',
'foo/json+bar',
'foo/bar+json',
'foo/json-foo',
'foo/x-json',
])
def test_get_lexer(self, mime):
lexer = get_lexer(mime)
assert lexer is not None
assert lexer.name == 'JSON'
def test_get_lexer_not_found(self):
assert get_lexer('xxx/yyy') is None
class TestPrettyOptions:
"""Test the --pretty flag handling."""
def test_pretty_enabled_by_default(self, httpbin):
env = TestEnvironment(colors=256)
r = http('GET', httpbin.url + '/get', env=env)
assert COLOR in r
def test_pretty_enabled_by_default_unless_stdout_redirected(self, httpbin):
r = http('GET', httpbin.url + '/get')
assert COLOR not in r
def test_force_pretty(self, httpbin):
env = TestEnvironment(stdout_isatty=False, colors=256)
r = http('--pretty=all', 'GET', httpbin.url + '/get', env=env, )
assert COLOR in r
def test_force_ugly(self, httpbin):
r = http('--pretty=none', 'GET', httpbin.url + '/get')
assert COLOR not in r
def test_subtype_based_pygments_lexer_match(self, httpbin):
"""Test that media subtype is used if type/subtype doesn't
match any lexer.
"""
env = TestEnvironment(colors=256)
r = http('--print=B', '--pretty=all', httpbin.url + '/post',
'Content-Type:text/foo+json', 'a=b', env=env)
assert COLOR in r
def test_colors_option(self, httpbin):
env = TestEnvironment(colors=256)
r = http('--print=B', '--pretty=colors',
'GET', httpbin.url + '/get', 'a=b',
env=env)
# Tests that the JSON data isn't formatted.
assert not r.strip().count('\n')
assert COLOR in r
def test_format_option(self, httpbin):
env = TestEnvironment(colors=256)
r = http('--print=B', '--pretty=format',
'GET', httpbin.url + '/get', 'a=b',
env=env)
# Tests that the JSON data is formatted.
assert r.strip().count('\n') == 2
assert COLOR not in r
class TestLineEndings:
"""
Test that CRLF is properly used in headers
and as the headers/body separator.
"""
def _validate_crlf(self, msg):
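        """Assert that every header line ends with CRLF, locate the CRLF
        headers/body separator, and return the body (which must not contain CRLF)."""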
lines = iter(msg.splitlines(True))
for header in lines:
if header == CRLF:
break
assert header.endswith(CRLF), repr(header)
else:
assert 0, 'CRLF between headers and body not found in %r' % msg
body = ''.join(lines)
assert CRLF not in body
return body
def test_CRLF_headers_only(self, httpbin):
r = http('--headers', 'GET', httpbin.url + '/get')
body = self._validate_crlf(r)
assert not body, 'Garbage after headers: %r' % r
def test_CRLF_ugly_response(self, httpbin):
r = http('--pretty=none', 'GET', httpbin.url + '/get')
self._validate_crlf(r)
def test_CRLF_formatted_response(self, httpbin):
r = http('--pretty=format', 'GET', httpbin.url + '/get')
assert r.exit_status == ExitStatus.OK
self._validate_crlf(r)
def test_CRLF_ugly_request(self, httpbin):
r = http('--pretty=none', '--print=HB', 'GET', httpbin.url + '/get')
self._validate_crlf(r)
def test_CRLF_formatted_request(self, httpbin):
r = http('--pretty=format', '--print=HB', 'GET', httpbin.url + '/get')
self._validate_crlf(r)
| alfcrisci/httpie | tests/test_output.py | Python | bsd-3-clause | 4,568 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
if frappe.db.exists('Module Def', 'Fleet Management'):
frappe.db.sql("""delete from `tabModule Def`
where module_name = 'Fleet Management'""") | manassolanki/erpnext | erpnext/patches/v7_2/delete_fleet_management_module_def.py | Python | gpl-3.0 | 346 |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
#-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustäbel <[email protected]>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
"""Read from and write to tar format archives.
"""
__version__ = "$Revision: 70528 $"
# $Source$
version = "0.9.0"
__author__ = "Lars Gustäbel ([email protected])"
__date__ = "$Date: 2009-03-22 17:29:48 -0400 (Sun, 22 Mar 2009) $"
__cvsid__ = "$Id: tarfile.py 70528 2009-03-22 21:29:48Z lars.gustaebel $"
__credits__ = "Gustavo Niemeyer, Niels Gustäbel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import shutil
import stat
import errno
import time
import struct
import copy
import re
import operator
if sys.platform == 'mac':
# This module needs work for MacOS9, especially in the area of pathname
# handling. In many places it is assumed a simple substitution of / by the
# local os.path.sep is good enough to convert pathnames, but this does not
# work with the mac rooted:path:name versus :nonrooted:path:name syntax
raise ImportError, "tarfile does not work for platform==mac"
try:
import grp, pwd
except ImportError:
grp = pwd = None
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = "\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = "ustar \0" # magic gnu tar string
POSIX_MAGIC = "ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = "0" # regular file
AREGTYPE = "\0" # regular file
LNKTYPE = "1" # link (inside tarfile)
SYMTYPE = "2" # symbolic link
CHRTYPE = "3" # character special device
BLKTYPE = "4" # block special device
DIRTYPE = "5" # directory
FIFOTYPE = "6" # fifo special device
CONTTYPE = "7" # contiguous file
GNUTYPE_LONGNAME = "L" # GNU tar longname
GNUTYPE_LONGLINK = "K" # GNU tar longlink
GNUTYPE_SPARSE = "S" # GNU tar sparse file
XHDTYPE = "x" # POSIX.1-2001 extended header
XGLTYPE = "g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = "X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0120000 # symbolic link
S_IFREG = 0100000 # regular file
S_IFBLK = 0060000 # block device
S_IFDIR = 0040000 # directory
S_IFCHR = 0020000 # character device
S_IFIFO = 0010000 # fifo
TSUID = 04000 # set UID on execution
TSGID = 02000 # set GID on execution
TSVTX = 01000 # reserved
TUREAD = 0400 # read by owner
TUWRITE = 0200 # write by owner
TUEXEC = 0100 # execute/search by owner
TGREAD = 0040 # read by group
TGWRITE = 0020 # write by group
TGEXEC = 0010 # execute/search by group
TOREAD = 0004 # read by other
TOWRITE = 0002 # write by other
TOEXEC = 0001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
ENCODING = sys.getfilesystemencoding()
if ENCODING is None:
ENCODING = sys.getdefaultencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length):
"""Convert a python string to a null-terminated string buffer.
"""
return s[:length] + (length - len(s)) * NUL
def nts(s):
"""Convert a null-terminated string field to a python string.
"""
# Use the string up to the first null char.
p = s.find("\0")
if p == -1:
return s
return s[:p]
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0200):
try:
n = int(nts(s) or "0", 8)
except ValueError:
raise HeaderError("invalid header")
else:
n = 0L
for i in xrange(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = "%0*o" % (digits - 1, n) + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = ""
for i in xrange(digits - 1):
s = chr(n & 0377) + s
n >>= 8
s = chr(0200) + s
return s
def uts(s, encoding, errors):
"""Convert a unicode object to a string.
"""
if errors == "utf-8":
# An extra error handler similar to the -o invalid=UTF-8 option
# in POSIX.1-2001. Replace untranslatable characters with their
# UTF-8 representation.
try:
return s.encode(encoding, "strict")
except UnicodeEncodeError:
x = []
for c in s:
try:
x.append(c.encode(encoding, "strict"))
except UnicodeEncodeError:
x.append(c.encode("utf8"))
return "".join(x)
else:
return s.encode(encoding, errors)
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
shutil.copyfileobj(src, dst)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in xrange(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
if os.sep != "/":
normpath = lambda path: os.path.normpath(path).replace(os.sep, "/")
else:
normpath = os.path.normpath
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
    """Exception for unreadable tar archives."""
    pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Exception for invalid headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream:
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = ""
self.pos = 0L
self.closed = False
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32("") & 0xffffffffL
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = ""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", long(time.time()))
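        # gzip member header: magic \037\213, deflate method \010, FNAME flag \010,
        # 4-byte mtime, XFL \002 and OS \377 (unknown); the original file name
        # written below fills the FNAME field.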
self.__write("\037\213\010\010%s\002\377" % timestamp)
if self.name.endswith(".gz"):
self.name = self.name[:-3]
self.__write(self.name + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc) & 0xffffffffL
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = ""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffffL))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFFL))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = ""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != "\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != "\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in xrange(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
t = [self.dbuf]
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
t.append(buf)
c += len(buf)
t = "".join(t)
self.dbuf = t[size:]
return t[:size]
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
t = [self.buf]
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
t.append(buf)
c += len(buf)
t = "".join(t)
self.buf = t[size:]
return t[:size]
# class _Stream
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith("\037\213\010"):
return "gz"
if self.buf.startswith("BZh91"):
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
"""Small proxy class that enables external file object
support for "r:bz2" and "w:bz2" modes. This is actually
a workaround for a limitation in bz2 module's BZ2File
class which (unlike gzip.GzipFile) has no support for
a file object argument.
"""
blocksize = 16 * 1024
def __init__(self, fileobj, mode):
self.fileobj = fileobj
self.mode = mode
self.name = getattr(self.fileobj, "name", None)
self.init()
def init(self):
import bz2
self.pos = 0
if self.mode == "r":
self.bz2obj = bz2.BZ2Decompressor()
self.fileobj.seek(0)
self.buf = ""
else:
self.bz2obj = bz2.BZ2Compressor()
def read(self, size):
b = [self.buf]
x = len(self.buf)
while x < size:
raw = self.fileobj.read(self.blocksize)
if not raw:
break
try:
data = self.bz2obj.decompress(raw)
except EOFError:
break
b.append(data)
x += len(data)
self.buf = "".join(b)
buf = self.buf[:size]
self.buf = self.buf[size:]
self.pos += len(buf)
return buf
def seek(self, pos):
if pos < self.pos:
self.init()
self.read(pos - self.pos)
def tell(self):
return self.pos
def write(self, data):
self.pos += len(data)
raw = self.bz2obj.compress(data)
self.fileobj.write(raw)
def close(self):
if self.mode == "w":
raw = self.bz2obj.flush()
self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, sparse=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.sparse = sparse
self.position = 0
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
if self.sparse is None:
return self.readnormal(size)
else:
return self.readsparse(size)
def readnormal(self, size):
"""Read operation for regular files.
"""
self.fileobj.seek(self.offset + self.position)
self.position += size
return self.fileobj.read(size)
def readsparse(self, size):
"""Read operation for sparse files.
"""
data = []
while size > 0:
buf = self.readsparsesection(size)
if not buf:
break
size -= len(buf)
data.append(buf)
return "".join(data)
def readsparsesection(self, size):
"""Read a single section of a sparse file.
"""
section = self.sparse.find(self.position)
if section is None:
return ""
size = min(size, section.offset + section.size - self.position)
if isinstance(section, _data):
realpos = section.realpos + self.position - section.offset
self.fileobj.seek(self.offset + realpos)
self.position += size
return self.fileobj.read(size)
else:
self.position += size
return NUL * size
#class _FileInFile
class ExFileObject(object):
"""File-like object for reading an archive member.
Is returned by TarFile.extractfile().
"""
blocksize = 1024
def __init__(self, tarfile, tarinfo):
self.fileobj = _FileInFile(tarfile.fileobj,
tarinfo.offset_data,
tarinfo.size,
getattr(tarinfo, "sparse", None))
self.name = tarinfo.name
self.mode = "r"
self.closed = False
self.size = tarinfo.size
self.position = 0
self.buffer = ""
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = ""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = ""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if "\n" in self.buffer:
pos = self.buffer.find("\n") + 1
else:
buffers = [self.buffer]
while True:
buf = self.fileobj.read(self.blocksize)
buffers.append(buf)
if not buf or "\n" in buf:
self.buffer = "".join(buffers)
pos = self.buffer.find("\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
def readlines(self):
"""Return a list with all remaining lines.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def tell(self):
"""Return the current file position.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
return self.position
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = ""
self.fileobj.seek(self.position)
def close(self):
"""Close the file object.
"""
self.closed = True
def __iter__(self):
"""Get an iterator over the file's lines.
"""
while True:
line = self.readline()
if not line:
break
yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "root" # user name
self.gname = "root" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self, encoding, errors):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": normpath(self.name),
"mode": self.mode & 07777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": normpath(self.linkname) if self.linkname else "",
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
for key in ("name", "linkname", "uname", "gname"):
if type(info[key]) is unicode:
info[key] = info[key].encode(encoding, errors)
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="strict"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info(encoding, errors)
if format == USTAR_FORMAT:
return self.create_ustar_header(info)
elif format == GNU_FORMAT:
return self.create_gnu_header(info)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding, errors)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT)
def create_gnu_header(self, info):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = ""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME)
return buf + self._create_header(info, GNU_FORMAT)
def create_pax_header(self, info, encoding, errors):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplemental information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
val = info[name].decode(encoding, errors)
# Try to encode the string as ASCII.
try:
val.encode("ascii")
except UnicodeEncodeError:
pax_headers[hname] = val
continue
if len(info[name]) > length:
pax_headers[hname] = val
# Test number fields for values that exceed the field limit or values
# that would otherwise be stored as floats.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = unicode(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers)
else:
buf = ""
return buf + self._create_header(info, USTAR_FORMAT)
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, type=XGLTYPE)
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100),
itn(info.get("mode", 0) & 07777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100),
stn(info.get("magic", POSIX_MAGIC), 8),
stn(info.get("uname", "root"), 32),
stn(info.get("gname", "root"), 32),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155)
]
buf = struct.pack("%ds" % BLOCKSIZE, "".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + "%06o\0" % chksum + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name += NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type=XHDTYPE):
"""Return a POSIX.1-2001 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be unicode objects.
"""
records = []
for keyword, value in pax_headers.iteritems():
keyword = keyword.encode("utf8")
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records.append("%d %s=%s\n" % (p, keyword, value))
records = "".join(records)
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT) + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf):
"""Construct a TarInfo object from a 512 byte string buffer.
"""
if len(buf) != BLOCKSIZE:
raise HeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise HeaderError("empty header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise HeaderError("bad checksum")
obj = cls()
obj.buf = buf
obj.name = nts(buf[0:100])
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257])
obj.uname = nts(buf[265:297])
obj.gname = nts(buf[297:329])
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500])
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
if not buf:
return
obj = cls.frombuf(buf)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
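# Illustrative sketch (an assumption, not part of the original module) of
# such a subclass; the type flag "X" and the helper name are hypothetical:
#
#     class MyTarInfo(TarInfo):
#         def _proc_member(self, tarfile):
#             if self.type == "X":
#                 return self._proc_custom(tarfile)
#             return TarInfo._proc_member(self, tarfile)
#         def _proc_custom(self, tarfile):
#             # fulfil the contract described above
#             self.offset_data = tarfile.fileobj.tell()
#             tarfile.offset = self.offset_data + self._block(self.size)
#             return self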
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
next = self.fromtarfile(tarfile)
if next is None:
raise HeaderError("missing subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
buf = self.buf
sp = _ringbuffer()
pos = 386
lastpos = 0L
realpos = 0L
# There are 4 possible sparse structs in the
# first header.
for i in xrange(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset > lastpos:
sp.append(_hole(lastpos, offset - lastpos))
sp.append(_data(offset, numbytes, realpos))
realpos += numbytes
lastpos = offset + numbytes
pos += 24
isextended = ord(buf[482])
origsize = nti(buf[483:495])
# If the isextended flag is given,
# there are extra headers to process.
while isextended == 1:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in xrange(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset > lastpos:
sp.append(_hole(lastpos, offset - lastpos))
sp.append(_data(offset, numbytes, realpos))
realpos += numbytes
lastpos = offset + numbytes
pos += 24
isextended = ord(buf[504])
if lastpos < origsize:
sp.append(_hole(lastpos, origsize - lastpos))
self.sparse = sp
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2001.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Parse pax header information. A record looks like this:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(r"(\d+) ([^=]+)=", re.U)
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
keyword = keyword.decode("utf8")
value = value.decode("utf8")
pax_headers[keyword] = value
pos += length
# Fetch the next header.
next = self.fromtarfile(tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
if next is None:
raise HeaderError("missing subsequent header")
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.iteritems():
if keyword not in PAX_FIELDS:
continue
if keyword == "path":
value = value.rstrip("/")
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
else:
value = uts(value, encoding, errors)
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.type == GNUTYPE_SPARSE
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
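# Illustrative usage sketch (added note, not part of the original module):
# building a header block for a small member by hand; the values are
# hypothetical and tobuf() returns a single 512-byte block for short names:
#
#     ti = TarInfo("hello.txt")
#     ti.size = 5
#     ti.mtime = 1300000000
#     block = ti.tobuf()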
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 0 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors=None, pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed when TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
if errors is not None:
self.errors = errors
elif mode == "r":
self.errors = "utf-8"
else:
self.errors = "strict"
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
self.firstmember = None
while True:
if self.next() is None:
if self.offset > 0:
self.fileobj.seek(- BLOCKSIZE, 1)
break
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
def _getposix(self):
return self.format == USTAR_FORMAT
def _setposix(self, value):
import warnings
warnings.warn("use the format attribute instead", DeprecationWarning)
if value:
self.format = USTAR_FORMAT
else:
self.format = GNU_FORMAT
posix = property(_getposix, _setposix)
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
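# Illustrative sketch (an assumption, not part of the original module) of
# registering an additional sub-constructor; the "xz" support is purely
# hypothetical here:
#
#     class MyTarFile(TarFile):
#         OPEN_METH = dict(TarFile.OPEN_METH, xz="xzopen")
#         @classmethod
#         def xzopen(cls, name, mode="r", fileobj=None, **kwargs):
#             ...  # wrap name/fileobj in a decompressing file object,
#                  # then hand the result to cls.taropen()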
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError), e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
t = cls(name, filemode,
_Stream(name, filemode, comptype, fileobj, bufsize),
**kwargs)
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
if fileobj is None:
fileobj = bltn_open(name, mode + "b")
try:
t = cls.taropen(name, mode,
gzip.GzipFile(name, mode, compresslevel, fileobj),
**kwargs)
except IOError:
raise ReadError("not a gzip file")
t._extfileobj = False
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except IOError:
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes,
# Absolute paths are turned to relative paths.
if arcname is None:
arcname = name
arcname = normpath(arcname)
drv, arcname = os.path.splitdrive(arcname)
while arcname[0:1] == "/":
arcname = arcname[1:]
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
# The inode is added only if it is valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if stat.S_ISREG(stmd):
tarinfo.size = statres.st_size
else:
tarinfo.size = 0L
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print filemode(tarinfo.mode),
print "%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid),
if tarinfo.ischr() or tarinfo.isblk():
print "%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)),
else:
print "%10d" % tarinfo.size,
print "%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6],
print tarinfo.name + ("/" if tarinfo.isdir() else ""),
if verbose:
if tarinfo.issym():
print "->", tarinfo.linkname,
if tarinfo.islnk():
print "link to", tarinfo.linkname,
print
def add(self, name, arcname=None, recursive=True, exclude=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None and exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
# Special case: The user wants to add the current
# working directory.
if name == ".":
if recursive:
if arcname == ".":
arcname = ""
for f in os.listdir(name):
self.add(f, os.path.join(arcname, f), recursive, exclude)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
f = bltn_open(name, "rb")
self.addfile(tarinfo, f)
f.close()
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f), recursive, exclude)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid irritation about the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0700
self.extract(tarinfo, path)
# Reverse sort directories.
directories.sort(key=operator.attrgetter('name'))
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError, e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path=""):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'.
"""
self._check("r")
if isinstance(member, basestring):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name))
except EnvironmentError, e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError, e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, basestring):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._getmember(tarinfo.linkname,
tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
def _extract_member(self, tarinfo, targetpath):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes to platform specific separators.
if targetpath[-1:] == "/":
targetpath = targetpath[:-1]
targetpath = os.path.normpath(targetpath)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0700)
except EnvironmentError, e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.extractfile(tarinfo)
target = bltn_open(targetpath, "wb")
copyfileobj(source, target)
source.close()
target.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
linkpath = tarinfo.linkname
try:
if tarinfo.issym():
os.symlink(linkpath, targetpath)
else:
# See extract().
os.link(tarinfo._link_target, targetpath)
except AttributeError:
if tarinfo.issym():
linkpath = os.path.join(os.path.dirname(tarinfo.name),
linkpath)
linkpath = normpath(linkpath)
try:
self._extract_member(self.getmember(linkpath), targetpath)
except (EnvironmentError, KeyError), e:
linkpath = os.path.normpath(linkpath)
try:
shutil.copy2(linkpath, targetpath)
except EnvironmentError, e:
raise IOError("link could not be created")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
try:
g = grp.getgrgid(tarinfo.gid)[2]
except KeyError:
g = os.getgid()
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
try:
u = pwd.getpwuid(tarinfo.uid)[2]
except KeyError:
u = os.getuid()
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError, e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError, e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError, e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there are no more
members available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
if tarinfo is None:
return
self.members.append(tarinfo)
except HeaderError, e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
else:
if self.offset == 0:
raise ReadError(str(e))
return None
break
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
if tarinfo is None:
end = len(members)
else:
end = members.index(tarinfo)
for i in xrange(end - 1, -1, -1):
if name == members[i].name:
return members[i]
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print >> sys.stderr, msg
# class TarFile
class TarIter:
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def next(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
# Helper classes for sparse file support
class _section:
"""Base class for _data and _hole.
"""
def __init__(self, offset, size):
self.offset = offset
self.size = size
def __contains__(self, offset):
return self.offset <= offset < self.offset + self.size
class _data(_section):
"""Represent a data section in a sparse file.
"""
def __init__(self, offset, size, realpos):
_section.__init__(self, offset, size)
self.realpos = realpos
class _hole(_section):
"""Represent a hole section in a sparse file.
"""
pass
class _ringbuffer(list):
"""Ringbuffer class which increases performance
over a regular list.
"""
def __init__(self):
self.idx = 0
def find(self, offset):
idx = self.idx
while True:
item = self[idx]
if offset in item:
break
idx += 1
if idx == len(self):
idx = 0
if idx == self.idx:
# End of File
return None
self.idx = idx
return item
#---------------------------------------------
# zipfile compatible TarFile class
#---------------------------------------------
TAR_PLAIN = 0 # zipfile.ZIP_STORED
TAR_GZIPPED = 8 # zipfile.ZIP_DEFLATED
class TarFileCompat:
"""TarFile class compatible with standard module zipfile's
ZipFile class.
"""
def __init__(self, file, mode="r", compression=TAR_PLAIN):
from warnings import warnpy3k
warnpy3k("the TarFileCompat class has been removed in Python 3.0",
stacklevel=2)
if compression == TAR_PLAIN:
self.tarfile = TarFile.taropen(file, mode)
elif compression == TAR_GZIPPED:
self.tarfile = TarFile.gzopen(file, mode)
else:
raise ValueError("unknown compression constant")
if mode[0:1] == "r":
members = self.tarfile.getmembers()
for m in members:
m.filename = m.name
m.file_size = m.size
m.date_time = time.gmtime(m.mtime)[:6]
def namelist(self):
return map(lambda m: m.name, self.infolist())
def infolist(self):
return filter(lambda m: m.type in REGULAR_TYPES,
self.tarfile.getmembers())
def printdir(self):
self.tarfile.list()
def testzip(self):
return
def getinfo(self, name):
return self.tarfile.getmember(name)
def read(self, name):
return self.tarfile.extractfile(self.tarfile.getmember(name)).read()
def write(self, filename, arcname=None, compress_type=None):
self.tarfile.add(filename, arcname)
def writestr(self, zinfo, bytes):
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import calendar
tinfo = TarInfo(zinfo.filename)
tinfo.size = len(bytes)
tinfo.mtime = calendar.timegm(zinfo.date_time)
self.tarfile.addfile(tinfo, StringIO(bytes))
def close(self):
self.tarfile.close()
#class TarFileCompat
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
bltn_open = open
open = TarFile.open
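# Minimal usage sketch appended for illustration (not part of the original
# module); assumes "example.txt" exists in the current directory:
#
#     import tarfile
#     tar = tarfile.open("example.tar.gz", "w:gz")
#     tar.add("example.txt")
#     tar.close()
#     tar = tarfile.open("example.tar.gz", "r:gz")
#     tar.list(verbose=True)
#     for member in tar.getmembers():
#         print member.name, member.size
#     tar.close()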
| leighpauls/k2cro4 | third_party/python_26/Lib/tarfile.py | Python | bsd-3-clause | 86,614 |
#!/usr/bin/env python
#
# Convert image to Javascript compatible base64 Data URI
# Copyright 2011 Joel Martin
# Licensed under MPL 2.0 (see docs/LICENSE.MPL-2.0)
#
import sys, base64
try:
from PIL import Image
except ImportError:
print "python PIL module required (python-imaging package)"
sys.exit(1)
if len(sys.argv) < 3:
print "Usage: %s IMAGE JS_VARIABLE" % sys.argv[0]
sys.exit(1)
fname = sys.argv[1]
var = sys.argv[2]
ext = fname.lower().split('.')[-1]
if ext == "png": mime = "image/png"
elif ext in ["jpg", "jpeg"]: mime = "image/jpeg"
elif ext == "gif": mime = "image/gif"
else:
print "Only PNG, JPEG and GIF images are supported"
sys.exit(1)
uri = "data:%s;base64," % mime
im = Image.open(fname)
w, h = im.size
raw = open(fname, "rb").read()  # read the raw image bytes in binary mode
print '%s = {"width": %s, "height": %s, "data": "%s%s"};' % (
var, w, h, uri, base64.b64encode(raw))
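# Example invocation (illustrative): running
#     python img2js.py logo.png logo_data
# prints a single Javascript assignment of the form
#     logo_data = {"width": 48, "height": 48, "data": "data:image/png;base64,iVBORw0..."};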
| SoftwareKing/zstack-dashboard | zstack_dashboard/static/templates/console/utils/img2js.py | Python | apache-2.0 | 908 |
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{jelly} object serialization.
"""
import datetime
try:
import decimal
except ImportError:
decimal = None
from twisted.spread import jelly, pb
from twisted.python.compat import set, frozenset
from twisted.trial import unittest
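# Background note (illustrative, not part of the original test module):
# jelly.jelly() serializes an object to an s-expression and jelly.unjelly()
# rebuilds it, so a round trip preserves the value, e.g.
#
#     >>> jelly.unjelly(jelly.jelly([1, u"two", 3.0]))
#     [1, u'two', 3.0]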
class TestNode(object, jelly.Jellyable):
"""
An object to test jellying of new style class instances.
"""
classAttr = 4
def __init__(self, parent=None):
if parent:
self.id = parent.id + 1
parent.children.append(self)
else:
self.id = 1
self.parent = parent
self.children = []
class A:
"""
Dummy class.
"""
def amethod(self):
"""
Method to be used in serialization tests.
"""
def afunc(self):
"""
A dummy function to test function serialization.
"""
class B:
"""
Dummy class.
"""
def bmethod(self):
"""
Method to be used in serialization tests.
"""
class C:
"""
Dummy class.
"""
def cmethod(self):
"""
Method to be used in serialization tests.
"""
class D(object):
"""
Dummy new-style class.
"""
class E(object):
"""
Dummy new-style class with slots.
"""
__slots__ = ("x", "y")
def __init__(self, x=None, y=None):
self.x = x
self.y = y
def __getstate__(self):
return {"x" : self.x, "y" : self.y}
def __setstate__(self, state):
self.x = state["x"]
self.y = state["y"]
class SimpleJellyTest:
def __init__(self, x, y):
self.x = x
self.y = y
def isTheSameAs(self, other):
return self.__dict__ == other.__dict__
class JellyTestCase(unittest.TestCase):
"""
Testcases for L{jelly} module serialization.
@cvar decimalData: serialized version of decimal data, to be used in tests.
@type decimalData: C{list}
"""
def _testSecurity(self, inputList, atom):
"""
Helper test method to test security options for a type.
@param inputList: a sample input for the type.
@type inputList: C{list}
@param atom: atom identifier for the type.
@type atom: C{str}
"""
c = jelly.jelly(inputList)
taster = jelly.SecurityOptions()
taster.allowBasicTypes()
# By default, it should succeed
jelly.unjelly(c, taster)
taster.allowedTypes.pop(atom)
# But it should raise an exception when disallowed
self.assertRaises(jelly.InsecureJelly, jelly.unjelly, c, taster)
def test_methodSelfIdentity(self):
a = A()
b = B()
a.bmethod = b.bmethod
b.a = a
im_ = jelly.unjelly(jelly.jelly(b)).a.bmethod
self.assertEquals(im_.im_class, im_.im_self.__class__)
def test_methodsNotSelfIdentity(self):
"""
If a class changes after an instance has been created, L{jelly.unjelly}
should raise a C{TypeError} when trying to unjelly the instance.
"""
a = A()
b = B()
c = C()
a.bmethod = c.cmethod
b.a = a
savecmethod = C.cmethod
del C.cmethod
try:
self.assertRaises(TypeError, jelly.unjelly, jelly.jelly(b))
finally:
C.cmethod = savecmethod
def test_newStyle(self):
n = D()
n.x = 1
n2 = D()
n.n2 = n2
n.n3 = n2
c = jelly.jelly(n)
m = jelly.unjelly(c)
self.assertIsInstance(m, D)
self.assertIdentical(m.n2, m.n3)
def test_newStyleWithSlots(self):
"""
A class defined with I{slots} can be jellied and unjellied with the
values for its attributes preserved.
"""
n = E()
n.x = 1
c = jelly.jelly(n)
m = jelly.unjelly(c)
self.assertIsInstance(m, E)
self.assertEquals(m.x, 1)  # check the unjellied copy, not the original
def test_typeOldStyle(self):
"""
Test that an old style class type can be jellied and unjellied
to the original type.
"""
t = [C]
r = jelly.unjelly(jelly.jelly(t))
self.assertEquals(t, r)
def test_typeNewStyle(self):
"""
Test that a new style class type can be jellied and unjellied
to the original type.
"""
t = [D]
r = jelly.unjelly(jelly.jelly(t))
self.assertEquals(t, r)
def test_typeBuiltin(self):
"""
Test that a builtin type can be jellied and unjellied to the original
type.
"""
t = [str]
r = jelly.unjelly(jelly.jelly(t))
self.assertEquals(t, r)
def test_dateTime(self):
dtn = datetime.datetime.now()
dtd = datetime.datetime.now() - dtn
input = [dtn, dtd]
c = jelly.jelly(input)
output = jelly.unjelly(c)
self.assertEquals(input, output)
self.assertNotIdentical(input, output)
def test_decimal(self):
"""
Jellying L{decimal.Decimal} instances and then unjellying the result
should produce objects which represent the values of the original
inputs.
"""
inputList = [decimal.Decimal('9.95'),
decimal.Decimal(0),
decimal.Decimal(123456),
decimal.Decimal('-78.901')]
c = jelly.jelly(inputList)
output = jelly.unjelly(c)
self.assertEquals(inputList, output)
self.assertNotIdentical(inputList, output)
decimalData = ['list', ['decimal', 995, -2], ['decimal', 0, 0],
['decimal', 123456, 0], ['decimal', -78901, -3]]
def test_decimalUnjelly(self):
"""
Unjellying the s-expressions produced by jelly for L{decimal.Decimal}
instances should result in L{decimal.Decimal} instances with the values
represented by the s-expressions.
This test also verifies that C{self.decimalData} contains valid jellied
data. This is important since L{test_decimalMissing} re-uses
C{self.decimalData} and is expected to be unable to produce
L{decimal.Decimal} instances even though the s-expression correctly
represents a list of them.
"""
expected = [decimal.Decimal('9.95'),
decimal.Decimal(0),
decimal.Decimal(123456),
decimal.Decimal('-78.901')]
output = jelly.unjelly(self.decimalData)
self.assertEquals(output, expected)
def test_decimalMissing(self):
"""
If decimal is unavailable on the unjelly side, L{jelly.unjelly} should
gracefully return L{jelly.Unpersistable} objects.
"""
self.patch(jelly, 'decimal', None)
output = jelly.unjelly(self.decimalData)
self.assertEquals(len(output), 4)
for i in range(4):
self.assertIsInstance(output[i], jelly.Unpersistable)
self.assertEquals(output[0].reason,
"Could not unpersist decimal: 9.95")
self.assertEquals(output[1].reason,
"Could not unpersist decimal: 0")
self.assertEquals(output[2].reason,
"Could not unpersist decimal: 123456")
self.assertEquals(output[3].reason,
"Could not unpersist decimal: -78.901")
def test_decimalSecurity(self):
"""
By default, C{decimal} objects should be allowed by
L{jelly.SecurityOptions}. If not allowed, L{jelly.unjelly} should raise
L{jelly.InsecureJelly} when trying to unjelly it.
"""
inputList = [decimal.Decimal('9.95')]
self._testSecurity(inputList, "decimal")
if decimal is None:
skipReason = "decimal not available"
test_decimal.skip = skipReason
test_decimalUnjelly.skip = skipReason
test_decimalSecurity.skip = skipReason
def test_set(self):
"""
Jellying C{set} instances and then unjellying the result
should produce objects which represent the values of the original
inputs.
"""
inputList = [set([1, 2, 3])]
output = jelly.unjelly(jelly.jelly(inputList))
self.assertEquals(inputList, output)
self.assertNotIdentical(inputList, output)
def test_frozenset(self):
"""
Jellying C{frozenset} instances and then unjellying the result
should produce objects which represent the values of the original
inputs.
"""
inputList = [frozenset([1, 2, 3])]
output = jelly.unjelly(jelly.jelly(inputList))
self.assertEquals(inputList, output)
self.assertNotIdentical(inputList, output)
def test_setSecurity(self):
"""
By default, C{set} objects should be allowed by
L{jelly.SecurityOptions}. If not allowed, L{jelly.unjelly} should raise
L{jelly.InsecureJelly} when trying to unjelly it.
"""
inputList = [set([1, 2, 3])]
self._testSecurity(inputList, "set")
def test_frozensetSecurity(self):
"""
By default, C{frozenset} objects should be allowed by
L{jelly.SecurityOptions}. If not allowed, L{jelly.unjelly} should raise
L{jelly.InsecureJelly} when trying to unjelly it.
"""
inputList = [frozenset([1, 2, 3])]
self._testSecurity(inputList, "frozenset")
def test_oldSets(self):
"""
Test jellying C{sets.Set}: it should serialize to the same thing as
C{set} jelly, and be unjellied as C{set} if available.
"""
inputList = [jelly._sets.Set([1, 2, 3])]
inputJelly = jelly.jelly(inputList)
self.assertEquals(inputJelly, jelly.jelly([set([1, 2, 3])]))
output = jelly.unjelly(inputJelly)
# Even if the class is different, it should coerce to the same list
self.assertEquals(list(inputList[0]), list(output[0]))
if set is jelly._sets.Set:
self.assertIsInstance(output[0], jelly._sets.Set)
else:
self.assertIsInstance(output[0], set)
def test_oldImmutableSets(self):
"""
Test jellying C{sets.ImmutableSet}: it should serialize to the same
thing as C{frozenset} jelly, and be unjellied as C{frozenset} if
available.
"""
inputList = [jelly._sets.ImmutableSet([1, 2, 3])]
inputJelly = jelly.jelly(inputList)
self.assertEquals(inputJelly, jelly.jelly([frozenset([1, 2, 3])]))
output = jelly.unjelly(inputJelly)
# Even if the class is different, it should coerce to the same list
self.assertEquals(list(inputList[0]), list(output[0]))
if frozenset is jelly._sets.ImmutableSet:
self.assertIsInstance(output[0], jelly._sets.ImmutableSet)
else:
self.assertIsInstance(output[0], frozenset)
def test_simple(self):
"""
Simplest test case.
"""
self.failUnless(SimpleJellyTest('a', 'b').isTheSameAs(
SimpleJellyTest('a', 'b')))
a = SimpleJellyTest(1, 2)
cereal = jelly.jelly(a)
b = jelly.unjelly(cereal)
self.failUnless(a.isTheSameAs(b))
def test_identity(self):
"""
Test to make sure that objects retain identity properly.
"""
x = []
y = (x)
x.append(y)
x.append(y)
self.assertIdentical(x[0], x[1])
self.assertIdentical(x[0][0], x)
s = jelly.jelly(x)
z = jelly.unjelly(s)
self.assertIdentical(z[0], z[1])
self.assertIdentical(z[0][0], z)
def test_unicode(self):
x = unicode('blah')
y = jelly.unjelly(jelly.jelly(x))
self.assertEquals(x, y)
self.assertEquals(type(x), type(y))
def test_stressReferences(self):
reref = []
toplevelTuple = ({'list': reref}, reref)
reref.append(toplevelTuple)
s = jelly.jelly(toplevelTuple)
z = jelly.unjelly(s)
self.assertIdentical(z[0]['list'], z[1])
self.assertIdentical(z[0]['list'][0], z)
def test_moreReferences(self):
a = []
t = (a,)
a.append((t,))
s = jelly.jelly(t)
z = jelly.unjelly(s)
self.assertIdentical(z[0][0][0], z)
def test_typeSecurity(self):
"""
Test for type-level security of serialization.
"""
taster = jelly.SecurityOptions()
dct = jelly.jelly({})
self.assertRaises(jelly.InsecureJelly, jelly.unjelly, dct, taster)
def test_newStyleClasses(self):
j = jelly.jelly(D)
        uj = jelly.unjelly(j)
self.assertIdentical(D, uj)
def test_lotsaTypes(self):
"""
Test for all types currently supported in jelly
"""
a = A()
jelly.unjelly(jelly.jelly(a))
jelly.unjelly(jelly.jelly(a.amethod))
items = [afunc, [1, 2, 3], not bool(1), bool(1), 'test', 20.3,
(1, 2, 3), None, A, unittest, {'a': 1}, A.amethod]
for i in items:
self.assertEquals(i, jelly.unjelly(jelly.jelly(i)))
def test_setState(self):
global TupleState
class TupleState:
def __init__(self, other):
self.other = other
def __getstate__(self):
return (self.other,)
def __setstate__(self, state):
self.other = state[0]
def __hash__(self):
return hash(self.other)
a = A()
t1 = TupleState(a)
t2 = TupleState(a)
t3 = TupleState((t1, t2))
d = {t1: t1, t2: t2, t3: t3, "t3": t3}
t3prime = jelly.unjelly(jelly.jelly(d))["t3"]
self.assertIdentical(t3prime.other[0].other, t3prime.other[1].other)
def test_classSecurity(self):
"""
Test for class-level security of serialization.
"""
taster = jelly.SecurityOptions()
taster.allowInstancesOf(A, B)
a = A()
b = B()
c = C()
# add a little complexity to the data
a.b = b
a.c = c
# and a backreference
a.x = b
b.c = c
# first, a friendly insecure serialization
friendly = jelly.jelly(a, taster)
x = jelly.unjelly(friendly, taster)
self.assertIsInstance(x.c, jelly.Unpersistable)
# now, a malicious one
mean = jelly.jelly(a)
self.assertRaises(jelly.InsecureJelly, jelly.unjelly, mean, taster)
self.assertIdentical(x.x, x.b, "Identity mismatch")
# test class serialization
friendly = jelly.jelly(A, taster)
x = jelly.unjelly(friendly, taster)
self.assertIdentical(x, A, "A came back: %s" % x)
def test_unjellyable(self):
"""
Test that if Unjellyable is used to deserialize a jellied object,
state comes out right.
"""
class JellyableTestClass(jelly.Jellyable):
pass
jelly.setUnjellyableForClass(JellyableTestClass, jelly.Unjellyable)
input = JellyableTestClass()
input.attribute = 'value'
output = jelly.unjelly(jelly.jelly(input))
self.assertEquals(output.attribute, 'value')
self.assertIsInstance(output, jelly.Unjellyable)
def test_persistentStorage(self):
perst = [{}, 1]
def persistentStore(obj, jel, perst = perst):
perst[1] = perst[1] + 1
perst[0][perst[1]] = obj
return str(perst[1])
def persistentLoad(pidstr, unj, perst = perst):
pid = int(pidstr)
return perst[0][pid]
a = SimpleJellyTest(1, 2)
b = SimpleJellyTest(3, 4)
c = SimpleJellyTest(5, 6)
a.b = b
a.c = c
c.b = b
jel = jelly.jelly(a, persistentStore = persistentStore)
x = jelly.unjelly(jel, persistentLoad = persistentLoad)
self.assertIdentical(x.b, x.c.b)
self.failUnless(perst[0], "persistentStore was not called.")
self.assertIdentical(x.b, a.b, "Persistent storage identity failure.")
def test_newStyleClassesAttributes(self):
n = TestNode()
n1 = TestNode(n)
n11 = TestNode(n1)
n2 = TestNode(n)
# Jelly it
jel = jelly.jelly(n)
m = jelly.unjelly(jel)
# Check that it has been restored ok
self._check_newstyle(n, m)
def _check_newstyle(self, a, b):
self.assertEqual(a.id, b.id)
self.assertEqual(a.classAttr, 4)
self.assertEqual(b.classAttr, 4)
self.assertEqual(len(a.children), len(b.children))
for x, y in zip(a.children, b.children):
self._check_newstyle(x, y)
class ClassA(pb.Copyable, pb.RemoteCopy):
def __init__(self):
self.ref = ClassB(self)
class ClassB(pb.Copyable, pb.RemoteCopy):
def __init__(self, ref):
self.ref = ref
class CircularReferenceTestCase(unittest.TestCase):
"""
Tests for circular references handling in the jelly/unjelly process.
"""
def test_simpleCircle(self):
jelly.setUnjellyableForClass(ClassA, ClassA)
jelly.setUnjellyableForClass(ClassB, ClassB)
a = jelly.unjelly(jelly.jelly(ClassA()))
self.assertIdentical(a.ref.ref, a,
"Identity not preserved in circular reference")
def test_circleWithInvoker(self):
class DummyInvokerClass:
pass
dummyInvoker = DummyInvokerClass()
dummyInvoker.serializingPerspective = None
a0 = ClassA()
jelly.setUnjellyableForClass(ClassA, ClassA)
jelly.setUnjellyableForClass(ClassB, ClassB)
j = jelly.jelly(a0, invoker=dummyInvoker)
a1 = jelly.unjelly(j)
self.failUnlessIdentical(a1.ref.ref, a1,
"Identity not preserved in circular reference")
def test_set(self):
"""
Check that a C{set} can contain a circular reference and be serialized
and unserialized without losing the reference.
"""
s = set()
a = SimpleJellyTest(s, None)
s.add(a)
res = jelly.unjelly(jelly.jelly(a))
self.assertIsInstance(res.x, set)
self.assertEquals(list(res.x), [res])
def test_frozenset(self):
"""
Check that a C{frozenset} can contain a circular reference and be
        serialized and unserialized without losing the reference.
"""
a = SimpleJellyTest(None, None)
s = frozenset([a])
a.x = s
res = jelly.unjelly(jelly.jelly(a))
self.assertIsInstance(res.x, frozenset)
self.assertEquals(list(res.x), [res])
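# Illustrative sketch (not part of the original test suite): the basic
# round-trip pattern these tests exercise, assuming twisted.spread.jelly is
# importable.
#
#   from twisted.spread import jelly
#   sexp = jelly.jelly({"answer": 42})   # serialize to a jelly s-expression
#   data = jelly.unjelly(sexp)           # rebuild an equal Python object
#   assert data == {"answer": 42}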
| eunchong/build | third_party/twisted_10_2/twisted/test/test_jelly.py | Python | bsd-3-clause | 18,738 |
# Authors: Alexandre Gramfort <[email protected]>
# Vincent Michel <[email protected]>
# Gilles Louppe <[email protected]>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import warnings
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..utils.metaestimators import if_delegate_has_method
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import check_cv
from ..cross_validation import _safe_split, _score
from ..metrics.scorer import check_scoring
from .base import SelectorMixin
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that ``ranking_[i]`` corresponds to the
ranking position of the i-th feature. Selected (i.e., estimated
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 truly informative
    features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params=None, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
@property
def _estimator_type(self):
return self.estimator._estimator_type
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
return self._fit(X, y)
def _fit(self, X, y, step_score=None):
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
if 0.0 < self.step < 1.0:
step = int(max(1, self.step * n_features))
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the value "
"is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
if step_score:
self.scores_ = []
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
if self.estimator_params:
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
# Get coefs
if hasattr(estimator, 'coef_'):
coefs = estimator.coef_
elif hasattr(estimator, 'feature_importances_'):
coefs = estimator.feature_importances_
else:
raise RuntimeError('The classifier does not expose '
'"coef_" or "feature_importances_" '
'attributes')
# Get ranks
if coefs.ndim > 1:
ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(coefs))
# for sparse case ranks is matrix
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
# Compute step score on the previous selection iteration
# because 'estimator' must use features
# that have not been eliminated yet
if step_score:
self.scores_.append(step_score(estimator, features))
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
features = np.arange(n_features)[support_]
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, features], y)
# Compute step score when only n_features_to_select features left
if step_score:
self.scores_.append(step_score(self.estimator_, features))
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
return self.estimator_.predict_log_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Read more in the :ref:`User Guide <rfe>`.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        binary or multiclass, :class:`StratifiedKFold` is used. If the
        estimator is not a classifier or if ``y`` is neither binary nor
        multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
This attribute is deprecated as of version 0.16 and will be removed in
0.18. Use estimator initialisation or set_params method instead.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
        The feature ranking, such that ``ranking_[i]`` corresponds to the
        ranking position of the i-th feature. Selected (i.e., estimated
        best) features are assigned rank 1.
    grid_scores_ : array of shape [n_subsets_of_features]
        The cross-validation scores such that ``grid_scores_[i]`` corresponds
        to the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Notes
-----
The size of ``grid_scores_`` is equal to ceil((n_features - 1) / step) + 1,
where step is the number of features removed at each iteration.
Examples
--------
    The following example shows how to retrieve the 5 informative features,
    not known a priori, in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params=None, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
if self.estimator_params is not None:
warnings.warn("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. "
"The parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.", DeprecationWarning)
# Initialization
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
n_features = X.shape[1]
n_features_to_select = 1
# Determine the number of subsets of features
scores = []
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
rfe._fit(X_train, y_train, lambda estimator, features:
_score(estimator, X_test[:, features], y_test, scorer))
scores.append(np.array(rfe.scores_[::-1]).reshape(1, -1))
scores = np.sum(np.concatenate(scores, 0), 0)
# The index in 'scores' when 'n_features' features are selected
n_feature_index = np.ceil((n_features - n_features_to_select) /
float(self.step))
n_features_to_select = max(n_features_to_select,
n_features - ((n_feature_index -
np.argmax(scores)) *
self.step))
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=n_features_to_select,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
if self.estimator_params:
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
# Fixing a normalization error, n is equal to len(cv) - 1
# here, the scores are normalized by len(cv)
self.grid_scores_ = scores / len(cv)
return self
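# Minimal usage sketch (not part of scikit-learn): fitting RFE and reducing a
# feature matrix, mirroring the doctest in the RFE class docstring above.
#
#   from sklearn.datasets import make_friedman1
#   from sklearn.svm import SVR
#   X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
#   selector = RFE(SVR(kernel="linear"), n_features_to_select=5, step=1).fit(X, y)
#   X_reduced = selector.transform(X)   # keeps only the 5 selected columns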
| beepee14/scikit-learn | sklearn/feature_selection/rfe.py | Python | bsd-3-clause | 17,509 |
[foo, bar] = (1, 2)
print(foo)
# <ref>
| asedunov/intellij-community | python/testData/resolve/ListAssignment.py | Python | apache-2.0 | 44 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class crm_lead(osv.osv):
_inherit = 'crm.lead'
def get_interested_action(self, cr, uid, interested, context=None):
try:
model, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'crm_lead_channel_interested_act')
except ValueError:
raise osv.except_osv(_('Error!'), _("The CRM Channel Interested Action is missing"))
action = self.pool[model].read(cr, uid, [action_id], context=context)[0]
action_context = eval(action['context'])
action_context['interested'] = interested
action['context'] = str(action_context)
return action
def case_interested(self, cr, uid, ids, context=None):
return self.get_interested_action(cr, uid, True, context=context)
def case_disinterested(self, cr, uid, ids, context=None):
return self.get_interested_action(cr, uid, False, context=context)
def assign_salesman_of_assigned_partner(self, cr, uid, ids, context=None):
salesmans_leads = {}
for lead in self.browse(cr, uid, ids, context=context):
if (lead.stage_id.probability > 0 and lead.stage_id.probability < 100) or lead.stage_id.sequence == 1:
if lead.partner_assigned_id and lead.partner_assigned_id.user_id and lead.partner_assigned_id.user_id != lead.user_id:
salesman_id = lead.partner_assigned_id.user_id.id
if salesmans_leads.get(salesman_id):
salesmans_leads[salesman_id].append(lead.id)
else:
salesmans_leads[salesman_id] = [lead.id]
for salesman_id, lead_ids in salesmans_leads.items():
salesteam_id = self.on_change_user(cr, uid, lead_ids, salesman_id, context=None)['value'].get('section_id')
self.write(cr, uid, lead_ids, {'user_id': salesman_id, 'section_id': salesteam_id}, context=context)
| diogocs1/comps | web/addons/crm_partner_assign/crm_lead.py | Python | apache-2.0 | 2,985 |
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
mass-rename: update source files (gyp lists, #includes) to reflect
a rename. Expects "git diff --cached -M" to list a bunch of renames.
To use:
1) git mv foo1 bar1; git mv foo2 bar2; etc.
2) *without committing*, ./tools/git/mass-rename.py
3) look at git diff (without --cached) to see what the damage is
"""
import os
import subprocess
import sys
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
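# Each line of `git diff --cached --raw -M` looks roughly like (tab-separated
# after the status column):
#
#   :100644 100644 1234567... 89abcde... R095<TAB>old/path.cc<TAB>new/path.cc
#
# so after splitting on tabs, parts[0] holds the mode/sha/status columns and
# parts[1]/parts[2] hold the old and new paths; attrs.split()[4] is the status
# field ("R..." for renames).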
def main():
popen = subprocess.Popen('git diff --cached --raw -M',
shell=True, stdout=subprocess.PIPE)
out, _ = popen.communicate()
if popen.returncode != 0:
return 1
for line in out.splitlines():
parts = line.split('\t')
if len(parts) != 3:
print 'Skipping: %s -- not a rename?' % parts
continue
attrs, fro, to = parts
if attrs.split()[4].startswith('R'):
subprocess.check_call([
sys.executable,
os.path.join(BASE_DIR, 'move_source_file.py'),
'--already_moved',
'--no_error_for_non_source_file',
fro, to])
else:
print 'Skipping: %s -- not a rename?' % fro
return 0
if __name__ == '__main__':
sys.exit(main())
| GeyerA/android_external_chromium_org | tools/git/mass-rename.py | Python | bsd-3-clause | 1,344 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-11-24 14:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('relaydomains', '0006_auto_20170215_0948'),
]
operations = [
migrations.CreateModel(
name='RecipientAccess',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pattern', models.CharField(max_length=254, unique=True)),
('action', models.CharField(max_length=40)),
],
),
]
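# To apply this migration (sketch, assuming a standard Django manage.py setup):
#
#   python manage.py migrate relaydomains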
| tonioo/modoboa | modoboa/relaydomains/migrations/0007_recipientaccess.py | Python | isc | 670 |
import csv

# email, person1, person2 and traits are expected to be defined upstream
# (e.g. pulled from the Traitify results being consolidated).
keys = [email, person1, person2]
mylist = []
for k in keys:
    mylist.append(k)
for i in traits:
    mylist.append(i)
with open('traitify.csv', 'wb') as myfile:
    w = csv.writer(myfile, quoting=csv.QUOTE_ALL)
    w.writerow(mylist)
| meyhelm/traitify_consolidator | printCsv.py | Python | isc | 244 |
import requests
import json
import re
import warnings
from .filterhandler import filter_handler
from .habanero_utils import (
switch_classes,
check_json,
is_json,
parse_json_err,
make_ua,
filter_dict,
rename_query_filters,
ifelsestr,
)
from .exceptions import *
from .request_class import Request
def request(
mailto,
ua_string,
url,
path,
ids=None,
query=None,
filter=None,
offset=None,
limit=None,
sample=None,
sort=None,
order=None,
facet=None,
select=None,
works=None,
cursor=None,
cursor_max=None,
agency=False,
progress_bar=False,
should_warn=False,
**kwargs
):
warning_thrown = False
url = url + path
if cursor_max.__class__.__name__ != "NoneType":
if cursor_max.__class__ != int:
raise ValueError("cursor_max must be of class int")
filt = filter_handler(filter)
if select.__class__ is list:
select = ",".join(select)
payload = {
"query": query,
"filter": filt,
"offset": offset,
"rows": limit,
"sample": sample,
"sort": sort,
"order": order,
"facet": facet,
"select": select,
"cursor": cursor,
}
# convert limit/offset to str before removing None
# b/c 0 (zero) is falsey, so that param gets dropped
payload["offset"] = ifelsestr(payload["offset"])
payload["rows"] = ifelsestr(payload["rows"])
# remove params with value None
payload = dict((k, v) for k, v in payload.items() if v)
# add query filters
payload.update(filter_dict(kwargs))
# rename query filters
payload = rename_query_filters(payload)
if ids.__class__.__name__ == "NoneType":
url = url.strip("/")
try:
r = requests.get(url, params=payload, headers=make_ua(mailto, ua_string))
r.raise_for_status()
except requests.exceptions.HTTPError:
if is_json(r):
raise RequestError(r.status_code, parse_json_err(r))
else:
r.raise_for_status()
except requests.exceptions.RequestException as e:
raise e
check_json(r)
coll = r.json()
else:
if ids.__class__.__name__ == "str":
ids = ids.split()
if ids.__class__.__name__ == "int":
ids = [ids]
# should_warn = len(ids) > 1
coll = []
for i in range(len(ids)):
if works:
res = Request(
mailto,
ua_string,
url,
str(ids[i]) + "/works",
query,
filter,
offset,
limit,
sample,
sort,
order,
facet,
select,
cursor,
cursor_max,
None,
progress_bar,
**kwargs
).do_request(should_warn=should_warn)
coll.append(res)
else:
if agency:
endpt = url + str(ids[i]) + "/agency"
else:
endpt = url + str(ids[i])
endpt = endpt.strip("/")
r = requests.get(
endpt, params=payload, headers=make_ua(mailto, ua_string)
)
if r.status_code > 201 and should_warn:
warning_thrown = True
mssg = "%s on %s: %s" % (r.status_code, ids[i], r.reason)
warnings.warn(mssg)
else:
r.raise_for_status()
# try:
# r = requests.get(
# endpt, params=payload, headers=make_ua(mailto, ua_string)
# )
# if r.status_code > 201 and should_warn:
# warning_thrown = True
# mssg = '%s on %s: %s' % (r.status_code, ids[i], r.reason)
# warnings.warn(mssg)
# else:
# r.raise_for_status()
# except requests.exceptions.HTTPError:
# if is_json(r):
# raise RequestError(r.status_code, parse_json_err(r))
# else:
# r.raise_for_status()
# except requests.exceptions.RequestException as e:
# raise e
if warning_thrown:
coll.append(None)
else:
check_json(r)
js = r.json()
coll.append(js)
if len(coll) == 1:
coll = coll[0]
return coll
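# Illustrative sketch (not part of the module): calling this low-level helper
# directly; the public habanero classes normally build these arguments. The
# mailto address and query below are placeholders.
#
#   res = request(
#       mailto="[email protected]", ua_string=None,
#       url="https://api.crossref.org/", path="works",
#       query="ecology", limit=5,
#   )
#   print(res["message"]["total-results"])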
| sckott/habanero | habanero/request.py | Python | mit | 4,866 |
#!/usr/bin/env python
from __future__ import print_function
"""
branching.py
test branching dependencies
"""
import os
import sys
# add grandparent to search path for testing
grandparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# funky code to import by file name
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
ruffus_name = os.path.basename(parent_dir)
ruffus = __import__ (ruffus_name)
try:
attrlist = ruffus.__all__
except AttributeError:
attrlist = dir (ruffus)
for attr in attrlist:
if attr[0:2] != "__":
globals()[attr] = getattr (ruffus, attr)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
from collections import defaultdict
import json
# use simplejson in place of json for python < 2.6
#try:
# import json
#except ImportError:
# import simplejson
# json = simplejson
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Main logic
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
species_list = defaultdict(list)
species_list["mammals"].append("cow" )
species_list["mammals"].append("horse" )
species_list["mammals"].append("sheep" )
species_list["reptiles"].append("snake" )
species_list["reptiles"].append("lizard" )
species_list["reptiles"].append("crocodile" )
species_list["fish" ].append("pufferfish")
tempdir = "temp_filesre_combine/"
def do_write(file_name, what):
with open(file_name, "a") as oo:
oo.write(what)
test_file = tempdir + "task.done"
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Tasks
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#
# task1
#
@follows(mkdir(tempdir, tempdir + "test"))
@posttask(lambda: do_write(test_file, "Task 1 Done\n"))
def prepare_files ():
for grouping in species_list.keys():
for species_name in species_list[grouping]:
filename = tempdir + "%s.%s.animal" % (species_name, grouping)
with open(filename, "w") as oo:
oo.write(species_name + "\n")
#
# task2
#
@files_re(tempdir + '*.animal', r'(.*/)(.*)\.(.*)\.animal', combine(r'\1\2.\3.animal'), r'\1\3.results')
@follows(prepare_files)
@posttask(lambda: do_write(test_file, "Task 2 Done\n"))
def summarise_by_grouping(infiles, outfile):
"""
Summarise by each species group, e.g. mammals, reptiles, fish
"""
with open(tempdir + "jobs.start", "a") as oo:
oo.write('job = %s\n' % json.dumps([infiles, outfile]))
with open(outfile, "w") as oo:
for i in infiles:
with open(i) as ii:
oo.write(ii.read())
with open(tempdir + "jobs.finish", "a") as oo:
oo.write('job = %s\n' % json.dumps([infiles, outfile]))
def check_species_correct():
"""
#cow.mammals.animal
#horse.mammals.animal
#sheep.mammals.animal
# -> mammals.results
#
#snake.reptiles.animal
#lizard.reptiles.animal
#crocodile.reptiles.animal
# -> reptiles.results
#
#pufferfish.fish.animal
# -> fish.results
"""
for grouping in species_list:
with open(tempdir + grouping + ".results") as ii:
assert(ii.read() ==
"".join(s + "\n" for s in sorted(species_list[grouping])))
import unittest, shutil
class Test_ruffus(unittest.TestCase):
def tearDown(self):
try:
shutil.rmtree(tempdir)
except:
pass
def setUp(self):
try:
shutil.rmtree(tempdir)
except:
pass
def test_ruffus (self):
""
pipeline_run(multiprocess = 10, verbose = 0, pipeline= "main")
check_species_correct()
if __name__ == '__main__':
unittest.main()
| pombreda/ruffus | ruffus/test/test_filesre_combine.py | Python | mit | 4,198 |
import Pyro4
class PyroAdapter(object):
_nameserver = False
def __init__(self):
self._daemon = None
def setup_pyro(self):
import config as _c
        # copy every non-dunder setting from the local config module onto Pyro4.config
        keys = [x for x in dir(_c) if not x.startswith('__')]
for name in keys:
setattr(Pyro4.config, name, getattr(_c, name))
def daemon(self):
if self._daemon is None:
self.setup_pyro()
self._daemon = Pyro4.Daemon()
return self._daemon
def register(self, item):
if self._daemon is None:
self.daemon()
return self._daemon.register(item)
def get_nameserver(self):
if self._daemon is None:
self.setup_pyro()
try:
return Pyro4.locateNS()
except Pyro4.errors.NamingError:
# no name server
return None
def get_object(self, uri):
o = Pyro4.Proxy(uri)
return o
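# Minimal usage sketch (not part of the original module). `EchoService` is a
# placeholder class; a local `config` module with Pyro4 settings must be
# importable, as setup_pyro() expects.
#
#   adapter = PyroAdapter()
#   uri = adapter.register(EchoService())    # creates the daemon lazily
#   ns = adapter.get_nameserver()
#   if ns is not None:
#       ns.register("example.echo", uri)     # optional name server entry
#   adapter.daemon().requestLoop()           # serve requests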
| Strangemother/python-state-machine | scratch/machine_4/adapters.py | Python | mit | 971 |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2017 Dean Jackson <[email protected]>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2017-11-14
#
"""Common workflow variables and functions."""
from __future__ import print_function, absolute_import
from collections import OrderedDict
import logging
import os
from workflow import Variables
log = logging.getLogger('workflow')
# Default workflow settings
DEFAULT_SETTINGS = {
'locales': [
'en',
'de_DE',
'es_ES',
'fr_FR',
],
}
DOCS_URL = 'https://github.com/deanishe/alfred-fakeum/blob/master/README.md'
HELP_URL = u'https://www.alfredforum.com/topic/5319-fakeum-—-generate-fake-test-datasets-in-alfred/'
ISSUE_URL = 'https://github.com/deanishe/alfred-fakeum/issues'
UPDATE_SETTINGS = {'github_slug': 'deanishe/alfred-fakeum'}
# Workflow icons
ICON_DOCS = 'icons/docs.png'
ICON_HELP = 'icons/help.png'
ICON_ISSUE = 'icons/issue.png'
ICON_ON = 'icons/on.png'
ICON_OFF = 'icons/off.png'
ICON_LOCALES = 'icons/locales.png'
ICON_UPDATE_CHECK = 'icons/update-check.png'
ICON_UPDATE_AVAILABLE = 'icons/update-available.png'
# All locales supported by faker
ALL_LOCALES = OrderedDict((
('en', 'English'),
('de_DE', 'German'),
('es', 'Spanish'),
('fr_FR', 'French'),
('ar_AA', 'Arabic'),
('ar_EG', 'Arabic (Egypt)'),
('ar_JO', 'Arabic (Jordan)'),
('ar_PS', 'Arabic (Palestine)'),
('ar_SA', 'Arabic (Saudi Arabia)'),
('bs_BA', 'Bosnian'),
('bg_BG', 'Bulgarian'),
('zh_CN', 'Chinese (China)'),
('zh_TW', 'Chinese (Taiwan)'),
('hr_HR', 'Croatian'),
('cs_CZ', 'Czech'),
('dk_DK', 'Danish'),
('nl_NL', 'Dutch'),
('nl_BE', 'Dutch (Belgium)'),
('en_AU', 'English (Australia)'),
('en_CA', 'English (Canada)'),
('en_GB', 'English (Great Britain)'),
('en_TH', 'English (Thailand)'),
('en_US', 'English (United States)'),
('et_EE', 'Estonian'),
('fi_FI', 'Finnish'),
('fr_CH', 'French (Switzerland)'),
('ka_GE', 'Georgian'),
('de_AT', 'German (Austria)'),
('tw_GH', 'Ghanaian'),
('el_GR', 'Greek'),
('he_IL', 'Hebrew'),
('hi_IN', 'Hindi'),
('hu_HU', 'Hungarian'),
('id_ID', 'Indonesian'),
('it_IT', 'Italian'),
('ja_JP', 'Japanese'),
('ko_KR', 'Korean'),
('la', 'Latin'),
('lv_LV', 'Latvian'),
('lt_LT', 'Lithuanian'),
('ne_NP', 'Nepali'),
('no_NO', 'Norwegian'),
('fa_IR', 'Persian'),
('pl_PL', 'Polish'),
('pt_BR', 'Portuguese (Brazil)'),
('pt_PT', 'Portuguese (Portugal)'),
('ru_RU', 'Russian'),
('sk_SK', 'Slovakian'),
('sl_SI', 'Slovenian'),
('es_MX', 'Spanish (Mexico)'),
('es_ES', 'Spanish (Spain)'),
('sv_SE', 'Swedish'),
('th_TH', 'Thai'),
('tr_TR', 'Turkish'),
    ('uk_UA', 'Ukrainian'),
))
# Workflow's bundle IDs
BUNDLE_ID = os.getenv('alfred_workflow_bundleid')
# Script Filter keyword
KEYWORD = os.getenv('keyword')
# AppleScript to run an Alfred search
SEARCH_AS = u'tell application "Alfred 3" to search "{query}"'
def boolvar(name, default=False):
"""Return `True` or `False` for a workflow variable."""
v = os.getenv(name)
if v is not None:
if v.lower() in ('1', 'on', 'yes'):
return True
if v.lower() in ('0', 'off', 'no'):
return False
log.debug('no value set for workflow variable "%s", '
'using default: %r', name, default)
return default
def intvar(name, default=0):
"""Return `int` for a workflow variable."""
v = os.getenv(name)
if v is not None:
try:
v = int(v)
except ValueError:
log.error('bad value for "%s": "%s" is not a number', name, v)
return default
return v
log.debug('no value set for workflow variable "%s", '
'using default: %r', name, default)
return default
def notify(title, text=''):
"""Show a notification."""
if not boolvar('SHOW_NOTIFICATIONS'):
return
v = Variables(title=title, text=text)
print(v)
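# Illustrative sketch (not part of the workflow): typical calls from a
# workflow script; ITEM_COUNT is a hypothetical workflow variable.
#
#   if boolvar('SHOW_NOTIFICATIONS', default=True):
#       notify('Fakeum', 'Copied fake data to clipboard')
#   count = intvar('ITEM_COUNT', default=10)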
| nekonok/dotfiles | app/alfred/Alfred.alfredpreferences/workflows/user.workflow.125AE956-75D0-4ABD-BA83-AB8EB38B9531/common.py | Python | mit | 4,088 |
import sys, os, time
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
from acq4.drivers.PatchStar import PatchStar
if len(sys.argv) < 2:
print("Usage: test.py com4")
sys.exit(-1)
ps = PatchStar(sys.argv[1])
ps.reset()
print("Firmware version: %s" % ps.getFirmwareVersion())
print("Max speed: %s um/sec" % ps.getSpeed())
pos1 = ps.getPos()
pos2 = [None, None, pos1[2]]
pos2[2] += 1000
print("Move %s => %s" % (pos1, pos2))
ps.moveTo(pos2, speed=300)
c = 0
while ps.isMoving():
pos = ps.getPos()
print("time: %s position: %s" % (time.time(), pos))
time.sleep(0.01)
c += 1
ps.moveTo(pos1, speed=30000)
while ps.isMoving():
pass
print("Move %s => %s" % (pos1, pos2))
ps.moveTo(pos2, speed=300)
c2 = 0
while ps.isMoving():
pos = ps.getPos()
print("time: %s position: %s" % (time.time(), pos))
if c2 > c//2:
print("Stopping early..")
ps.stop()
time.sleep(0.01)
c2 += 1
time.sleep(0.5)
pos = ps.getPos()
print("time: %s position: %s" % (time.time(), pos))
| tropp/acq4 | acq4/drivers/PatchStar/test.py | Python | mit | 1,043 |
import plusfw
from datetime import datetime
from requests import get, post, codes, exceptions
import json
class Html:
"""
"""
base_uris = {
plusfw.LABEL_PFW: "https://www.plusforward.net",
plusfw.LABEL_QLIVE: "https://www.plusforward.net",
plusfw.LABEL_QIV: "https://www.plusforward.net",
plusfw.LABEL_QIII: "https://www.plusforward.net",
plusfw.LABEL_QII: "https://www.plusforward.net",
plusfw.LABEL_QWORLD: "https://www.plusforward.net",
plusfw.LABEL_DIABOT: "https://www.plusforward.net",
plusfw.LABEL_DOOM: "https://www.plusforward.net",
plusfw.LABEL_REFLEX: "https://www.plusforward.net",
plusfw.LABEL_OWATCH: "https://www.plusforward.net",
plusfw.LABEL_GG: "https://www.plusforward.net",
plusfw.LABEL_UNREAL: "https://www.plusforward.net",
plusfw.LABEL_WARSOW: "https://www.plusforward.net",
plusfw.LABEL_DBMB: "https://www.plusforward.net",
plusfw.LABEL_XONOT: "https://www.plusforward.net",
plusfw.LABEL_QCHAMP: "https://www.plusforward.net",
plusfw.LABEL_QCPMA: "https://www.plusforward.net",
}
calendar_path = {
plusfw.LABEL_PFW: "/calendar/",
plusfw.LABEL_QLIVE: "/calendar/",
plusfw.LABEL_QIV: "/calendar/",
plusfw.LABEL_QIII: "/calendar/",
plusfw.LABEL_QII: "/calendar/",
plusfw.LABEL_QWORLD: "/calendar/",
plusfw.LABEL_DIABOT: "/calendar/",
plusfw.LABEL_DOOM: "/calendar/",
plusfw.LABEL_REFLEX: "/calendar/",
plusfw.LABEL_OWATCH: "/calendar/",
plusfw.LABEL_GG: "/calendar/",
plusfw.LABEL_UNREAL: "/calendar/",
plusfw.LABEL_WARSOW: "/calendar/",
plusfw.LABEL_DBMB: "/calendar/",
plusfw.LABEL_XONOT: "/calendar/",
plusfw.LABEL_QCHAMP: "/calendar/",
plusfw.LABEL_QCPMA: "/calendar/",
}
event_path = {
plusfw.LABEL_PFW: "/calendar/manage/",
plusfw.LABEL_QLIVE: "/calendar/manage/",
plusfw.LABEL_QIV: "/calendar/manage/",
plusfw.LABEL_QIII: "/calendar/manage/",
plusfw.LABEL_QII: "/calendar/manage/",
plusfw.LABEL_QWORLD: "/calendar/manage/",
plusfw.LABEL_DIABOT: "/calendar/manage/",
plusfw.LABEL_DOOM: "/calendar/manage/",
plusfw.LABEL_REFLEX: "/calendar/manage/",
plusfw.LABEL_OWATCH: "/calendar/manage/",
plusfw.LABEL_GG: "/calendar/manage/",
plusfw.LABEL_UNREAL: "/calendar/manage/",
plusfw.LABEL_WARSOW: "/calendar/manage/",
plusfw.LABEL_DBMB: "/calendar/manage/",
plusfw.LABEL_XONOT: "/calendar/manage/",
plusfw.LABEL_QCHAMP: "/calendar/manage/",
plusfw.LABEL_QCPMA: "/calendar/manage/",
}
calendar_type = {
plusfw.LABEL_QLIVE: 3,
plusfw.LABEL_QIV: 4,
plusfw.LABEL_QIII: 5,
plusfw.LABEL_QII: 6,
plusfw.LABEL_QWORLD: 7,
plusfw.LABEL_DIABOT: 8,
plusfw.LABEL_DOOM: 9,
plusfw.LABEL_REFLEX: 10,
plusfw.LABEL_OWATCH: 13,
plusfw.LABEL_GG: 14,
plusfw.LABEL_UNREAL: 15,
plusfw.LABEL_WARSOW: 16,
plusfw.LABEL_DBMB: 17,
plusfw.LABEL_XONOT: 18,
plusfw.LABEL_QCHAMP: 20,
plusfw.LABEL_QCPMA: 21,
}
class UriBuilder:
"""
"""
@staticmethod
def get_uri(calendar=plusfw.LABEL_PFW):
return Html.base_uris[calendar]
@staticmethod
def get_calendar_uri(calendar=plusfw.LABEL_PFW, by_week=True, date=None):
if by_week:
view_by = "week"
else:
view_by = "month"
if date is None:
date = datetime.now()
fmt = date.strftime
url = Html.base_uris[calendar] + Html.calendar_path[calendar] + \
'?view=%s&year=%s&month=%s&day=%s¤t=0' % (view_by, fmt("%Y"), fmt("%m"), fmt("%d"))
return url
@staticmethod
def get_event_uri(calendar=plusfw.LABEL_PFW):
return Html.base_uris[calendar] + Html.event_path[calendar]
@staticmethod
def get_calendar(calendar="+fw", by_week=True, date=None, debug=False):
"""
:param debug:
:param date:
:param by_week:
:param calendar:
:return: str
"""
calendar_uri = Html.UriBuilder.get_calendar_uri(calendar, by_week, date)
if debug:
print("Loading calendar from: %s" % calendar_uri)
post_data = None
if calendar in Html.calendar_type:
post_data = {
"cat": str(Html.calendar_type[calendar])
}
try:
if post_data:
tl_response = post(calendar_uri, post_data)
else:
tl_response = get(calendar_uri)
except exceptions.ConnectionError as h:
return "" % h
if tl_response.status_code == codes.ok:
return tl_response.content
else:
return ""
@staticmethod
def get_event(calendar=plusfw.LABEL_QCHAMP, event_id=None, debug=True):
if event_id is None:
return ""
if debug:
print(event_id, end=" ", flush=True)
html = ""
event_uri = Html.UriBuilder.get_event_uri(calendar)
post_data = {
"action": "view-event-popup",
"event_id": event_id
}
tl_response = post(event_uri, post_data)
if tl_response.status_code == codes.ok and tl_response.headers.get('content-type') == "application/json":
decoded_response = json.loads(tl_response.content.decode(encoding="UTF-8"))
if "html" in decoded_response:
html = decoded_response["html"]
return html
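# Illustrative sketch (not part of the module): fetching this week's Quake
# Champions calendar and a single event popup; the event id is a placeholder.
#
#   import plusfw
#   from plusfw.scraper.html import Html
#   page = Html.get_calendar(calendar=plusfw.LABEL_QCHAMP, by_week=True)
#   popup = Html.get_event(calendar=plusfw.LABEL_QCHAMP, event_id="12345")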
| mariusor/tlcal | plusfw/scraper/html.py | Python | mit | 5,805 |
"""
The MIT License (MIT)
Copyright (c) 2014 Trustly Group AB
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class TrustlyJSONRPCVersionError(Exception):
pass
class TrustlyConnectionError(Exception):
pass
class TrustlyDataError(Exception):
pass
class TrustlySignatureError(Exception):
def __init__(self, message, data=None):
super(TrustlySignatureError, self).__init__(message)
self.signature_data = data
def get_bad_data(self):
return self.signature_data
class TrustlyAuthentificationError(Exception):
pass
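# Illustrative sketch (not part of the library): how callers typically react to
# a signature failure; `verify_trustly_response` is a hypothetical helper.
#
#   try:
#       verify_trustly_response(payload)
#   except TrustlySignatureError as e:
#       bad = e.get_bad_data()   # the data that failed signature verification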
# vim: set et cindent ts=4 ts=4 sw=4:
| trustly/trustly-client-python | trustly/exceptions.py | Python | mit | 1,583 |
# Django settings for SHTP project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': './db.sql', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Shanghai'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), '../static'),
os.path.join(os.path.dirname(__file__), '../media'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '4m!fdos(75gol*i4!&2%t#h%ku*y#p6sl8w3iy-u#0^ofll265'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'SHTP.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'SHTP.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__),'../templates/'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'south',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
if DEBUG:
INTERNAL_IPS = ('127.0.0.1',)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INSTALLED_APPS += (
'debug_toolbar',
)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.version.VersionDebugPanel',
'debug_toolbar.panels.timer.TimerDebugPanel',
'debug_toolbar.panels.settings_vars.SettingsVarsDebugPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel',
#'debug_toolbar.panels.profiling.ProfilingDebugPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel',
'debug_toolbar.panels.sql.SQLDebugPanel',
'debug_toolbar.panels.template.TemplateDebugPanel',
'debug_toolbar.panels.cache.CacheDebugPanel',
'debug_toolbar.panels.signals.SignalDebugPanel',
'debug_toolbar.panels.logger.LoggingPanel',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
| zypgithub/SHTPforQDU | SHTP/settings.py | Python | mit | 6,528 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import mechanize
import cookielib
import sys
import urllib
def getTokenMechanize():
br = mechanize.Browser()
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
br.set_handle_equiv(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
# Follows refresh 0 but not hangs on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# Want debugging messages?
# br.set_debug_http(True)
# br.set_debug_redirects(True)
# br.set_debug_responses(True)
br.addheaders = [('User-agent', 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/33.0.1750.152 Chrome/33.0.1750.152 Safari/537.36')]
r = br.open('https://developers.facebook.com/tools/explorer/')
forms = br.forms()
loginForm = None
for f in br.forms():
if f.attrs['id'] == 'login_form':
loginForm = f
if not loginForm: sys.exit(1)
# # br.set_handle_refresh(False)
loginForm['email'] = '[email protected]'
loginForm['pass'] = 'aaaaaaaaaaaaa'
loginForm.set_all_readonly(False)
    # The submit input needs a value or mechanize will not work.
loginForm.set_value("lalalala", nr=len(loginForm.controls) - 1)
response = loginForm.click(id="u_0_1")
r = br.open(response)
html = r.read()
token = html.split(',100004245942709,"')[1]
return token[:token.index('"')]
# print token[:token.index('"')] | itirabasso/asciicam-sdc | lalala.py | Python | mit | 1,553 |
import numpy as np
import sys
sys.path.append("..")
import data_tools as dt
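# Usage sketch (inferred from the argument handling below):
#   python chromosome3d_input.py <structure.bed> <contact_matrix.tsv>
# matFromBed builds a contact matrix from the BED-formatted input, and the
# matrix is written out as tab-separated text.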
in_path = sys.argv[1]
out_path = sys.argv[2]
contactMat = dt.matFromBed(in_path)
np.savetxt(out_path, contactMat, delimiter="\t")
| seqcode/miniMDS | scripts/chromosome3d_input.py | Python | mit | 208 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
r"""Minimal Flask application example for development with CERN handler.
SPHINX-START
1. Register a CERN application in
`https://sso-management.web.cern.ch/OAuth/RegisterOAuthClient.aspx` with
`redirect_uri` as
`https://localhost:5000/oauth/authorized/cern/` and filling all the other
fields:
2. Ensure you have ``gunicorn`` package installed:
.. code-block:: console
cdvirtualenv src/invenio-oauthclient
    pip install gunicorn
3. Ensure you have ``openssl`` installed on your system (most Linux
   distributions have it by default).
4. Grab the *client_id* and *secret_uri* after registering the application
and add them to your instance configuration as `consumer_key` and
`consumer_secret`.
.. code-block:: console
$ export CERN_APP_CREDENTIALS_KEY=my_cern_client_id
$ export CERN_APP_CREDENTIALS_SECRET=my_cern_secret_uri
5. Create database and tables:
.. code-block:: console
$ pip install -e .[all]
$ cd examples
$ export FLASK_APP=cern_app.py
$ ./app-setup.sh
You can find the database in `examples/cern_app.db`.
6. Create the key and the certificate in order to run an HTTPS server:
.. code-block:: console
$ openssl genrsa 1024 > ssl.key
$ openssl req -new -x509 -nodes -sha1 -key ssl.key > ssl.crt
7. Run the gunicorn server:
.. code-block:: console
$ gunicorn -b :5000 --certfile=ssl.crt --keyfile=ssl.key cern_app:app
8. Open the page `https://localhost:5000/cern` in a browser.
You will be redirected to CERN to authorize the application.
Click on `Grant` and you will be redirected back to
`https://localhost:5000/oauth/authorized/cern/`
   Now you will be back on the homepage, but this time it says:
`hello [email protected]`.
You have completed the user authorization.
9. To uninstall the example app:
.. code-block:: console
$ ./app-teardown.sh
SPHINX-END
"""
import os
from flask import Flask, redirect, url_for
from flask_babelex import Babel
from flask_login import current_user
from flask_menu import Menu as FlaskMenu
from invenio_accounts import InvenioAccounts
from invenio_accounts.views import blueprint as blueprint_user
from invenio_db import InvenioDB
from invenio_oauthclient import InvenioOAuthClient
from invenio_oauthclient.contrib import cern
from invenio_oauthclient.views.client import blueprint as blueprint_client
from invenio_oauthclient.views.settings import blueprint as blueprint_settings
from invenio_oauthclient._compat import monkey_patch_werkzeug # noqa isort:skip
monkey_patch_werkzeug() # noqa isort:skip
from flask_oauthlib.client import OAuth as FlaskOAuth # noqa isort:skip
# [ Configure application credentials ]
CERN_APP_CREDENTIALS = dict(
consumer_key=os.environ.get('CERN_APP_CREDENTIALS_KEY'),
consumer_secret=os.environ.get('CERN_APP_CREDENTIALS_SECRET'),
)
# Create Flask application
app = Flask(__name__)
app.config.update(
SQLALCHEMY_DATABASE_URI=os.environ.get(
'SQLALCHEMY_DATABASE_URI', 'sqlite:///cern_app.db'
),
OAUTHCLIENT_REMOTE_APPS=dict(
cern=cern.REMOTE_APP
),
CERN_APP_CREDENTIALS=CERN_APP_CREDENTIALS,
DEBUG=True,
SECRET_KEY='TEST',
SECURITY_PASSWORD_SALT='security-password-salt',
SECURITY_SEND_REGISTER_EMAIL=False,
SQLALCHEMY_TRACK_MODIFICATIONS=False,
APP_THEME=['semantic-ui'],
THEME_ICONS={
'semantic-ui': dict(
link='linkify icon'
)
}
)
Babel(app)
FlaskMenu(app)
InvenioDB(app)
InvenioAccounts(app)
FlaskOAuth(app)
InvenioOAuthClient(app)
app.register_blueprint(blueprint_user)
app.register_blueprint(blueprint_client)
app.register_blueprint(blueprint_settings)
principal = app.extensions['security'].principal
@app.route('/')
def index():
"""Homepage."""
return 'Home page (without any restrictions)'
@app.route('/cern')
def cern():
"""Home page: try to print user email or redirect to login with cern."""
if not current_user.is_authenticated:
return redirect(url_for('invenio_oauthclient.login',
remote_app='cern'))
return 'hello {}'.format(current_user.email)
| inveniosoftware/invenio-oauthclient | examples/cern_app.py | Python | mit | 4,432 |
__all__ = ["checkInputArguments", "generateTemplate", "relations"] | MichaelPHStumpf/Peitho | peitho/errors_and_parsers/abc_sysbio/abcsysbio/__init__.py | Python | mit | 66 |
# -*- coding: utf-8 -*-
from datetime import datetime
from datetime import timedelta
import os
import json
import telegram
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.base_dir import _base_dir
from pokemongo_bot.event_handlers import TelegramHandler
class FileIOException(Exception):
pass
class TelegramTask(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
def initialize(self):
if not self.enabled:
return
self.bot.event_manager.add_handler(TelegramHandler(self.bot, self.config))
def work(self):
if not self.enabled:
return
| pengzhangdev/PokemonGo-Bot | pokemongo_bot/cell_workers/telegram_task.py | Python | mit | 608 |
# https://leetcode.com/problems/unique-binary-search-trees/
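# Note on the recurrence used below: dp[i] counts the structurally unique BSTs
# with i nodes (the Catalan numbers). Choosing j as the root leaves j-1 nodes
# for the left subtree and i-j for the right, so dp[i] = sum(dp[j-1] * dp[i-j]).
# For example, numTrees(3) returns 5.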
class Solution(object):
def numTrees(self, n):
dp = [1] * (n+1)
for i in range(1,n+1):
ways = 0
for j in range(1,i+1):
ways += dp[j - 1] * dp[i-j]
dp[i] = ways
return dp[-1] | menghanY/LeetCode-Python | Tree/UniqueBinarySearchTrees.py | Python | mit | 316 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
class OCP(object):
"Extension IVI methods for power supplies supporting overcurrent protection"
def __init__(self, *args, **kwargs):
super(OCP, self).__init__(*args, **kwargs)
cls = 'IviDCPwr'
grp = 'OCP'
ivi.add_group_capability(self, cls+grp)
self._output_ocp_enabled = list()
self._output_ocp_limit = list()
self._output_spec = [
{
'range': {
'P8V': (9.0, 20.0),
'P20V': (21.0, 10.0)
},
'ovp_max': 22.0,
'ocp_max': 22.0,
'voltage_max': 9.0,
'current_max': 20.0
}
]
ivi.add_property(self, 'outputs[].ocp_enabled',
self._get_output_ocp_enabled,
self._set_output_ocp_enabled,
None,
ivi.Doc("""
Specifies whether the power supply provides over-current protection. If
this attribute is set to True, the power supply disables the output when
the output current is greater than or equal to the value of the OCP
Limit attribute.
"""))
ivi.add_property(self, 'outputs[].ocp_limit',
self._get_output_ocp_limit,
self._set_output_ocp_limit,
None,
ivi.Doc("""
Specifies the current the power supply allows. The units are Amps.
If the OCP Enabled attribute is set to True, the power supply disables the
output when the output current is greater than or equal to the value of
this attribute.
If the OCP Enabled is set to False, this attribute does not affect the
behavior of the instrument.
"""))
self._init_outputs()
def _init_outputs(self):
try:
super(OCP, self)._init_outputs()
except AttributeError:
pass
self._output_ocp_enabled = list()
self._output_ocp_limit = list()
for i in range(self._output_count):
self._output_ocp_enabled.append(True)
self._output_ocp_limit.append(0)
def _get_output_ocp_enabled(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_ocp_enabled[index]
def _set_output_ocp_enabled(self, index, value):
index = ivi.get_index(self._output_name, index)
value = bool(value)
self._output_ocp_enabled[index] = value
def _get_output_ocp_limit(self, index):
index = ivi.get_index(self._output_name, index)
return self._output_ocp_limit[index]
def _set_output_ocp_limit(self, index, value):
index = ivi.get_index(self._output_name, index)
value = float(value)
self._output_ocp_limit[index] = value
def _output_reset_output_protection(self, index):
pass
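# Illustrative usage sketch; the driver class and resource string below are
# hypothetical, only the ocp_* attributes come from the OCP mixin above:
#
#   psu = SomeDriverUsingOCP("TCPIP::192.168.0.10::INSTR")
#   psu.outputs[0].ocp_limit = 2.0      # amps
#   psu.outputs[0].ocp_enabled = True   # disable the output above 2 A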
| lude-ma/python-ivi | ivi/extra/dcpwr.py | Python | mit | 4,414 |
__author__ = 'rcj1492'
__created__ = '2016.03'
__license__ = 'MIT'
try:
import pytest
except:
print('pytest module required to perform unittests. try: pip install pytest')
exit()
from labpack.storage.appdata import appdataClient
from labpack.performance import performlab
from jsonmodel.exceptions import InputValidationError
if __name__ == '__main__':
from time import time, sleep
from copy import deepcopy
# initialize client
appdata_client = appdataClient(collection_name='Unit Tests')
export_client = appdataClient(collection_name='Test Export')
# construct test records
import json
from hashlib import md5
from labpack.compilers import drep
secret_key = 'upside'
test_record = {
'dt': 1474509314.419702,
'deviceID': '2Pp8d9lpsappm8QPv_Ps6cL0'
}
test_data = open('../data/test_voice.ogg', 'rb').read()
data_key = 'lab/voice/unittest.ogg'
record_data = json.dumps(test_record).encode('utf-8')
record_key = 'lab/device/unittest.json'
drep_data = drep.dump(test_record, secret_key)
drep_key = 'lab/device/unittest.drep'
# test save method
old_hash = md5(test_data).digest()
appdata_client.save(data_key, test_data, secret_key=secret_key)
appdata_client.save(record_key, record_data)
appdata_client.save(drep_key, drep_data)
assert appdata_client.exists(drep_key)
assert not appdata_client.exists('notakey')
# test export method
exit_msg = appdata_client.export(export_client)
exit_msg = appdata_client.export(export_client, overwrite=False)
assert exit_msg.find('3')
# test list arguments
path_segments = ['lab', 'unittests', '1473719695.2165067', '.json']
path_filters = [ { 0: { 'must_contain': [ '^lab' ] } } ]
filter_function = export_client.conditional_filter(path_filters=path_filters)
assert filter_function(*path_segments)
test_filter = appdata_client.conditional_filter([{2:{'must_contain':['unittest\.ogg$']}}])
filter_search = appdata_client.list(filter_function=test_filter)
prefix_search = export_client.list(prefix=record_key)
assert prefix_search[0] == record_key
delimiter_search = export_client.list(prefix='lab/device/', delimiter='.json')
assert delimiter_search[0] == drep_key
multi_filter = { 2: { 'must_contain': [ '.json$' ] } }
multi_function = appdata_client.conditional_filter(multi_filter)
multi_search = appdata_client.list(prefix='lab/device/', filter_function=multi_function)
assert multi_search[0] == prefix_search[0]
# test load argument
new_data = appdata_client.load(filter_search[0], secret_key=secret_key)
new_hash = md5(new_data).digest()
assert old_hash == new_hash
load_data = export_client.load(prefix_search[0])
record_details = json.loads(load_data.decode())
assert record_details == test_record
# test filter false results
path_filters = [{0: {'must_not_contain': ['^lab']}}]
filter_function = export_client.conditional_filter(path_filters=path_filters)
assert not filter_function(*path_segments)
path_filters = [{0: {'must_contain': ['^lab']}, 1:{'excluded_values': ['unittests']} }]
filter_function = export_client.conditional_filter(path_filters=path_filters)
assert not filter_function(*path_segments)
path_filters = [{0: {'must_contain': ['^lab']}, 2: {'discrete_values': ['unittests']}}]
filter_function = export_client.conditional_filter(path_filters=path_filters)
assert not filter_function(*path_segments)
path_filters = [{4: {'must_contain': ['^lab']}}]
filter_function = export_client.conditional_filter(path_filters=path_filters)
assert not filter_function(*path_segments)
# test filter exceptions
path_filters = [{ '0': {'must_contain': ['^lab']}}]
with pytest.raises(TypeError):
export_client.conditional_filter(path_filters=path_filters)
path_filters = [{ 0: 'string' }]
with pytest.raises(TypeError):
export_client.conditional_filter(path_filters=path_filters)
path_filters = [{ 0: {'must_contai': ['^lab']}}]
with pytest.raises(InputValidationError):
export_client.conditional_filter(path_filters=path_filters)
# test delete method
assert appdata_client.delete(filter_search[0])
# test list performance
testKey = 'lab/log/performancetest'
testDetails = {'command': 'home', 'project': 'lab', 'verbose': True}
count = 0
last_key = ''
while count < 100:
seed_details = deepcopy(testDetails)
seed_data = json.dumps(seed_details).encode('utf-8')
seed_details['type'] = '.json'
seed_details['time'] = time()
seed_key = '%s/%s%s' % (testKey, str(seed_details['time']), seed_details['type'])
appdata_client.save(record_key=seed_key, record_data=seed_data)
count += 1
sleep(.001)
if count == 2:
last_key = deepcopy(seed_key)
path_filters = [{ 1:{'must_contain':['^log']}}]
filter_function = appdata_client.conditional_filter(path_filters=path_filters)
filter_kwargs = {
'filter_function': filter_function,
'max_results': 100,
'previous_key': last_key
}
prefix_kwargs = {
'prefix': 'lab/log/performancetest',
'max_results': 100,
'previous_key': last_key
}
performlab.repeat(appdata_client.list, filter_kwargs, 'appdataClient.list(filter_function=self.conditional_filter(%s), max_results=100, previous_key=%s)' % (path_filters, last_key), 1000)
performlab.repeat(appdata_client.list, prefix_kwargs, 'appdataClient.list(prefix="%s", max_results=100, previous_key=%s' % (prefix_kwargs['prefix'], last_key), 1000)
# remove client
appdata_client.remove()
export_client.remove()
| collectiveacuity/labPack | tests/test_storage_appdata.py | Python | mit | 5,903 |
# -*- coding: utf-8 -*-
import pickle
from _thread import start_new_thread
from threading import Timer
from time import sleep
import traceback
from pysimplelog import Logger
from werkzeug.wrappers import Request, Response
from werkzeug.serving import run_simple
from FoodScan.ShopSync.metaShop import MetaShop
class ShopSync:
def __init__(self, shop, shop_list, web_hook_url=None, web_server_ip=None, web_server_port=8080, asynchron=True):
self.logger = Logger('ShopSync')
self.shop = shop
self.wu_list = shop_list
self.meta = MetaShop(self, self.wu_list)
self.shop_list_rev = 0
self.shop_task_revs = {}
self.shop_items = {}
self.choice = Choice("choices-" + shop.__class__.__name__ + ".db")
if web_hook_url:
self.web_hook_url = web_hook_url
self.web_server_ip = web_server_ip
self.web_server_port = web_server_port
function = self.start_hook
else:
function = self.listen
if asynchron:
start_new_thread(function, ())
else:
function()
def start_hook(self):
self.sync_shop_list()
self.sync_shop_list()
self.wu_list.create_web_hook(self.web_hook_url, self.web_server_port)
run_simple(self.web_server_ip, self.web_server_port, self.hook)
@Request.application
def hook(self, _):
self.timed_sync()
return Response()
def timed_sync(self):
try:
self.sync_shop_list()
except:
Timer(10 * 60.0, self.timed_sync).start() # 10 Minutes
return Response()
def listen(self):
wait = [10, 30, 60, 120, 240, 480]
wait_index = 0
while True:
try:
change = self.sync_shop_list()
if change:
wait_index = 0
else:
sleep(wait[wait_index])
wait_index = min(wait_index + 1, len(wait) - 1)
except Exception:
traceback.print_exc()
def sync_shop_list(self):
if self.shop_list_rev == self.wu_list.list_revision():
return False
new, changed, deleted_ids, meta_changed = self.detect_changed_tasks()
for iid in deleted_ids:
self.remove_item_by_id(iid)
for task in new:
self.new_item(task)
for task in changed:
self.update_item(task)
if meta_changed:
self.meta.sync()
self.update_meta()
return len(new) + len(changed) + len(deleted_ids) > 0 or meta_changed
def update_meta(self):
shop_items = [item.selected_shop_item() for item in list(self.shop_items.values()) if item.synced()]
price = 0
for s in shop_items:
price += s.amount * s.price
self.meta.set_price(price)
def detect_changed_tasks(self):
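        # Compare the revisions reported by the wu_list against the cached
        # revisions in self.shop_task_revs and partition the tasks into
        # new / changed / deleted, plus a flag for changes to the meta item.
        # The revision cache is updated as a side effect.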
self.shop_list_rev = self.wu_list.list_revision()
new_tasks = self.wu_list.list_tasks()
meta_changed = self.meta.detect_changes(new_tasks)
changed = []
new = []
for new_task in new_tasks:
if self.wu_list.is_meta_item(new_task):
continue
iid = new_task['id']
revision = new_task['revision']
if iid in self.shop_task_revs:
if self.shop_task_revs[iid] != revision:
self.shop_task_revs[iid] = revision
changed.append(new_task)
else:
self.shop_task_revs[iid] = revision
new.append(new_task)
deleted_ids = []
for iid in self.shop_task_revs:
found = False
for new_task in new_tasks:
if iid == new_task['id']:
found = True
break
if not found:
deleted_ids.append(iid)
for iid in deleted_ids:
self.shop_task_revs.pop(iid)
return new, changed, deleted_ids, meta_changed
def remove_item_by_id(self, iid):
item = self.shop_items.pop(iid)
        self.logger.info("delete - " + item.name)
if item.synced():
self.shop.delete(item.selected_shop_item())
def new_item(self, task):
        self.logger.info("new - " + task['title'])
iid = task['id']
item = self.wu_list.item_from_task(task)
shop_items = self.shop.search(item.name, item.sub_name)
item.set_shop_items(shop_items)
if item.selected_item:
self.choice.remember_choice(item)
else:
self.choice.match(item)
if item.selected_item:
self.shop.take(item.selected_shop_item())
self.shop_items[iid] = item
self.wu_list.update_item(task, item, rebuild_notes=True, rebuild_subs=True)
def update_item(self, task):
        self.logger.info("Update - " + task['title'])
iid = task['id']
item = self.wu_list.item_from_task(task)
existing = self.shop_items[iid]
if item != existing:
self.remove_item_by_id(iid)
self.new_item(task)
else:
update = False
if item.synced() and not existing.synced():
existing.select_shop_item(item.selected_shop_item())
self.shop.take(existing.selected_shop_item())
update = True
if not item.synced() and existing.synced():
self.shop.delete(existing.selected_shop_item())
existing.select_shop_item(None)
update = True
if existing.amount != item.amount:
existing.amount = item.amount
if existing.synced():
self.shop.take(existing.selected_shop_item())
update = True
if update:
self.choice.remember_choice(existing)
self.shop_task_revs[iid] = self.wu_list.update_item(task, existing)
class Choice:
def __init__(self, file_name):
self.file_name = file_name
self.matches = self.load()
    def save(self):
        # pickle streams are binary, so the file must be opened in binary mode
        with open(self.file_name, 'wb') as f:
            pickle.dump(self.matches, f)
    def load(self):
        try:
            with open(self.file_name, 'rb') as f:
                return pickle.load(f)
        except IOError:
            return {}
def remember_choice(self, item):
if item.synced():
self.matches[item.name] = item.selected_shop_item().name
self.save()
def match(self, item):
if item.name in self.matches:
shop_item_name = self.matches[item.name]
for shop_item in item.shop_items:
if shop_item.name == shop_item_name:
item.select_shop_item(shop_item)
| danielBreitlauch/FoodScan | FoodScan/ShopSync/shopSync.py | Python | mit | 6,886 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Find the sum of all pandigital numbers
with an unusual sub-string divisibility property.
"""
from itertools import permutations
from utils import list_num
def pe43():
"""
>>> pe43()
[1406357289, 1430952867, 1460357289, 4106357289, 4130952867, 4160357289]
"""
# s = 0
ps = []
for perm in permutations(i for i in range(10) if i != 5):
if perm[3] & 1: continue
perm = list(perm)
perm.insert(5, 5)
if not list_num(perm[7:10]) % 17 and \
not list_num(perm[6: 9]) % 13 and \
not list_num(perm[5: 8]) % 11 and \
not list_num(perm[4: 7]) % 7 and \
not list_num(perm[3: 6]) % 5 and \
not list_num(perm[2: 5]) % 3:
# s += list_num(perm)
ps.append(list_num(perm))
# print(s)
return ps
if __name__ == "__main__":
import doctest
doctest.testmod()
| kittttttan/pe | py/pe/pe43.py | Python | mit | 946 |
# Module:
# Submodules:
# Created:
# Copyright (C) <date> <fullname>
#
# This module is part of the <project name> project and is released under
# the MIT License: http://opensource.org/licenses/MIT
"""
"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
# Third-party imports
import pytest
# Local imports
from selweb.driver import BrowserDriver
# ============================================================================
#
# ============================================================================
class FakeDriver(BrowserDriver):
def __enter__(self):
pass
def __exit__(self, exctype, exc, exctb):
pass
def mkdriver(self):
super().mkdriver()
@property
def driver(self):
super().driver
def test_mkdriver_raise_exception():
"""All methods of BrowserDriver ABC raise NotImplementedError"""
driver = FakeDriver()
with pytest.raises(NotImplementedError):
driver.mkdriver()
def test_driver_raise_exception():
"""All methods of BrowserDriver ABC raise NotImplementedError"""
driver = FakeDriver()
with pytest.raises(NotImplementedError):
driver.driver
# ============================================================================
#
# ============================================================================
| arielmakestuff/selweb | test/unit/driver/test_unit_driver_browserdriver.py | Python | mit | 1,464 |
""" Generates Figure 1 of the the paper
Sascha Spors, Frank Schultz, and Hagen Wierstorf. Non-smooth secondary
source distributions in wave
field synthesis. In German Annual Conference
on Acoustics (DAGA), March 2015.
Sound field synthesized by an infitely long vs. semi-infintely long
linear/rectangular array driven by two-dimensional WFS for a virtual
line source.
"""
import numpy as np
import matplotlib.pyplot as plt
import sfs
# what Figure to generate
# Figure 1(a): infinite=True, rect=False
# Figure 1(b): infinite=False, rect=False
# Figure 1(c): infinite=False, rect=True
infinite = True # infinite linear array
rect = False # rectangular array
# simulation parameters
xref = [0, 0, 0] # reference point
dx = 0.05 # secondary source distance
N = 2000 # number of secondary sources for one array
f = 500 # frequency
omega = 2 * np.pi * f # angular frequency
src_angle = 135
if not rect:
grid = sfs.util.xyz_grid([-2, 2], [-2, 3], 0, spacing=0.02)
else:
grid = sfs.util.xyz_grid([-2, 2], [-2, 2], 0, spacing=0.02)
def compute_sound_field(x0, n0, a0, omega, angle):
npw = sfs.util.direction_vector(np.radians(angle), np.radians(90))
xs = xref + (np.sqrt(xref[0]**2 + xref[1]**2) + 4) * np.asarray(npw)
d = sfs.mono.drivingfunction.wfs_2d_line(omega, x0, n0, xs)
a = sfs.mono.drivingfunction.source_selection_point(n0, x0, xs)
twin = sfs.tapering.none(a)
p = sfs.mono.synthesized.generic(omega, x0, n0, d * twin * a0, grid,
source=sfs.mono.source.line)
return p, twin, xs
def plot_objects(ax):
if infinite and not rect:
ax.plot((-2, -2), (-2.2, 3.2), 'k-', lw=4)
if not infinite:
ax.plot((-2, -2), (-2.2, 2), 'k-', lw=4)
if rect:
ax.plot((-2, 2.2), (2, 2), 'k-', lw=4)
sfs.plot.virtualsource_2d(xs, type='point', ax=ax)
sfs.plot.reference_2d(xref, ax=ax)
def plot_sound_field(p, xs, twin, diff=0):
fig = plt.figure()
ax1 = fig.add_axes([0.0, 0.0, 0.7, 1])
im = sfs.plot.soundfield(p, grid, xnorm=None, colorbar=False,
vmax=1.5, vmin=-1.5)
plot_objects(plt.gca())
plt.axis([-3.0, 2.2, -2.2, 3.2])
plt.axis('off')
myfig = plt.gcf()
plt.show()
def plot_sound_field_level(p, xs, twin):
fig = plt.figure()
ax1 = fig.add_axes([0.0, 0.0, 0.7, 1])
im = sfs.plot.level(p, grid, xnorm=None, colorbar=False, vmax=3, vmin=-3)
plot_objects(plt.gca())
plt.annotate('4m', (-2.5, 2), (-2.75, -2.4),
arrowprops={'arrowstyle': '<->'})
plt.axis([-3.0, 2.2, -2.2, 3.2])
plt.axis('off')
ax2 = fig.add_axes([0.55, -0.05, 0.25, 1])
plt.axis('off')
cbar = plt.colorbar(im, ax=ax2, shrink=.7)
cbar.set_label('relative level (dB)', rotation=270, labelpad=10)
myfig = plt.gcf()
plt.show()
# define secondary source positions
# infinitely long linear array
x0, n0, a0 = sfs.array.linear(2*N, dx, center=[-2, -dx/2, 0])
# semi-infinte linear array
if not infinite:
idx = x0[:, 1] <= 2+dx/2
x0 = x0[idx, :]
n0 = n0[idx, :]
a0 = a0[idx]
a0[-1] = 1/2 * a0[-1]
# semi-infinite edge + infinte edge
if rect:
x00, n00 = sfs.array._rotate_array(x0, n0, [1, 0, 0], [0, -1, 0])
x00[:, 0] = - x00[:, 0]
x00 = np.flipud(x00)
a00 = np.flipud(a0)
x0 = np.concatenate((x0, x00))
n0 = np.concatenate((n0, n00))
a0 = np.concatenate((a0, a00))
# infinte edge as reference
x0ref, n0ref, a0ref = sfs.array.linear(2*N, dx, center=[-2, -dx/2, 0])
x00, n00 = sfs.array._rotate_array(x0ref, n0ref, [1, 0, 0], [0, -1, 0])
x0ref = np.concatenate((x0ref, x00))
n0ref = np.concatenate((n0ref, n00))
a0ref = np.concatenate((a0ref, a0ref))
# compute field
p, twin, xs = compute_sound_field(x0, n0, a0, omega, src_angle)
# plot synthesized sound field and its level
if not rect:
normalization = 0.066 # normalization used for plotting
else:
normalization = 0.059 # normalization used for plotting (rect)
plot_sound_field(p/normalization, xs, twin)
plot_sound_field_level(p/normalization, xs, twin)
| spatialaudio/non-smooth-secondary-source-distributions | figure1.py | Python | mit | 4,146 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
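# maior_sequencia returns the longest run of alphabetically consecutive
# characters (each code point exactly one above the previous) in the
# lowercased input; for the sample string below that is "fghijklmnop".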
def maior_sequencia(texto):
texto = texto.lower()
lista_resultante = []
lista_intermediaria = []
for i in range(len(texto)):
if i == 0 or (ord(texto[i]) == (ord(texto[i - 1]) + 1)):
lista_intermediaria.append(texto[i])
else:
if len(lista_intermediaria) > len(lista_resultante):
lista_resultante = lista_intermediaria
lista_intermediaria = [texto[i]]
else:
lista_intermediaria = [texto[i]]
if len(lista_intermediaria) > len(lista_resultante):
lista_resultante = lista_intermediaria
return "".join(lista_resultante)
if __name__ == '__main__':
minha_string = "abcdefgyzhtfghijklmnop"
print(maior_sequencia(minha_string))
| Rivefount/estudos | strings/sequencia_alafabetica_em_string.py | Python | mit | 809 |
# -*- coding: UTF-8 -*-
import unittest
from pandasticsearch.operators import *
from pandasticsearch.types import Row, Column
class TestSchema(unittest.TestCase):
def test_row(self):
row = Row(a=1, b='你好,世界')
print(repr(row))
self.assertEqual(row['a'], 1)
self.assertEqual(row['b'], '你好,世界')
self.assertEqual(row.as_dict(), {'a': 1, 'b': '你好,世界'})
def test_column(self):
col = Column('b')
self._assert_equal_filter(col > 2, Greater('b', 2))
self._assert_equal_filter(col >= 2, GreaterEqual('b', 2))
self._assert_equal_filter(col < 2, Less('b', 2))
self._assert_equal_filter(col <= 2, LessEqual('b', 2))
self._assert_equal_filter(col == 2, Equal('b', 2))
self._assert_equal_filter(col != 2, ~Equal('b', 2))
self._assert_equal_filter(col.isin([1, 2, 3]), IsIn('b', [1, 2, 3]))
self._assert_equal_filter(col.like('a*b'), Like('b', 'a*b'))
self._assert_equal_filter(col.rlike('a*b'), Rlike('b', 'a*b'))
self._assert_equal_filter(col.startswith('jj'), Startswith('b', 'jj'))
self._assert_equal_filter(col.isnull, IsNull('b'))
self._assert_equal_filter(col.notnull, NotNull('b'))
def _assert_equal_filter(self, x, y):
self.assertTrue(x, BooleanFilter)
self.assertTrue(y, BooleanFilter)
self.assertEqual(x.build(), y.build())
if __name__ == '__main__':
unittest.main()
| onesuper/pandasticsearch | tests/test_types.py | Python | mit | 1,478 |
import uuid
class GenericFile:
# Core type -> lookup_function, returning the coreid
__relations = {
}
type_name = "dataset"
type_xml = "http://purl.org/dc/dcmitype/Dataset"
def __init__(self,core=True,core_type=None):
if core_type is None or core:
self.id_field = "id"
self.myid_field = None
self.myid_func = lambda r: r["id"]
elif not core and core_type is not None:
self.id_field = "coreid"
self.myid_field = None
self.myid_func = None
if core_type is None or core:
self.id_func = self.myid_func
elif core_type.type_name in self.__relations:
self.id_func = self.__relations[core_type]
else:
self.id_func = lambda r: r["coreid"]
class Archive:
def __init__(self,name=None):
if name is None:
self.name = str(uuid.uuid4())
else:
self.name = name
self.files = []
self.core_type = None
def new_file(self,t,fields):
core = False
if self.core_type is None:
self.core_type = t
core = True
r = RecordFile(core=core, t=t, core_type=self.core_type, fields=fields)
self.files.append(r)
return r
def __repr__(self):
return "< " + self.name + ":\n\t" + "\n\t".join([repr(f) for f in self.files]) + "\n>"
class RecordFile:
def __init__(self, core=True, t=GenericFile, core_type=None, fields=[]):
self.core = core
self.t = t(core=core, core_type=core_type)
self.fields = fields
def __repr__(self):
other_fields = [self.t.id_field]
if self.t.myid_field is not None:
other_fields.append(self.t.myid_field)
return "< " + self.t.type_name + ": " + ",".join(other_fields + self.fields) + " >"
def main():
from .idigbio import Records, MediaRecords
dwca = Archive()
dwca.new_file(Records,["dwc:scientificName","dwc:locality"])
dwca.new_file(MediaRecords,["ac:accessURI","dcterms:identifier"])
    print(dwca)
if __name__ == '__main__':
main()
| iDigBio/idigbio-dwca | dwca/writer.py | Python | mit | 2,150 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'My Project',
'author': 'Mighty Cohadar',
'url': 'URL to get it at.',
'download_url': 'Where to download it.',
'author_email': 'My email.',
'version': '0.1',
'install_requires': ['nose'],
'packages': ['cohadar'],
'scripts': [],
'name': 'cohadar'
}
setup(**config)
| cohadar/learn-python-the-hard-way | setup.py | Python | mit | 428 |
import pytest
from dotmailer.campaigns import Campaign
from dotmailer.exceptions import ErrorCampaignInvalid
from tests import manually_delete_campaign
from tests.campaigns import sample_campaign_data
def test_copy_valid(request, connection, sample_campaign):
copy = Campaign.copy(sample_campaign.id)
def cleanup():
manually_delete_campaign(connection, copy)
request.addfinalizer(cleanup)
assert copy.name == 'Copy of {}'.format(sample_campaign.name)
key_to_check = [
'subject', 'from_name', 'html_content', 'plain_text_content',
'reply_action', 'reply_to_address', 'status'
]
for key in key_to_check:
assert getattr(copy, key) == getattr(sample_campaign, key)
def test_copy_invalid():
campaign = Campaign(**sample_campaign_data())
with pytest.raises(TypeError):
Campaign.copy(campaign)
campaign.id = 0
with pytest.raises(Exception):
Campaign.copy(campaign)
| Mr-F/dotmailer | tests/campaigns/test_copy.py | Python | mit | 959 |
import venusian
class repository_config(object):
""" Configure repository objects.
"""
venusian = venusian
def __init__(self, name, namespace, shard='default'):
"""
:param name: unique repository name
:type name: str
:param namespace: namespace name according to Pacific config.
:type namespace: str
:param shard: one of the namespace shards. Shard 'default' is required to be set up
in the config.
:type shard: str
"""
settings = {
# lookup predicates
# -----------------
'name': name,
# configuration options
# ---------------------
'namespace': namespace,
'shard': shard
}
self.__dict__.update(settings)
def __call__(self, wrapped_class):
"""
:param wrapped_class: a class object that implements a repository
:return: the same class object
"""
settings = self.__dict__.copy()
def callback(scanner, name, ob):
scanner.config.add_repository(repository=wrapped_class, **settings)
self.venusian.attach(wrapped_class, callback, category='pacific')
return wrapped_class
def add_repository(config, repository, namespace, name, shard, **kw):
""" This function is used as a directive of Pyramid Config
and responsible for registering available SQL repositories.
:param config: Pyramid configurator instance
:type config: :class:`pyramid.config.Configurator`
:param repository: repository class object
:param namespace: database namespace
:type namespace: str
:param name: database name, must be unique
:type name: str
:param shard: database shard
:type shard: str
"""
repositories = config.registry.settings.setdefault('pacific.db.repositories', {})
repositories[name] = {
'repository': repository,
'namespace': namespace,
'shard': shard
}
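# Usage sketch; the repository class, its method and the scan call are
# hypothetical, only the decorator and directive signatures come from above:
#
#   @repository_config(name='users', namespace='main', shard='default')
#   class UsersRepository(object):
#       def get_by_id(self, db, user_id):
#           ...
#
# A venusian scan (config.scan(...)) then triggers the callback, which calls
# config.add_repository() and registers the class under
# registry.settings['pacific.db.repositories']['users'].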
| avanov/Pacific | pacific/db/repository.py | Python | mit | 2,019 |
from typing import Pattern, Dict
from recognizers_text.utilities import RegExpUtility
from ...resources.french_date_time import FrenchDateTime
from ..base_datetimeperiod import DateTimePeriodParserConfiguration, MatchedTimeRange
from ..extractors import DateTimeExtractor
from ..parsers import DateTimeParser
from ..base_configs import BaseDateParserConfiguration
class FrenchDateTimePeriodParserConfiguration(DateTimePeriodParserConfiguration):
def __init__(self, config: BaseDateParserConfiguration):
self._date_extractor = config.date_extractor
self._time_extractor = config.time_extractor
self._date_time_extractor = config.date_time_extractor
self._time_period_extractor = config.time_period_extractor
self.cardinal_extractor = config.cardinal_extractor
self._duration_extractor = config.duration_extractor
self.number_parser = config.number_parser
self._date_parser = config.date_parser
self._time_parser = config.time_parser
self._date_time_parser = config.date_time_parser
self._time_period_parser = config.time_period_parser
self._duration_parser = config.duration_parser
self._unit_map = config.unit_map
self._numbers = config.numbers
self.next_prefix_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.NextSuffixRegex)
self.past_prefix_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.PastSuffixRegex)
self.this_prefix_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.ThisPrefixRegex)
self.morning_start_end_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.MorningStartEndRegex)
self.afternoon_start_end_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.AfternoonStartEndRegex)
self.evening_start_end_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.EveningStartEndRegex)
self.night_start_end_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.NightStartEndRegex)
self._pure_number_from_to_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.PureNumFromTo)
self._pure_number_between_and_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.PureNumBetweenAnd)
self._specific_time_of_day_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.SpecificTimeOfDayRegex)
self.time_of_day_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.TimeOfDayRegex)
self._past_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.PastSuffixRegex)
self._future_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.NextSuffixRegex)
self.number_combined_with_unit_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.TimeNumberCombinedWithUnit)
self.unit_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.TimeUnitRegex)
self._period_time_of_day_with_date_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.PeriodTimeOfDayWithDateRegex)
self._relative_time_unit_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.RelativeTimeUnitRegex)
self._rest_of_date_time_regex = RegExpUtility.get_safe_reg_exp(FrenchDateTime.RestOfDateTimeRegex)
@property
def pure_number_from_to_regex(self) -> Pattern:
return self._pure_number_from_to_regex
@property
def pure_number_between_and_regex(self) -> Pattern:
return self._pure_number_between_and_regex
@property
def period_time_of_day_with_date_regex(self) -> Pattern:
return self._period_time_of_day_with_date_regex
@property
def specific_time_of_day_regex(self) -> Pattern:
return self._specific_time_of_day_regex
@property
def past_regex(self) -> Pattern:
return self._past_regex
@property
def future_regex(self) -> Pattern:
return self._future_regex
@property
def relative_time_unit_regex(self) -> Pattern:
return self._relative_time_unit_regex
@property
def rest_of_date_time_regex(self) -> Pattern:
return self._rest_of_date_time_regex
@property
def numbers(self) -> Dict[str, int]:
return self._numbers
@property
def unit_map(self) -> Dict[str, str]:
return self._unit_map
@property
def date_extractor(self) -> DateTimeExtractor:
return self._date_extractor
@property
def time_extractor(self) -> DateTimeExtractor:
return self._time_extractor
@property
def date_time_extractor(self) -> DateTimeExtractor:
return self._date_time_extractor
@property
def time_period_extractor(self) -> DateTimeExtractor:
return self._time_period_extractor
@property
def duration_extractor(self) -> DateTimeExtractor:
return self._duration_extractor
@property
def date_parser(self) -> DateTimeParser:
return self._date_parser
@property
def time_parser(self) -> DateTimeParser:
return self._time_parser
@property
def date_time_parser(self) -> DateTimeParser:
return self._date_time_parser
@property
def time_period_parser(self) -> DateTimeParser:
return self._time_period_parser
@property
def duration_parser(self) -> DateTimeParser:
return self._duration_parser
def get_matched_time_range(self, source: str) -> MatchedTimeRange:
trimmed_source = source.strip().lower()
time_str = ''
begin_hour = 0
end_hour = 0
end_min = 0
if self.morning_start_end_regex.search(trimmed_source):
time_str = 'TMO'
begin_hour = 8
end_hour = 12
elif self.afternoon_start_end_regex.search(trimmed_source):
time_str = 'TAF'
begin_hour = 12
end_hour = 16
elif self.evening_start_end_regex.search(trimmed_source):
time_str = 'TEV'
begin_hour = 16
end_hour = 20
elif self.night_start_end_regex.search(trimmed_source):
time_str = 'TNI'
begin_hour = 20
end_hour = 23
end_min = 59
else:
return MatchedTimeRange(time_str, begin_hour, end_hour, end_min, False)
return MatchedTimeRange(time_str, begin_hour, end_hour, end_min, True)
def get_swift_prefix(self, source: str) -> int:
trimmed_source = source.strip().lower()
swift = 0
# TODO: replace with regex
if (
trimmed_source.startswith('prochain') or
trimmed_source.endswith('prochain') or
trimmed_source.startswith('prochaine') or
trimmed_source.endswith('prochaine')
):
swift = 1
elif (
trimmed_source.startswith('derniere') or
trimmed_source.startswith('dernier') or
trimmed_source.endswith('derniere') or
trimmed_source.endswith('dernier')
):
swift = -1
return swift
| matthewshim-ms/Recognizers-Text | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/french/datetimeperiod_parser_config.py | Python | mit | 6,921 |
import numpy as np
import matplotlib.pyplot as plt
plt.rc('font',**{'family':'serif','sans-serif':['Computer Modern Roman']})
plt.rc('text', usetex=True)
def k(x,xp, l=1.0):
d = (x-xp)**2
return np.exp(-d/(l**2))
xp = 0
x = np.linspace(0,3,101)
fk = k(x,xp)
hf,ha = plt.subplots()
ha.plot(x, fk)
ha.set_xlabel('$|x-y|/\sigma$')
ha.set_ylabel('$k(x,y)$')
plt.show() | nrjl/GPN | covariance_plot.py | Python | mit | 381 |
import uuid
class AutomotiveComponent(object):
''' component of the Automotive Environment holding
common properties'''
def __init__(self, sim_env, comp_id=uuid.uuid4()):
''' Constructor
Input: sim_env simpy.Environment environment of this component
comp_id string identifier for this component
'''
self.sim_env = sim_env
self.comp_id = comp_id
self.time = {}
def set_timing(self, timing_dict):
''' sets the timing parameters in this
specific class
Input: timing_dict dictionary
'''
| PhilippMundhenk/IVNS | ECUSimulation/components/base/automotive_component.py | Python | mit | 721 |
import math
import time
t1 = time.time()
N = 100000
exp = N//10
prime = []
def primeSieve(n):
global prime
n = (n+1)//2
p = [True]*(n)
i = 1
prime.append(2)
while i < n:
if p[i]:
t = 2*i+1
prime.append(t)
j = i
while j < n:
p[j] = False
j += t
i += 1
return prime
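# rad(n) below is the radical of n: the product of its distinct prime factors,
# e.g. rad(504) = rad(2**3 * 3**2 * 7) = 2 * 3 * 7 = 42, with rad(1) = 1.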
def rad(number):
r = 1
i = 0
count = 0
nr = math.floor(math.sqrt(number))
while prime[i] <= nr:
while(number%prime[i] == 0):
count=count+1
number = number / prime[i]
nr = math.floor(math.sqrt(number))
if count > 0:
r *= prime[i]
count = 0
i = i+1
if number > 1:
r *= int(number)
return r
def bubbleSort(L):
l = len(L)
for i in range(l):
for j in range(l-i-1):
if L[j] > L[j+1]:
temp = L[j+1]
L[j+1] = L[j]
L[j] = temp
return L
primeSieve(N)
Lst = [[0,0]]
for i in range(1,N+1):
Lst.append([rad(i),i])
Lst = bubbleSort(Lst)
print(Lst[exp][1])
print("time:",time.time()-t1)
# time: 1475.4133920669556
| Adamssss/projectEuler | Problem 001-150 Python/pb124.py | Python | mit | 1,230 |
from evolacc.evolacc.evolacc import *
| Aluriak/EvolAcc | evolacc/evolacc/__init__.py | Python | mit | 38 |
# -*- encoding: utf-8
import datetime
import decimal
from sqlalchemy import Column
from sqlalchemy import DDL
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy import Identity
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import schema
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import types
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.dialects import mssql
from sqlalchemy.dialects.mssql import base
from sqlalchemy.dialects.mssql.information_schema import CoerceUnicode
from sqlalchemy.dialects.mssql.information_schema import tables
from sqlalchemy.schema import CreateIndex
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import ComparesTables
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import in_
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
class ReflectionTest(fixtures.TestBase, ComparesTables, AssertsCompiledSQL):
__only_on__ = "mssql"
__backend__ = True
def test_basic_reflection(self, metadata, connection):
meta = metadata
users = Table(
"engine_users",
meta,
Column("user_id", types.INT, primary_key=True),
Column("user_name", types.VARCHAR(20), nullable=False),
Column("test1", types.CHAR(5), nullable=False),
Column("test2", types.Float(5), nullable=False),
Column("test2.5", types.Float(), nullable=False),
Column("test3", types.Text()),
Column("test4", types.Numeric, nullable=False),
Column("test4.5", types.Numeric(10, 2), nullable=False),
Column("test5", types.DateTime),
Column(
"parent_user_id",
types.Integer,
ForeignKey("engine_users.user_id"),
),
Column("test6", types.DateTime, nullable=False),
Column("test7", types.Text()),
Column("test8", types.LargeBinary()),
Column("test_passivedefault2", types.Integer, server_default="5"),
Column("test9", types.BINARY(100)),
Column("test_numeric", types.Numeric()),
)
addresses = Table(
"engine_email_addresses",
meta,
Column("address_id", types.Integer, primary_key=True),
Column(
"remote_user_id", types.Integer, ForeignKey(users.c.user_id)
),
Column("email_address", types.String(20)),
)
meta.create_all(connection)
meta2 = MetaData()
reflected_users = Table(
"engine_users", meta2, autoload_with=connection
)
reflected_addresses = Table(
"engine_email_addresses",
meta2,
autoload_with=connection,
)
self.assert_tables_equal(users, reflected_users)
self.assert_tables_equal(addresses, reflected_addresses)
@testing.combinations(
(mssql.XML, "XML"),
(mssql.IMAGE, "IMAGE"),
(mssql.MONEY, "MONEY"),
(mssql.NUMERIC(10, 2), "NUMERIC(10, 2)"),
(mssql.FLOAT, "FLOAT(53)"),
(mssql.REAL, "REAL"),
# FLOAT(5) comes back as REAL
(mssql.FLOAT(5), "REAL"),
argnames="type_obj,ddl",
)
def test_assorted_types(self, metadata, connection, type_obj, ddl):
table = Table("type_test", metadata, Column("col1", type_obj))
table.create(connection)
m2 = MetaData()
table2 = Table("type_test", m2, autoload_with=connection)
self.assert_compile(
schema.CreateTable(table2),
"CREATE TABLE type_test (col1 %s NULL)" % ddl,
)
def test_identity(self, metadata, connection):
table = Table(
"identity_test",
metadata,
Column(
"col1",
Integer,
mssql_identity_start=2,
mssql_identity_increment=3,
primary_key=True,
),
)
with testing.expect_deprecated(
"The dialect options 'mssql_identity_start' and"
):
table.create(connection)
meta2 = MetaData()
table2 = Table("identity_test", meta2, autoload_with=connection)
eq_(table2.c["col1"].dialect_options["mssql"]["identity_start"], None)
eq_(
table2.c["col1"].dialect_options["mssql"]["identity_increment"],
None,
)
eq_(table2.c["col1"].identity.start, 2)
eq_(table2.c["col1"].identity.increment, 3)
def test_skip_types(self, connection):
connection.exec_driver_sql(
"create table foo (id integer primary key, data xml)"
)
with mock.patch.object(
connection.dialect, "ischema_names", {"int": mssql.INTEGER}
):
with testing.expect_warnings(
"Did not recognize type 'xml' of column 'data'"
):
eq_(
inspect(connection).get_columns("foo"),
[
{
"name": "id",
"type": testing.eq_type_affinity(sqltypes.INTEGER),
"nullable": False,
"default": None,
"autoincrement": False,
},
{
"name": "data",
"type": testing.eq_type_affinity(
sqltypes.NullType
),
"nullable": True,
"default": None,
"autoincrement": False,
},
],
)
def test_cross_schema_fk_pk_name_overlaps(self, metadata, connection):
# test for issue #4228
Table(
"subject",
metadata,
Column("id", Integer),
PrimaryKeyConstraint("id", name="subj_pk"),
schema=testing.config.test_schema,
)
Table(
"referrer",
metadata,
Column("id", Integer, primary_key=True),
Column(
"sid",
ForeignKey(
"%s.subject.id" % testing.config.test_schema,
name="fk_subject",
),
),
schema=testing.config.test_schema,
)
Table(
"subject",
metadata,
Column("id", Integer),
PrimaryKeyConstraint("id", name="subj_pk"),
schema=testing.config.test_schema_2,
)
metadata.create_all(connection)
insp = inspect(connection)
eq_(
insp.get_foreign_keys("referrer", testing.config.test_schema),
[
{
"name": "fk_subject",
"constrained_columns": ["sid"],
"referred_schema": "test_schema",
"referred_table": "subject",
"referred_columns": ["id"],
"options": {},
}
],
)
def test_table_name_that_is_greater_than_16_chars(
self, metadata, connection
):
Table(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ",
metadata,
Column("id", Integer, primary_key=True),
Column("foo", Integer),
Index("foo_idx", "foo"),
)
metadata.create_all(connection)
t = Table(
"ABCDEFGHIJKLMNOPQRSTUVWXYZ", MetaData(), autoload_with=connection
)
eq_(t.name, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
@testing.combinations(
("local_temp", "#tmp", True),
("global_temp", "##tmp", True),
("nonexistent", "#no_es_bueno", False),
id_="iaa",
argnames="table_name, exists",
)
def test_temporary_table(self, metadata, connection, table_name, exists):
if exists:
tt = Table(
table_name,
metadata,
Column("id", Integer, primary_key=True),
Column("txt", mssql.NVARCHAR(50)),
Column("dt2", mssql.DATETIME2),
)
tt.create(connection)
connection.execute(
tt.insert(),
[
{
"id": 1,
"txt": u"foo",
"dt2": datetime.datetime(2020, 1, 1, 1, 1, 1),
},
{
"id": 2,
"txt": u"bar",
"dt2": datetime.datetime(2020, 2, 2, 2, 2, 2),
},
],
)
if not exists:
with expect_raises(exc.NoSuchTableError):
Table(
table_name,
metadata,
autoload_with=connection,
)
else:
tmp_t = Table(table_name, metadata, autoload_with=connection)
result = connection.execute(
tmp_t.select().where(tmp_t.c.id == 2)
).fetchall()
eq_(
result,
[(2, "bar", datetime.datetime(2020, 2, 2, 2, 2, 2))],
)
@testing.combinations(
("local_temp", "#tmp", True),
("global_temp", "##tmp", True),
("nonexistent", "#no_es_bueno", False),
id_="iaa",
argnames="table_name, exists",
)
def test_has_table_temporary(
self, metadata, connection, table_name, exists
):
if exists:
tt = Table(
table_name,
metadata,
Column("id", Integer),
)
tt.create(connection)
found_it = testing.db.dialect.has_table(connection, table_name)
eq_(found_it, exists)
def test_has_table_temp_not_present_but_another_session(self):
"""test #6910"""
with testing.db.connect() as c1, testing.db.connect() as c2:
try:
with c1.begin():
c1.exec_driver_sql(
"create table #myveryveryuniquetemptablename (a int)"
)
assert not c2.dialect.has_table(
c2, "#myveryveryuniquetemptablename"
)
finally:
with c1.begin():
c1.exec_driver_sql(
"drop table #myveryveryuniquetemptablename"
)
def test_has_table_temp_temp_present_both_sessions(self):
"""test #7168, continues from #6910"""
with testing.db.connect() as c1, testing.db.connect() as c2:
try:
with c1.begin():
c1.exec_driver_sql(
"create table #myveryveryuniquetemptablename (a int)"
)
with c2.begin():
c2.exec_driver_sql(
"create table #myveryveryuniquetemptablename (a int)"
)
assert c2.dialect.has_table(
c2, "#myveryveryuniquetemptablename"
)
finally:
with c1.begin():
c1.exec_driver_sql(
"drop table #myveryveryuniquetemptablename"
)
with c2.begin():
c2.exec_driver_sql(
"drop table #myveryveryuniquetemptablename"
)
def test_db_qualified_items(self, metadata, connection):
Table("foo", metadata, Column("id", Integer, primary_key=True))
Table(
"bar",
metadata,
Column("id", Integer, primary_key=True),
Column("foo_id", Integer, ForeignKey("foo.id", name="fkfoo")),
)
metadata.create_all(connection)
dbname = connection.exec_driver_sql("select db_name()").scalar()
owner = connection.exec_driver_sql("SELECT user_name()").scalar()
referred_schema = "%(dbname)s.%(owner)s" % {
"dbname": dbname,
"owner": owner,
}
inspector = inspect(connection)
bar_via_db = inspector.get_foreign_keys("bar", schema=referred_schema)
eq_(
bar_via_db,
[
{
"referred_table": "foo",
"referred_columns": ["id"],
"referred_schema": referred_schema,
"name": "fkfoo",
"constrained_columns": ["foo_id"],
"options": {},
}
],
)
assert inspect(connection).has_table("bar", schema=referred_schema)
m2 = MetaData()
Table(
"bar",
m2,
schema=referred_schema,
autoload_with=connection,
)
eq_(m2.tables["%s.foo" % referred_schema].schema, referred_schema)
def test_fk_on_unique_index(self, metadata, connection):
# test for issue #7160
Table(
"uidx_parent",
metadata,
Column("id", Integer, primary_key=True),
Column("uidx_col1", Integer, nullable=False),
Column("uidx_col2", Integer, nullable=False),
Index(
"UIDX_composite",
"uidx_col1",
"uidx_col2",
unique=True,
),
)
Table(
"uidx_child",
metadata,
Column("id", Integer, primary_key=True),
Column("parent_uidx_col1", Integer, nullable=False),
Column("parent_uidx_col2", Integer, nullable=False),
ForeignKeyConstraint(
["parent_uidx_col1", "parent_uidx_col2"],
["uidx_parent.uidx_col1", "uidx_parent.uidx_col2"],
name="FK_uidx_parent",
),
)
metadata.create_all(connection)
inspector = inspect(connection)
fk_info = inspector.get_foreign_keys("uidx_child")
eq_(
fk_info,
[
{
"referred_table": "uidx_parent",
"referred_columns": ["uidx_col1", "uidx_col2"],
"referred_schema": None,
"name": "FK_uidx_parent",
"constrained_columns": [
"parent_uidx_col1",
"parent_uidx_col2",
],
"options": {},
}
],
)
def test_indexes_cols(self, metadata, connection):
t1 = Table("t", metadata, Column("x", Integer), Column("y", Integer))
Index("foo", t1.c.x, t1.c.y)
metadata.create_all(connection)
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x"], t2.c.y]))
def test_indexes_cols_with_commas(self, metadata, connection):
t1 = Table(
"t",
metadata,
Column("x, col", Integer, key="x"),
Column("y", Integer),
)
Index("foo", t1.c.x, t1.c.y)
metadata.create_all(connection)
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x, col"], t2.c.y]))
def test_indexes_cols_with_spaces(self, metadata, connection):
t1 = Table(
"t",
metadata,
Column("x col", Integer, key="x"),
Column("y", Integer),
)
Index("foo", t1.c.x, t1.c.y)
metadata.create_all(connection)
m2 = MetaData()
t2 = Table("t", m2, autoload_with=connection)
eq_(set(list(t2.indexes)[0].columns), set([t2.c["x col"], t2.c.y]))
def test_indexes_with_filtered(self, metadata, connection):
t1 = Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("x", types.String(20)),
Column("y", types.Integer),
)
Index("idx_x", t1.c.x, mssql_where=t1.c.x == "test")
Index("idx_y", t1.c.y, mssql_where=t1.c.y >= 5)
metadata.create_all(connection)
ind = testing.db.dialect.get_indexes(connection, "t", None)
filtered_indexes = []
for ix in ind:
if "dialect_options" in ix:
filtered_indexes.append(ix["dialect_options"]["mssql_where"])
eq_(sorted(filtered_indexes), ["([x]='test')", "([y]>=(5))"])
t2 = Table("t", MetaData(), autoload_with=connection)
idx = list(sorted(t2.indexes, key=lambda idx: idx.name))[0]
self.assert_compile(
CreateIndex(idx), "CREATE INDEX idx_x ON t (x) WHERE ([x]='test')"
)
def test_max_ident_in_varchar_not_present(self, metadata, connection):
"""test [ticket:3504].
Here we are testing not just that the "max" token comes back
as None, but also that these types accept "max" as the value
of "length" on construction, which isn't a directly documented
pattern however is likely in common use.
"""
Table(
"t",
metadata,
Column("t1", types.String),
Column("t2", types.Text("max")),
Column("t3", types.Text("max")),
Column("t4", types.LargeBinary("max")),
Column("t5", types.VARBINARY("max")),
)
metadata.create_all(connection)
for col in inspect(connection).get_columns("t"):
is_(col["type"].length, None)
in_("max", str(col["type"].compile(dialect=connection.dialect)))
class InfoCoerceUnicodeTest(fixtures.TestBase, AssertsCompiledSQL):
def test_info_unicode_coercion(self):
dialect = mssql.dialect()
value = CoerceUnicode().bind_processor(dialect)("a string")
assert isinstance(value, util.text_type)
def test_info_unicode_cast_no_2000(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2000_VERSION
stmt = tables.c.table_name == "somename"
self.assert_compile(
stmt,
"[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = :table_name_1",
dialect=dialect,
)
def test_info_unicode_cast(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2005_VERSION
stmt = tables.c.table_name == "somename"
self.assert_compile(
stmt,
"[INFORMATION_SCHEMA].[TABLES].[TABLE_NAME] = "
"CAST(:table_name_1 AS NVARCHAR(max))",
dialect=dialect,
)
class ReflectHugeViewTest(fixtures.TablesTest):
__only_on__ = "mssql"
__backend__ = True
# crashes on freetds 0.91, not worth it
__skip_if__ = (lambda: testing.requires.mssql_freetds.enabled,)
@classmethod
def define_tables(cls, metadata):
col_num = 150
t = Table(
"base_table",
metadata,
*[
Column("long_named_column_number_%d" % i, Integer)
for i in range(col_num)
]
)
cls.view_str = (
view_str
) = "CREATE VIEW huge_named_view AS SELECT %s FROM base_table" % (
",".join("long_named_column_number_%d" % i for i in range(col_num))
)
assert len(view_str) > 4000
event.listen(t, "after_create", DDL(view_str))
event.listen(t, "before_drop", DDL("DROP VIEW huge_named_view"))
def test_inspect_view_definition(self):
inspector = inspect(testing.db)
view_def = inspector.get_view_definition("huge_named_view")
eq_(view_def, self.view_str)
class OwnerPlusDBTest(fixtures.TestBase):
def test_default_schema_name_not_interpreted_as_tokenized(self):
dialect = mssql.dialect()
dialect.server_version_info = base.MS_2014_VERSION
mock_connection = mock.Mock(scalar=lambda sql: "Jonah.The.Whale")
schema_name = dialect._get_default_schema_name(mock_connection)
eq_(schema_name, "Jonah.The.Whale")
eq_(
base._owner_plus_db(dialect, schema_name),
(None, "Jonah.The.Whale"),
)
def test_owner_database_pairs_dont_use_for_same_db(self):
dialect = mssql.dialect()
identifier = "my_db.some_schema"
schema, owner = base._owner_plus_db(dialect, identifier)
mock_connection = mock.Mock(
dialect=dialect,
exec_driver_sql=mock.Mock(
return_value=mock.Mock(scalar=mock.Mock(return_value="my_db"))
),
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
eq_(
mock_connection.mock_calls,
[mock.call.exec_driver_sql("select db_name()")],
)
        eq_(
            mock_connection.exec_driver_sql.return_value.mock_calls,
            [mock.call.scalar()],
        )
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
def test_owner_database_pairs_switch_for_different_db(self):
dialect = mssql.dialect()
identifier = "my_other_db.some_schema"
schema, owner = base._owner_plus_db(dialect, identifier)
mock_connection = mock.Mock(
dialect=dialect,
exec_driver_sql=mock.Mock(
return_value=mock.Mock(scalar=mock.Mock(return_value="my_db"))
),
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
        eq_(
            mock_connection.mock_calls,
            [
                mock.call.exec_driver_sql("select db_name()"),
                mock.call.exec_driver_sql("use my_other_db"),
                mock.call.exec_driver_sql("use my_db"),
            ],
        )
        eq_(
            mock_connection.exec_driver_sql.return_value.mock_calls,
            [mock.call.scalar()],
        )
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
@testing.combinations(
("foo", None, "foo", "use foo"),
("foo.bar", "foo", "bar", "use foo"),
("Foo.Bar", "Foo", "Bar", "use [Foo]"),
("[Foo.Bar]", None, "Foo.Bar", "use [Foo.Bar]"),
("[Foo.Bar].[bat]", "Foo.Bar", "bat", "use [Foo.Bar]"),
(
"[foo].]do something; select [foo",
"foo",
"do something; select foo",
"use foo",
),
(
"something; select [foo].bar",
"something; select foo",
"bar",
"use [something; select foo]",
),
(
"[abc].[def].[efg].[hij]",
"[abc].[def].[efg]",
"hij",
"use [abc].[def].[efg]",
),
("abc.def.efg.hij", "abc.def.efg", "hij", "use [abc.def.efg]"),
)
def test_owner_database_pairs(
self, identifier, expected_schema, expected_owner, use_stmt
):
dialect = mssql.dialect()
schema, owner = base._owner_plus_db(dialect, identifier)
eq_(owner, expected_owner)
eq_(schema, expected_schema)
mock_connection = mock.Mock(
dialect=dialect,
exec_driver_sql=mock.Mock(
return_value=mock.Mock(
scalar=mock.Mock(return_value="Some Database")
)
),
)
mock_lambda = mock.Mock()
base._switch_db(schema, mock_connection, mock_lambda, "x", y="bar")
if schema is None:
eq_(mock_connection.mock_calls, [])
else:
eq_(
mock_connection.mock_calls,
[
mock.call.exec_driver_sql("select db_name()"),
mock.call.exec_driver_sql(use_stmt),
mock.call.exec_driver_sql("use [Some Database]"),
],
)
eq_(
mock_connection.exec_driver_sql.return_value.mock_calls,
[mock.call.scalar()],
)
eq_(mock_lambda.mock_calls, [mock.call("x", y="bar")])
class IdentityReflectionTest(fixtures.TablesTest):
__only_on__ = "mssql"
__backend__ = True
__requires__ = ("identity_columns",)
@classmethod
def define_tables(cls, metadata):
for i, col in enumerate(
[
Column(
"id1",
Integer,
Identity(
always=True,
start=2,
increment=3,
minvalue=-2,
maxvalue=42,
cycle=True,
cache=4,
),
),
Column("id2", Integer, Identity()),
Column("id3", sqltypes.BigInteger, Identity()),
Column("id4", sqltypes.SmallInteger, Identity()),
Column("id5", sqltypes.Numeric, Identity()),
]
):
Table("t%s" % i, metadata, col)
def test_reflect_identity(self, connection):
insp = inspect(connection)
cols = []
for t in self.tables_test_metadata.tables.keys():
cols.extend(insp.get_columns(t))
for col in cols:
is_true("dialect_options" not in col)
is_true("identity" in col)
if col["name"] == "id1":
eq_(col["identity"], {"start": 2, "increment": 3})
elif col["name"] == "id2":
eq_(col["identity"], {"start": 1, "increment": 1})
eq_(type(col["identity"]["start"]), int)
eq_(type(col["identity"]["increment"]), int)
elif col["name"] == "id3":
eq_(col["identity"], {"start": 1, "increment": 1})
eq_(type(col["identity"]["start"]), util.compat.long_type)
eq_(type(col["identity"]["increment"]), util.compat.long_type)
elif col["name"] == "id4":
eq_(col["identity"], {"start": 1, "increment": 1})
eq_(type(col["identity"]["start"]), int)
eq_(type(col["identity"]["increment"]), int)
elif col["name"] == "id5":
eq_(col["identity"], {"start": 1, "increment": 1})
eq_(type(col["identity"]["start"]), decimal.Decimal)
eq_(type(col["identity"]["increment"]), decimal.Decimal)
@testing.requires.views
def test_reflect_views(self, connection):
connection.exec_driver_sql("CREATE VIEW view1 AS SELECT * FROM t1")
insp = inspect(connection)
for col in insp.get_columns("view1"):
is_true("dialect_options" not in col)
is_true("identity" in col)
eq_(col["identity"], {})
| zzzeek/sqlalchemy | test/dialect/mssql/test_reflection.py | Python | mit | 27,474 |
# coding: utf-8
import datetime
from bs4 import BeautifulSoup
from bson.objectid import ObjectId
from tornado.web import RequestHandler
from processor.page_analytic import PageAnalytic
from processor.processor import TagProcessor
from libra import settings
from libra.handlers.base import authenticated
from libra.handlers.base import logged
from libra.models.page import Page, PageData
import requests
class PageHandler(RequestHandler):
@logged
def get(self, **kwargs):
self.render("index.html",
SERVER_NAME=settings.SERVER_NAME)
class UserPageHandler(RequestHandler):
@authenticated
def post(self, user, **kwargs):
page = Page()
page._id = ObjectId()
page.user_id = str(user['_id'])
page.url = self.get_argument('url')
page.save()
self.write({"msg": "Created!",
"name": page.url,
"url": settings.SERVER_NAME + '/' + str(user['fb_id']) + '/' + page.url + '/'})
@authenticated
def delete(self, user, **kwargs):
Page().remove({"user": str(user["_id"]),
"url": self.get_argument('url')})
class SiteHandler(RequestHandler):
@authenticated
def get(self, user, **kwargs):
url = kwargs['url']
data = []
for page_data in PageData().find({"page_url": url,
"date": {"$gte": datetime.datetime.now() - datetime.timedelta(minutes=10)}}):
data.append({'date': page_data['date'].strftime("%Y/%m/%d %H:%M:%S"),
'weight': page_data['weight']/1024.0})
data.sort(key=lambda x: x["date"])
self.render("graph.html", dataSource=data, site=url, SERVER_NAME=settings.SERVER_NAME)
| pitomba/libra | libra/handlers/page.py | Python | mit | 1,803 |
n = input('Enter a value: ')
print(n.isnumeric())
print(n.isalpha())
print(n.isalnum())
print(n.isupper())
print(n.isspace()) | AlbertoAlfredo/exercicios-cursos | Curso-em-video/Python/aulas-python/Testes/aula06b.py | Python | mit | 127 |
import unittest
from Content.JsonContent import JsonContent
# JSON validator test case.
class TestClass(unittest.TestCase):
def test_validation_passed(self):
validator = JsonContent('{ "test" : "simple JSON" }')
result = validator.validate()
assert result == True, 'Validation not passed - broken validation'
def test_validation_passed_but_more_complicated(self):
validator = JsonContent('{ "test" : true, "test": 1, "T1": { "a" : 2.0 }, "T2" : [ "2"] }')
result = validator.validate()
assert result == True, 'Validation not passed - broken validation'
def test_validation_failed(self):
validator = JsonContent('{ "')
result = validator.validate()
assert result == False, "Validation passed but shouldn't - broken validation"
def test_validation_failed_but_more_subtle(self):
validator = JsonContent("{ 'test' : 'test' }")
result = validator.validate()
assert result == False, "Validation passed but shouldn't - broken validation"
def test_which_returns_parsed_object(self):
parser = JsonContent('{ "test" : "simple JSON" }')
builtObject = parser.parse()
assert builtObject["test"] == 'simple JSON', "JSON content cannot be parsed."
def test_which_returns_none_in_response_to_invalid_object(self):
parser = JsonContent('{ " ')
builtObject = parser.parse()
        assert builtObject == None, "Invalid JSON content should parse to None."
# python standard library
import unittest
# third-party
from mock import MagicMock
# the ape
from theape.infrastructure.arguments.fetcharguments import Fetch, FetchStrategy
from theape.infrastructure.arguments.basestrategy import BaseStrategy
import theape.infrastructure.arguments.fetcharguments
fetch_usage = theape.infrastructure.arguments.fetcharguments.__doc__
class TestFetchArguments(unittest.TestCase):
def setUp(self):
self.args = ['fetch']
self.arguments = Fetch(args=self.args)
return
def test_constructor(self):
"""
Does it build?
"""
arguments = Fetch(args=['fetch'])
# test inheritance
self.assertFalse(arguments.debug)
self.assertEqual(fetch_usage, arguments.sub_usage)
return
def test_names(self):
"""
Does it get the list of plugin names?
"""
# default
self.assertEqual(['Ape'], self.arguments.names)
        # positional arguments
self.arguments.reset()
names = "apple banana cat".split()
self.arguments.args = self.args + names
self.assertEqual(names, self.arguments.names)
return
def test_modules(self):
"""
Does it get a list of external modules?
"""
# default is None
self.assertEqual([], self.arguments.modules)
# add one
self.arguments.reset()
modules = ['aoeu']
options = ['-m'] + modules
self.arguments.args = self.args + options
self.assertEqual(modules, self.arguments.modules)
# add multiple
self.arguments.reset()
modules = 'a b c d e'.split()
options = ['-m'] + " -m".join(modules).split()
self.arguments.args = self.args + options
self.assertEqual(modules, self.arguments.modules)
return
def test_both(self):
"""
Can you use both names and modules?
"""
names = 'ab cd'.split()
modules = 'a b c d'.split()
arguments_options = names + ['-m'] + ' -m'.join(modules).split()
self.arguments.args = self.args + arguments_options
self.assertEqual(names, self.arguments.names)
self.assertEqual(modules, self.arguments.modules)
return
# end TestFetchArguments
class TestFetchStrategy(unittest.TestCase):
def setUp(self):
self.quartermaster = MagicMock()
        FetchStrategy.quartermaster = self.quartermaster
self.strategy = FetchStrategy()
return
def test_constructor(self):
"""
Does it build?
"""
strategy = FetchStrategy()
self.assertIsInstance(strategy, BaseStrategy)
return
def test_function(self):
"""
Does it implement the `fetch` strategy?
"""
self.strategy.quartermaster = self.quartermaster
args = MagicMock()
args.names = 'a b c'.split()
args.modules = 'd e f'.split()
definition_a, definition_b, definition_c = MagicMock(), MagicMock(), MagicMock()
definitions = [definition_a, definition_b, definition_c]
plugin_a, plugin_b, plugin_c = MagicMock(), MagicMock(), MagicMock()
definition_a.return_value = plugin_a
definition_b.return_value = plugin_b
definition_c.return_value = plugin_c
plugin_source = dict(zip(args.names, definitions))
def side_effect(name):
return plugin_source[name]
self.quartermaster.get_plugin.side_effect = side_effect
self.strategy.function(args)
self.assertEqual(self.quartermaster, self.strategy.quartermaster)
self.assertEqual(self.quartermaster.external_modules, args.modules)
for definition in definitions:
definition.return_value.fetch_config.assert_called_with()
args.names.append('d')
definition_d = MagicMock()
definition_d.side_effect = TypeError("unknown plugin")
plugin_source['d'] = definition_d
# nothing should happen, because it handles unknown plugins
self.strategy.function(args)
# and the decorator handles other errors
definition_a.side_effect = AttributeError("plugin implementation error")
self.strategy.function(args)
return | rsnakamura/theape | theape/infrastructure/arguments/tests/test_fetcharguments.py | Python | mit | 4,312 |
from django.db import models
class Configuration(models.Model):
fb_share_token = models.TextField(max_length=250)
fb_share_app_id = models.TextField(max_length=20)
fb_share_app_secret = models.TextField(max_length=35)
fb_share_link = models.TextField(max_length=50)
| dirtycoder/pets | pets/common/models.py | Python | mit | 284 |
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['PHENOPROC_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['PHENOPROC_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
| mainulhossain/phenoproc | app/email.py | Python | mit | 675 |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'example_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| cordery/django-countries-plus | example_project/manage.py | Python | mit | 671 |
"""
Created on 25 Feb 2020
@author: Bruno Beloff ([email protected])
an empty catalogue of particulate exegesis models, to be implemented elsewhere
"""
# --------------------------------------------------------------------------------------------------------------------
class ExegeteCatalogue(object):
"""
classdocs
"""
# ----------------------------------------------------------------------------------------------------------------
@staticmethod
def model_names():
return []
@staticmethod
def load(_name, _host):
raise NotImplementedError
@staticmethod
def standard(_name):
raise NotImplementedError
| south-coast-science/scs_core | src/scs_core/exegesis/particulate/exegete_catalogue.py | Python | mit | 696 |
#!/usr/bin/env python2.7
# Copyright (c) 2012 by David Schulz ([email protected])
# All rights reserved.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import shutil
import os
import time
import rrdtool
import io
from rrdtool import update as rrd_update
import subprocess
from subprocess import call
ret = rrdtool.create(
    '/var/rrd/dylos.rrd', '--step', '60', '--start', '0',
    # two gauge data sources (small/large particle counts), 3-minute heartbeat, valid range 0-50000
    'DS:small:GAUGE:180:0:50000',
    'DS:large:GAUGE:180:0:50000',
    # average/min/max/last archives: 1-minute resolution for 24 hours (1440 rows),
    # plus hourly (60-step) aggregates for 30 days (720 rows)
    'RRA:AVERAGE:0.5:1:1440',
    'RRA:AVERAGE:0.5:60:720',
    'RRA:MIN:0.5:1:1440',
    'RRA:MAX:0.5:1:1440',
    'RRA:LAST:0.5:1:1440',
    'RRA:MIN:0.5:60:720',
    'RRA:MAX:0.5:60:720',
    'RRA:LAST:0.5:60:720'
)
| overrider/pollution-aqi | raspberrypi/rrd_create_dylos.py | Python | mit | 1,438 |
# -*- coding: utf8 -*-
import time
from datetime import datetime
import feedparser
import requests
import sqlalchemy.orm.exc
from sqlalchemy.orm.exc import NoResultFound
import zeeguu_core
from zeeguu_core.constants import JSON_TIME_FORMAT, SIMPLE_TIME_FORMAT
from zeeguu_core.model.language import Language
from zeeguu_core.model.url import Url
db = zeeguu_core.db
class RSSFeed(db.Model):
__table_args__ = {'mysql_collate': 'utf8_bin'}
__tablename__ = 'rss_feed'
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(2083))
description = db.Column(db.String(2083))
language_id = db.Column(db.Integer, db.ForeignKey(Language.id))
language = db.relationship(Language)
url_id = db.Column(db.Integer, db.ForeignKey(Url.id))
url = db.relationship(Url, foreign_keys=url_id)
image_url_id = db.Column(db.Integer, db.ForeignKey(Url.id))
image_url = db.relationship(Url, foreign_keys=image_url_id)
icon_name = db.Column(db.String(2083))
last_crawled_time = db.Column(db.DateTime)
def __init__(self, url, title, description, image_url=None, icon_name=None, language=None):
self.url = url
self.image_url = image_url
self.icon_name = icon_name
self.title = title
self.language = language
self.description = description
self.last_crawled_time = datetime(2001, 1, 2)
def __str__(self):
language = "unknown"
if self.language:
language = self.language.code
return f'{self.title, language}'
def __repr__(self):
return str(self)
@classmethod
def from_url(cls, url: str):
data = feedparser.parse(url)
try:
title = data.feed.title
except:
title = ""
try:
description = data.feed.subtitle
except:
description = None
try:
image_url_string = data.feed.image.href
print(f'Found image url at: {image_url_string}')
except:
print('Could not find any image url.')
feed_url = Url(url, title)
return RSSFeed(feed_url, title, description)
def as_dictionary(self):
language = "unknown_lang"
if self.language:
language = self.language.code
return dict(
id=self.id,
title=self.title,
url=self.url.as_string(),
description=self.description,
language=language,
image_url='',
icon_name=self.icon_name
)
def feed_items(self, last_retrieval_time_from_DB = None):
"""
:return: a dictionary with info about that feed
extracted by feedparser
and including: title, url, content, summary, time
"""
if not last_retrieval_time_from_DB:
last_retrieval_time_from_DB = datetime(1980,1,1)
def publishing_date(item):
# this used to be updated_parsed but cf the deprecation
# warning we changed to published_parsed instead.
try:
return item.published_parsed
except:
# March 8 -- added back in updated_parsed;
# curious if this fixes the problem in some
# cases; to find out, we log
zeeguu_core.log(f'trying updated_parsed where published_parsed failed for {item.get("link", "")} in the context of {self.url.as_string()}')
result = item.updated_parsed
return result
response = requests.get(self.url.as_string())
feed_data = feedparser.parse(response.text)
skipped_due_to_time = 0
feed_items = []
skipped_items = []
zeeguu_core.log(f"** Articles in feed: {len(feed_data.entries)}")
for item in feed_data.entries:
try:
published_string = time.strftime(SIMPLE_TIME_FORMAT, publishing_date(item))
this_entry_time = datetime.strptime(published_string, SIMPLE_TIME_FORMAT)
this_entry_time = this_entry_time.replace(tzinfo=None)
new_item_data_dict = dict(
title=item.get("title", ""),
url=item.get("link", ""),
content=item.get("content", ""),
summary=item.get("summary", ""),
published=published_string,
published_datetime=this_entry_time
)
if this_entry_time > last_retrieval_time_from_DB:
feed_items.append(new_item_data_dict)
else:
skipped_due_to_time+=1
skipped_items.append(new_item_data_dict)
except AttributeError as e:
zeeguu_core.log(f'Exception {e} while trying to retrieve {item.get("link", "")}')
sorted_skipped_items = sorted(skipped_items, key= lambda x:x['published_datetime'])
for each in sorted_skipped_items:
zeeguu_core.debug(f"- skipped: {each['published_datetime']} - {each['title']}")
for each in feed_items:
zeeguu_core.debug(f"- to download: {each['published_datetime']} - {each['title']}")
zeeguu_core.log(f'*** Skipped due to time: {len(skipped_items)} ')
zeeguu_core.log(f"*** To download: {len(feed_items)}")
return feed_items
@classmethod
def exists(cls, rss_feed):
try:
cls.query.filter(
cls.url == rss_feed.url
).one()
return True
except NoResultFound:
return False
@classmethod
def find_by_id(cls, i):
try:
result = cls.query.filter(cls.id == i).one()
return result
except Exception as e:
from sentry_sdk import capture_exception
capture_exception(e)
return None
@classmethod
def find_by_url(cls, url):
try:
result = cls.query.filter(cls.url == url).one()
return result
except sqlalchemy.orm.exc.NoResultFound:
return None
@classmethod
def find_or_create(cls, session, url, title, description, icon_name, language: Language):
try:
result = (cls.query.filter(cls.url == url)
.filter(cls.title == title)
.filter(cls.language == language)
.filter(cls.description == description)
.one())
return result
except sqlalchemy.orm.exc.NoResultFound:
new = cls(url, title, description, icon_name=icon_name, language=language)
session.add(new)
session.commit()
return new
    # although it appears unused within this repository,
    # this method is used by the zeeguu-api
@classmethod
def find_for_language_id(cls, language_code):
language = Language.find(language_code)
return cls.query.filter(cls.language == language).all()
def get_articles(self, limit=None, after_date=None, most_recent_first=False, easiest_first=False):
"""
Articles for this feed from the article DB
:param limit:
:param after_date:
:param most_recent_first:
:param easiest_first:
:return:
"""
from zeeguu_core.model import Article
if not after_date:
after_date = datetime(2001, 1, 1)
try:
q = (Article.query.
filter(Article.rss_feed == self).
filter(Article.broken == 0).
filter(Article.published_time >= after_date).
filter(Article.word_count > Article.MINIMUM_WORD_COUNT))
if most_recent_first:
q = q.order_by(Article.published_time.desc())
if easiest_first:
q = q.order_by(Article.fk_difficulty)
return q.limit(limit).all()
        except Exception as e:
            raise e
| mircealungu/Zeeguu-Core | zeeguu_core/model/feed.py | Python | mit | 8,067 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import ConnectedKubernetesClientConfiguration
from .operations import ConnectedClusterOperations
from .operations import Operations
from . import models
class ConnectedKubernetesClient(object):
"""Azure Connected Cluster Resource Provider API for adopting any Kubernetes Cluster.
:ivar connected_cluster: ConnectedClusterOperations operations
:vartype connected_cluster: connected_kubernetes_client.operations.ConnectedClusterOperations
:ivar operations: Operations operations
:vartype operations: connected_kubernetes_client.operations.Operations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = ConnectedKubernetesClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.connected_cluster = ConnectedClusterOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> ConnectedKubernetesClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
| Azure/azure-sdk-for-python | sdk/hybridkubernetes/azure-mgmt-hybridkubernetes/azure/mgmt/hybridkubernetes/_connected_kubernetes_client.py | Python | mit | 4,207 |
#!/usr/bin/env python2
import sys, os, json, traceback, decimal
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../python-bitcoinrpc"))
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from rotating_consensus import RotatingConsensus
from threading import Lock, current_thread
from time import sleep
from constants import FedpegConstants
from httplib import CannotSendRequest, BadStatusLine
import socket
settings = FedpegConstants()
port = 14242
sidechain = [AuthServiceProxy(settings.sidechain_url), AuthServiceProxy(settings.sidechain_url)]
# We need to do a rescan on bitcoin, so we set a huge timeout
bitcoin = [AuthServiceProxy(settings.bitcoin_url, timeout=60*10), AuthServiceProxy(settings.bitcoin_url)]
spent_from_history = {}
open('spent_from.log', 'a').close() # Touch file (create if not already present)
with open('spent_from.log') as f:
for line in f.readlines():
l = eval(line)
if l[0] not in spent_from_history:
spent_from_history[l[0]] = set()
spent_from_history[l[0]].add(l[1])
spent_from_log = os.open("spent_from.log", os.O_CREAT | os.O_WRONLY | os.O_SYNC | os.O_DSYNC | os.O_APPEND)
def check_reset_connections():
global sidechain, bitcoin
connections_good = True
try:
sidechain[thread_id()].getblockcount()
except CannotSendRequest as e:
sidechain[thread_id()] = AuthServiceProxy(settings.sidechain_url)
connections_good = False
except BadStatusLine as e:
sidechain[thread_id()] = AuthServiceProxy(settings.sidechain_url)
connections_good = False
except socket.timeout as e:
sidechain[thread_id()] = AuthServiceProxy(settings.sidechain_url)
connections_good = False
try:
bitcoin[thread_id()].getblockcount()
except CannotSendRequest as e:
bitcoin[thread_id()] = AuthServiceProxy(settings.bitcoin_url)
connections_good = False
except BadStatusLine as e:
bitcoin[thread_id()] = AuthServiceProxy(settings.bitcoin_url)
connections_good = False
except socket.timeout as e:
bitcoin[thread_id()] = AuthServiceProxy(settings.bitcoin_url)
connections_good = False
return connections_good
# If there are two outputs to the same destination, the first output must fully
# confirm before we allow the second to process.
# This is really for ease of developer headache, though we could change some
# indexes and allow this
map_lock = Lock()
# withdraw metadata map: {"txid_concat": sidechain_txid_concat, "sidechain_height": height,
# "script_gen": p2sh_script_in_asm_for_bitcoin-tx, "script_match": p2sh_script_in_hex,
# "value": value, "spent_from": set({frozenset({(bitcoin_txid, bitcoin_vout), ...}), ...})}
# (spent_from is a set of sets of inputs used in every tx which was signed and had this output)
# sidechain txid_concat -> withdraw metadata map
outputs_pending = {}
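# Illustrative only (hypothetical values): one outputs_pending entry might look like
#   outputs_pending["9f86d081884c...:0"] = {
#       "txid_concat": "9f86d081884c...:0", "sidechain_height": 1042,
#       "script_gen": 'OP_HASH160 0x14<contract-hash> OP_EQUAL',
#       "script_match": "a914<contract-hash>87",
#       "value": Decimal("0.10000000"), "spent_from": set()}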
# withdraw_target_p2sh_script_hex -> txid_concat (for withdraw_claims_pending use)
outputs_pending_by_p2sh_hex = {}
# withdraw_target_p2sh_script_hex -> [withdraw metadata map, ...]
outputs_waiting = {}
# utxo metadata map: {"redeem_info": redeem_info_for_bitcoin_signrawtransaction, "privateKey": gen_private_key, "value": Decimal(value),
# "spent_by": set(), "donated_map": {frozenset({(bitcoin_txid, bitcoin_vout), ...}): value} }
# spent_by is a set of sidechain txid_concats which can be used to look up in outputs_pending
# donated_map is a map from input sets to the value taken from donated_funds as a fee
# (bitcoin_txid, bitcoin_vout) -> utxo metadata map
utxos = {}
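# Likewise illustrative (hypothetical values): one utxos entry might look like
#   utxos[("e3b0c44298fc...", 1)] = {
#       "redeem_info": {"txid": "e3b0c44298fc...", "vout": 1,
#                       "scriptPubKey": "a914...87", "redeemScript": "5221...ae"},
#       "privateKey": "<WIF key>", "value": Decimal("1.50000000"),
#       "spent_by": {"9f86d081884c...:0"},
#       "donated_map": {frozenset({("e3b0c44298fc...", 1)}): Decimal("0.00010000")}}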
#set of sets of txos we need to ensure are spent by fraud proofs
fraud_check_map = {}
donated_funds = 0
manual_check_lock = Lock()
manual_check_set = set()
main_thread = current_thread()
def thread_id():
if current_thread() == main_thread:
return 0
return 1
def check_raise(cond):
if not cond:
raise Exception("assertion failed")
def trigger_bitcoin_rescan():
# TODO: Replace with a really random one, instead
cht = os.popen("%s %s -c -p %s -a SALT -n %s" % (settings.contracthashtool_path, settings.cht_testnet_arg, settings.functionary_private_key, os.urandom(16).encode("hex")))
useless_private_key = cht.read().split("\n")[0 + settings.is_testnet][16:]
check_raise(cht.close() == None)
# Trigger a rescan by importing something useless and new
sys.stdout.write("Now triggering a full wallet rescan of the bitcoin chain...")
sys.stdout.flush()
bitcoin[thread_id()].importprivkey(useless_private_key, "", True)
print("done")
def process_bitcoin_tx_for_utxos(tx, is_donation=False, manual_check=False):
global donated_funds
manual_check_lock.acquire()
if not manual_check and tx["txid"] in manual_check_set:
manual_check_set.remove(tx["txid"])
return
elif manual_check:
manual_check_set.add(tx["txid"])
manual_check_lock.release()
# Go through the outputs, adding any coins sent to the raw functionary address to utxos
for nout, outp in enumerate(tx["vout"]):
if outp["scriptPubKey"]["type"] == "scripthash" and outp["scriptPubKey"]["addresses"][0] == settings.redeem_script_address:
txo = tx["vout"][nout]
map_lock.acquire()
print("Got %s UTXO sent to raw functioanry address (change or donation): %s:%d" % ("new" if (tx["txid"], nout) not in utxos else "existing", tx["txid"], nout))
utxos[(tx["txid"], nout)] = {"redeem_info": {"txid": tx["txid"], "vout": nout, "scriptPubKey": outp["scriptPubKey"]["hex"], "redeemScript": settings.redeem_script}, "privateKey": settings.functionary_private_key, "value": decimal.Decimal(outp["value"]), "spent_by": set(), "donated_map": {}}
if is_donation:
print("Got donation of %s, now possibly paying fees" % str(outp["value"]))
donated_funds = donated_funds + outp["value"]
map_lock.release()
def sign_withdraw_tx(tx_hex, txid_concat_list):
global donated_funds
check_reset_connections()
tx_raw = bitcoin[thread_id()].decoderawtransaction(tx_hex)
max_sidechain_height = sidechain[thread_id()].getblockcount() - 6
check_raise(len(tx_raw["vout"]) == len(txid_concat_list) + 1)
check_raise(tx_raw["vout"][-1]["scriptPubKey"]["type"] == "scripthash")
check_raise(tx_raw["vout"][-1]["scriptPubKey"]["addresses"][0] == settings.redeem_script_address)
tx_value = decimal.Decimal(0)
privKeys = []
redeemScripts = []
inputs_set = set()
input_size = 0
for inp in tx_raw["vin"]:
if (inp["txid"], inp["vout"]) not in utxos:
# To-functionary UTXOs are only added after sufficient confirmations,
# so we may need to find them here.
spent_tx = bitcoin[thread_id()].getrawtransaction(inp["txid"], 1)
process_bitcoin_tx_for_utxos(spent_tx, manual_check=True)
check_raise((inp["txid"], inp["vout"]) in utxos)
utxo = utxos[(inp["txid"], inp["vout"])]
redeemScripts.append(utxo["redeem_info"])
privKeys.append(utxo["privateKey"])
tx_value = tx_value + decimal.Decimal(utxo["value"])
inputs_set.add((inp["txid"], inp["vout"]))
input_size = input_size + len(inp["scriptSig"]["hex"])/2
if len(inp["scriptSig"]["hex"])/2 >= 0xfd:
input_size += 2
txid_concat_set = set()
for i, txid_concat in enumerate(txid_concat_list):
check_raise(txid_concat in outputs_pending)
output = outputs_pending[txid_concat]
check_raise(output["sidechain_height"] <= max_sidechain_height)
tx_vout = tx_raw["vout"][i]
check_raise(tx_vout["scriptPubKey"]["hex"] == output["script_match"])
check_raise(decimal.Decimal(tx_vout["value"]) == output["value"])
tx_value = tx_value - decimal.Decimal(tx_vout["value"])
for input_set in output["spent_from"]:
check_raise(not inputs_set.isdisjoint(input_set))
txid_concat_set.add(txid_concat)
# scriptSig is OP_0 x*(1-byte pushlen + 73-byte max-sized signature) + redeemScript push
# if it triggers a long var-int for the scriptlen we have to include that, too
RS_push_size = len(settings.redeem_script) / 2
RS_push_size += 1 if RS_push_size <= 0x4b else (2 if RS_push_size <= 0xff else 3)
scriptSig_size = 1 + 74 * settings.sigs_required + RS_push_size
if scriptSig_size >= 0xfd:
scriptSig_size += 2
fee_allowed = len(tx_hex)/2 - input_size + scriptSig_size * len(tx_raw["vin"])
fee_allowed = min(fee_allowed, donated_funds * 100000000)
fee_paid = tx_value - decimal.Decimal(tx_raw["vout"][-1]["value"])
check_raise(fee_paid * 100000000 <= fee_allowed)
donated_funds = donated_funds - fee_paid
inputs_set = frozenset(inputs_set)
for txid_concat in txid_concat_list:
output = outputs_pending[txid_concat]
if inputs_set not in output["spent_from"]:
output["spent_from"].add(inputs_set)
os.write(spent_from_log, "['%s', %s]\n" % (txid_concat, repr(inputs_set)))
old_paid_memory = -1
for inp in tx_raw["vin"]:
utxo = utxos[(inp["txid"], inp["vout"])]
utxo["spent_by"] = utxo["spent_by"] | txid_concat_set
old_paid = 0
if inputs_set in utxo["donated_map"]:
old_paid = utxo["donated_map"][inputs_set]
if old_paid_memory == -1:
old_paid_memory = old_paid
elif old_paid != old_paid_memory:
print("Internal data structure inconsistency!")
sys.exit(1)
utxo["donated_map"][inputs_set] = fee_paid + old_paid
return bitcoin[thread_id()].signrawtransaction(tx_hex, redeemScripts, privKeys)["hex"]
class WatchPeerController(RotatingConsensus):
round_local_tx_hex = ""
def gen_master_msg(self):
if not check_reset_connections():
return None
map_lock.acquire()
try:
max_sidechain_height = sidechain[thread_id()].getblockcount() - 8
txid_concat_list_untried = []
txid_concat_list_retries = []
command_untried = '%s %s -create' % (settings.bitcoin_tx_path, settings.btc_testnet_arg)
command_retries = command_untried
input_sets_retries = set()
input_pairs_retries = set()
for txid_concat in outputs_pending:
output = outputs_pending[txid_concat]
if output["sidechain_height"] > max_sidechain_height:
continue
if len(output["spent_from"]) == 0:
command_untried = command_untried + ' outscript=%.16g:"%s"' % (output["value"], output["script_gen"])
txid_concat_list_untried.append(txid_concat)
elif len(txid_concat_list_untried) == 0:
all_still_spendable = True
for input_set in output["spent_from"]:
for input_pair in input_set:
if bitcoin[thread_id()].gettxout(input_pair[0], input_pair[1], True) == None:
all_still_spendable = False
break
if not all_still_spendable:
break
if all_still_spendable:
command_retries = command_retries + ' outscript=%.16g:"%s"' % (output["value"], output["script_gen"])
txid_concat_list_retries.append(txid_concat)
input_sets_retries = input_sets_retries | output["spent_from"]
for input_set in output["spent_from"]:
input_pairs_retries = input_pairs_retries | input_set
if len(txid_concat_list_untried) != 0:
txid_concat_list = txid_concat_list_untried
command = command_untried
elif len(txid_concat_list_retries) != 0:
inputs_required = []
while len(input_sets_retries) != 0:
e = max(input_pairs_retries, key=lambda x: len([i for i in input_sets_retries if x in i]))
inputs_required.append(e)
input_sets_retries = set([x for x in input_sets_retries if e not in x])
for input_pair in inputs_required:
command_retries = command_retries + ' in="%s":%d' % (input_pair[0], input_pair[1])
txid_concat_list = txid_concat_list_retries
command = command_retries
else:
return None
cht = os.popen(command)
tx_hex = cht.read().split("\n")[0]
check_raise(cht.close() == None)
funded_tx = bitcoin[thread_id()].fundrawtransaction(tx_hex, True)
tx_raw = bitcoin[thread_id()].decoderawtransaction(funded_tx["hex"])
change_value = decimal.Decimal(funded_tx["fee"]) + decimal.Decimal(tx_raw["vout"][funded_tx["changepos"]]["value"])
cht = os.popen('%s %s %s delout=%d outaddr=%s:%s' % (settings.bitcoin_tx_path, settings.btc_testnet_arg, funded_tx["hex"], funded_tx["changepos"], "0", settings.redeem_script_address))
tx_hex = cht.read().split("\n")[0]
check_raise(cht.close() == None)
redeem_script_push_size = len(settings.redeem_script)/2
if redeem_script_push_size <= 0x4b:
redeem_script_push_size += 1
elif redeem_script_push_size <= 0xff:
redeem_script_push_size += 2
else:
redeem_script_push_size += 3
input_size = 1 + 74 * settings.sigs_required + redeem_script_push_size
if input_size >= 0xfd:
input_size += 2
pay_fee = decimal.Decimal(len(tx_hex)/2 + input_size * len(tx_raw["vin"])) / decimal.Decimal(100000000)
pay_fee = min(pay_fee, funded_tx["fee"])
if pay_fee > donated_funds:
pay_fee = 0
print("Paying fee of %s" % str(pay_fee))
change_value = change_value - pay_fee
cht = os.popen('%s %s %s delout=%d outaddr=%s:%s' % (settings.bitcoin_tx_path, settings.btc_testnet_arg, tx_hex, len(tx_raw["vout"]) - 1, change_value, settings.redeem_script_address))
tx_hex = cht.read().split("\n")[0]
check_raise(cht.close() == None)
self.round_local_tx_hex = sign_withdraw_tx(tx_hex, txid_concat_list)
return json.dumps([self.round_local_tx_hex, txid_concat_list])
finally:
map_lock.release()
def recv_master_msg(self, msg):
msg_decoded = json.loads(msg)
map_lock.acquire()
try:
self.round_local_tx_hex = sign_withdraw_tx(msg_decoded[0], msg_decoded[1])
return self.round_local_tx_hex
finally:
map_lock.release()
def round_done(self, peer_messages):
txn_concat = self.round_local_tx_hex
check_raise(txn_concat != "")
input_list = []
for inp in bitcoin[thread_id()].decoderawtransaction(txn_concat)["vin"]:
input_list.append((inp["txid"], inp["vout"]))
for msg in peer_messages:
try:
for i, inp in enumerate(bitcoin[thread_id()].decoderawtransaction(msg[1])["vin"]):
check_raise(input_list[i] == (inp["txid"], inp["vout"]))
txn_concat = txn_concat + msg[1]
except:
print("Peer %s sent invalid transaction" % msg[0])
res = bitcoin[thread_id()].signrawtransaction(txn_concat)
print("Final round result:")
print(res)
if res["complete"]:
bitcoin[thread_id()].sendrawtransaction(res["hex"])
return
def round_failed(self):
self.round_local_tx_hex = ""
return
def process_sidechain_tx_for_utxos(tx, height, avoid_rescans):
for vout, output in enumerate(tx["vout"]):
if output["scriptPubKey"]["type"] == "withdrawout":
check_reset_connections()
outp = output["scriptPubKey"]["asm"].split(" ")
check_raise(len(outp) == 16)
bitcoin_tx = outp[2]
bitcoin_raw_tx = bitcoin[thread_id()].getrawtransaction(bitcoin_tx, 1)
txo = bitcoin_raw_tx["vout"][int(outp[3])]
inp = tx["vin"][vout]["scriptSig"]["asm"].split(" ")
contract = inp[2]
cht = os.popen("%s %s -g -r %s -f %s" % (settings.contracthashtool_path, settings.cht_testnet_arg, settings.redeem_script, contract))
cht_out = cht.read()
check_raise(cht.close() == None)
modified_redeem_script = cht_out.split("\n")[2 + settings.is_testnet][24:]
modified_address = cht_out.split("\n")[3 + settings.is_testnet][40:]
bitcoin[thread_id()].importaddress(modified_redeem_script, "", not avoid_rescans, True)
cht = os.popen("%s %s -c -p %s -f %s" % (settings.contracthashtool_path, settings.cht_testnet_arg, settings.functionary_private_key, contract))
gen_private_key = cht.read().split("\n")[0 + settings.is_testnet][16:]
check_raise(cht.close() == None)
outp[3] = int(outp[3])
map_lock.acquire()
already_had = (bitcoin_tx, outp[3]) in utxos
utxos[(bitcoin_tx, outp[3])] = {"redeem_info": {"txid": bitcoin_tx, "vout": outp[3], "scriptPubKey": txo["scriptPubKey"]["hex"], "redeemScript": modified_redeem_script}, "privateKey": gen_private_key, "value": decimal.Decimal(txo["value"]), "spent_by": set(), "donated_map": {}}
if already_had:
if height not in fraud_check_map:
fraud_check_map[height] = []
fraud_check_map[height].append((tx["txid"], vout))
map_lock.release()
print("Got %s UTXO (%s:%d) from sidechain tx %s:%d" % ("new" if not already_had else "existing", bitcoin_tx, outp[3], tx["txid"], vout))
def process_sidechain_tx_for_withdraw(tx, height):
for vout, output in enumerate(tx["vout"]):
if output["scriptPubKey"]["type"] == "withdraw":
outp = output["scriptPubKey"]["asm"].split(" ")
if len(outp) == 5 and outp[2] == settings.inverse_bitcoin_genesis_hash and outp[3] == settings.secondScriptPubKeyHash:
check_raise(outp[1] == "OP_DROP" and outp[4] == "OP_WITHDRAWPROOFVERIFY")
if outp[0][0:8] != "50325348":
continue
contract = outp[0][8:]
check_raise(len(contract) == 40)
p2sh_script = "OP_HASH160 0x14%s OP_EQUAL" % outp[0][8:]
p2sh_hex = "a914%s87" % outp[0][8:]
txid_concat = tx["txid"] + ":" + str(vout)
value = decimal.Decimal(output["value"])
if txid_concat in spent_from_history:
output = {"txid_concat": txid_concat, "sidechain_height": height, "script_gen": p2sh_script, "script_match": p2sh_hex, "value": value, "spent_from": spent_from_history[txid_concat]}
else:
output = {"txid_concat": txid_concat, "sidechain_height": height, "script_gen": p2sh_script, "script_match": p2sh_hex, "value": value, "spent_from": set()}
# We track the set of inputs (from the utxos map) from which we've sent the withdraw,
# freely signing double-spends, but never allowing two non-conflicting withdraws
map_lock.acquire()
if txid_concat in outputs_pending:
print("Re-ran process_sidechain_tx_for_withdraw with existing withdraw: %s???" % txid_concat)
sys.exit(1)
if p2sh_hex in outputs_pending_by_p2sh_hex:
if p2sh_hex in outputs_waiting:
outputs_waiting[p2sh_hex].append(output)
else:
outputs_waiting[p2sh_hex] = [output]
print("Got new txo for withdraw (waiting on previous tx %s): %s" % (txid_concat, outputs_pending_by_p2sh_hex[p2sh_hex]))
map_lock.release()
continue
outputs_pending[txid_concat] = output
outputs_pending_by_p2sh_hex[p2sh_hex] = txid_concat
print("Got new txo for withdraw: %s (to %s with value %s)" % (txid_concat, p2sh_hex, str(value)))
map_lock.release()
def process_sidechain_blockchain(min_height, max_height, avoid_rescan):
for height in range(min_height, max_height):
block = sidechain[thread_id()].getblock(sidechain[thread_id()].getblockhash(height))
for tx in sidechain[thread_id()].batch_([["getrawtransaction", txhash, 1] for txhash in block["tx"]]):
process_sidechain_tx_for_utxos(tx, height, avoid_rescan)
process_sidechain_tx_for_withdraw(tx, height)
def process_confirmed_sidechain_blockchain(min_height, max_height):
global donated_funds
for height in range(min_height, max_height):
map_lock.acquire()
fraud_check_list = None
if height in fraud_check_map:
fraud_check_list = fraud_check_map[height]
del fraud_check_map[height]
map_lock.release()
if fraud_check_list != None:
for txo in fraud_check_list:
if sidechain[thread_id()].gettxout(txo[0], txo[1], False) != None:
print("NO FRAUD PROOF GENERATED WITHIN CONFIRMATION PERIOD FOR TXO %s" % str(txo))
sys.exit(1)
block = sidechain[thread_id()].getblock(sidechain[thread_id()].getblockhash(height))
for tx in sidechain[thread_id()].batch_([["getrawtransaction", txhash, 1] for txhash in block["tx"]]):
for outp in tx["vout"]:
if outp["scriptPubKey"]["type"] == "nulldata":
map_lock.acquire()
donated_funds += outp["value"]
map_lock.release()
def process_confirmed_bitcoin_blockchain(min_height, max_height):
global donated_funds
for height in range(min_height, max_height):
block = bitcoin[thread_id()].getblock(bitcoin[thread_id()].getblockhash(height))
for tx in bitcoin[thread_id()].batch_([["getrawtransaction", txhash, 1] for txhash in block["tx"]]):
map_lock.acquire()
is_withdraw = False
is_not_withdraw = False
tx_value = 0
# First process the inputs, checking if its a withdraw transaction (ie spends from utxos)
# then remove that utxo, including from ensure-double-spend-sets in outputs_pending
for inp in tx["vin"]:
if "coinbase" in inp:
continue
txid_pair = (inp["txid"], inp["vout"])
if txid_pair not in utxos:
if is_withdraw:
print("Got transaction that spent both functionary utxos and non-functionary utxos...very confused")
sys.exit(1)
is_not_withdraw = True
else:
if is_not_withdraw:
print("Got transaction that spent both functionary utxos and non-functionary utxos...very confused")
sys.exit(1)
is_withdraw = True
utxo = utxos[txid_pair]
for txid_concat in utxo["spent_by"]:
if txid_concat in outputs_pending:
new_spent_from = set()
output = outputs_pending[txid_concat]
for inputs_set in output["spent_from"]:
if txid_pair not in inputs_set:
new_spent_from.add(inputs_set)
output["spent_from"] = new_spent_from
# Calculate donated_funds by re-adding all temporary removals that this invalidated
total_donated_value = 0
for txid_set in utxo["donated_map"]:
donated_value = utxo["donated_map"][txid_set]
for txid_pair_it in txid_set:
if txid_pair_it == txid_pair:
continue
if utxos[txid_pair_it]["donated_map"][txid_set] != donated_value:
print("Internal data structure inconsistency")
sys.exit(1)
del utxos[txid_pair_it]["donated_map"][txid_set]
total_donated_value = total_donated_value + donated_value
donated_funds = donated_funds + total_donated_value
tx_value = tx_value + utxo["value"]
del utxos[txid_pair]
# Then go through outputs, removing them from outputs_pending and warning if
# we dont know where the money went
if is_withdraw:
for outp in tx["vout"]:
script_asm = outp["scriptPubKey"]["hex"]
if script_asm in outputs_pending_by_p2sh_hex:
sys.stdout.write("Successfully completed withdraw for sidechain tx %s in bitcoin tx %s:%d" % (outputs_pending_by_p2sh_hex[script_asm], tx["txid"], outp["n"]))
del outputs_pending[outputs_pending_by_p2sh_hex[script_asm]]
del outputs_pending_by_p2sh_hex[script_asm]
if script_asm in outputs_waiting:
output = outputs_waiting[script_asm].pop(0)
outputs_pending[output["txid_concat"]] = output
outputs_pending_by_p2sh_hex[script_asm] = output["txid_concat"]
if len(outputs_waiting[script_asm]) == 0:
del outputs_waiting[script_asm]
sys.stdout.write("...next output to same address is %s" % output["txid_concat"])
sys.stdout.write("\n")
sys.stdout.flush()
elif outp["scriptPubKey"]["type"] != "scripthash" or outp["scriptPubKey"]["addresses"][0] != settings.redeem_script_address:
print("MONEY MOVED FROM FUNCTIONARY OUTPUT TO UNKNOWN DESTINATION!!!!")
print("In transaction %s in output %d" % (tx["txid"], outp["n"]))
sys.exit(1)
tx_value = tx_value - outp["value"]
# Remove fee from donated_funds
if tx_value > 0:
donated_funds = donated_funds - tx_value
map_lock.release()
# Finally, without map_lock held (we'll grab it again if needed in process_bitcoin_tx_for_utxos),
# we add any outputs which are to the functionary address to the utxos set.
process_bitcoin_tx_for_utxos(tx, not is_withdraw)
try:
print("Doing chain-scan init...")
print("Step 1. Sidechain blockchain scan for coins in and withdraws...")
# First do a pass over all existing blocks to collect all utxos
sidechain_block_count = sidechain[thread_id()].getblockcount()
process_sidechain_blockchain(1, sidechain_block_count, True)
process_confirmed_sidechain_blockchain(1, sidechain_block_count - 5)
print("done")
print("Step 2. Bitcoin blockchain scan for withdraws completed and coins to functionaries...")
check_reset_connections()
bitcoin_block_count = bitcoin[thread_id()].getblockcount()
process_confirmed_bitcoin_blockchain(447000, bitcoin_block_count - 5)
print("done")
sys.stdout.write("Step 3. Bitcoin blockchain rescan to load functionary outputs in wallet...")
sys.stdout.flush()
bitcoin[thread_id()].importaddress(settings.redeem_script, "", False, True)
trigger_bitcoin_rescan()
print("done")
print("Init done. Joining rotating consensus and watching chain for withdraws...")
#TODO: Change interval to ~60
settings.nodes.remove(settings.my_node)
WatchPeerController(settings.nodes, settings.my_node, port, 10, settings.socks_proxy)
print("Outputs to be created:")
for txid_concat in outputs_pending:
sys.stdout.write(" " + txid_concat)
print("\nOutputs waiting:")
for p2sh_hex in outputs_waiting:
for output in outputs_waiting[p2sh_hex]:
sys.stdout.write(" " + output["txid_concat"])
print("")
while True:
if not check_reset_connections():
sleep(1)
continue
new_block_count = sidechain[thread_id()].getblockcount()
process_sidechain_blockchain(sidechain_block_count, new_block_count, False)
process_confirmed_sidechain_blockchain(sidechain_block_count - 5, new_block_count - 5)
sidechain_block_count = new_block_count
if not check_reset_connections():
sleep(1)
continue
new_block_count = bitcoin[thread_id()].getblockcount()
process_confirmed_bitcoin_blockchain(bitcoin_block_count - 5, new_block_count - 5)
bitcoin_block_count = new_block_count
sleep(1)
except JSONRPCException as e:
print(e.error)
print(traceback.format_exc())
| droark/elements | contrib/fedpeg/withdrawwatch.py | Python | mit | 25,372 |
images_above = widget_inputs["check1"]
flipping = widget_inputs["check2"]
async_requests = widget_inputs["check3"]
images_below = widget_inputs["check4"]
is_correct = False
comments = []
def commentizer(new):
if new not in comments:
comments.append(new)
if images_above:
commentizer("Check the images above-the-fold. They should have already been downloaded and visible.")
is_correct = is_correct and False
else:
is_correct = True
if not flipping:
commentizer("While the app has some downtime, why not start precomputing FLIP animations?")
is_correct = is_correct and False
else:
is_correct = is_correct and True
if not async_requests:
commentizer("You could try starting below-the-fold requests now so that they'll be ready by the time users reach them.")
is_correct = is_correct and False
else:
is_correct = is_correct and True
if not images_below:
commentizer("Like any below-the-fold content, images are great to request after the initial page load.")
is_correct = is_correct and False
else:
is_correct = is_correct and True
if is_correct:
commentizer("Great job! You're thinking [#perfmatters](https://twitter.com/hashtag/perfmatters)!")
grade_result["correct"] = is_correct
grade_result["comment"] = "\n\n".join(comments) | udacity/60fps | grading-scripts/2-rails2.py | Python | mit | 1,281 |
import os
from unittest import TestCase
from mock import MagicMock, patch, call
from slap import cli
mock_arcpy = MagicMock()
module_patcher = patch.dict('sys.modules', {'arcpy': mock_arcpy})
module_patcher.start()
class TestInitCli(TestCase):
def test_default_args(self):
with patch('slap.cli.config_builder.create_config') as mock:
cli.main(['init'])
mock.assert_called_once_with(
directories=[os.getcwd()],
filename='config.json',
hostname='hostname',
register_data_sources=False
)
def test_inputs(self):
with patch('slap.cli.config_builder.create_config') as mock:
cli.main(['init', 'foo', 'bar', 'baz'])
mock.assert_called_once_with(
directories=['foo', 'bar', 'baz'],
filename='config.json',
hostname='hostname',
register_data_sources=False
)
class TestPublishCli(TestCase):
required_args = ['publish', '-u', 'user', '-p', 'pass']
def test_throws_if_no_username(self):
with self.assertRaises(SystemExit):
cli.main(['publish', '-c', 'config.json', '-p', 'pass'])
def test_throws_if_no_password(self):
with self.assertRaises(SystemExit):
cli.main(['publish', '-u', 'user', '-c', 'config.json'])
def test_uses_git_if_both_git_and_inputs_specified(self):
expected = 'bar'
with patch('slap.publisher.ConfigParser.load_config'):
with patch('slap.publisher.Publisher.publish_input') as mock_publish:
with patch('slap.git.get_changed_mxds') as mock_changed:
mock_changed.return_value = [expected]
cli.main(['publish', '-u', 'user', '-p', 'pass', '-g', 'some-hash', 'foo'])
mock_changed.assert_called_once_with('some-hash')
mock_publish.assert_called_once_with(expected)
def test_uses_default_config(self):
with patch('slap.cli.Publisher') as mock_publisher:
with patch('slap.publisher.ConfigParser.load_config'):
cli.main(['publish', '-u', 'user', '-p', 'pass'])
mock_publisher.assert_called_once_with('user', 'pass', 'config.json', None)
def test_set_hostname(self):
with patch('slap.cli.Publisher') as mock_publisher:
with patch('slap.publisher.ConfigParser.load_config'):
cli.main(self.required_args + ['-n', 'host'])
mock_publisher.assert_called_once_with('user', 'pass', 'config.json', 'host')
def test_register_data_sources(self):
with patch('slap.publisher.Publisher.register_data_sources') as mock_register:
with patch('slap.publisher.ConfigParser.load_config'):
cli.main(self.required_args)
mock_register.assert_called_once()
def test_publish_all(self):
with patch('slap.publisher.Publisher.publish_all') as mock_publish:
with patch('slap.publisher.ConfigParser.load_config'):
cli.main(self.required_args)
mock_publish.assert_called_once()
def test_create_site(self):
with patch('slap.api.Api.create_site') as mock_create_site:
with patch('slap.publisher.ConfigParser.load_config'):
cli.main(self.required_args + ['-s'])
mock_create_site.assert_called_once()
def test_publish_inputs(self):
with patch('slap.publisher.Publisher.publish_input') as mock_publish:
with patch('slap.publisher.ConfigParser.load_config'):
input_files = ['foo', 'bar', 'baz']
cli.main(self.required_args + input_files)
calls = [call('foo'), call('bar'), call('baz')]
mock_publish.assert_has_calls(calls)
def test_publish_git(self):
with patch('slap.cli.Publisher.publish_input') as mock_publisher:
with patch('slap.publisher.ConfigParser.load_config'):
with patch('slap.git.get_changed_mxds') as mock_git:
sha = 'some-hash'
file = 'some/file'
mock_git.return_value = [file]
cli.main(self.required_args + ['-g', sha])
mock_git.assert_called_once_with(sha)
mock_publisher.assert_called_once_with(file)
| gisinc/slap | tests/test_cli.py | Python | mit | 4,431 |
#!/usr/bin/env python3
# Copyright (c) 2016 The nealcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from test_framework.mininode import *
from test_framework.test_framework import nealcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
import time
import random
from binascii import hexlify
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_ACTIVATION_THRESHOLD = 108
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
# Calculate the virtual size of a witness block:
# (base + witness/4)
def get_virtual_size(witness_block):
base_size = len(witness_block.serialize())
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3*base_size + total_size + 3)/4)
return vsize
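# Worked example (illustrative numbers only): base_size=200 and total_size=300 give
# vsize = int((3*200 + 300 + 3)/4) = 225, i.e. base plus a quarter of the witness data, rounded up.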
# Note: we can reduce code by using SingleNodeConnCB (in master, not 0.12)
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong(0)
self.sleep_time = 0.05
self.getdataset = set()
self.last_reject = None
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_inv(self, conn, message):
self.last_inv = message
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
self.last_getdata = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
def on_pong(self, conn, message):
self.last_pong = message
def on_reject(self, conn, message):
self.last_reject = message
#print (message)
# Syncing helpers
def sync(self, test_function, timeout=60):
while timeout > 0:
with mininode_lock:
if test_function():
return
time.sleep(self.sleep_time)
timeout -= self.sleep_time
raise AssertionError("Sync failed to complete")
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_pong.nonce == self.ping_counter
self.sync(test_function, timeout)
self.ping_counter += 1
return
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
self.sync(test_function, timeout)
return
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_getdata != None
self.sync(test_function, timeout)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_getheaders != None
self.sync(test_function, timeout)
def wait_for_inv(self, expected_inv, timeout=60):
test_function = lambda: self.last_inv != expected_inv
self.sync(test_function, timeout)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
with mininode_lock:
self.last_getdata = None
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
self.wait_for_getdata(timeout)
return
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_getdata = None
self.last_getheaders = None
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
return
def announce_block(self, block, use_header):
with mininode_lock:
self.last_getdata = None
if use_header:
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_block = None
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_block
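    # Relay a transaction over P2P (with or without its witness) and check whether
    # node0 accepts it to the mempool; if rejected, optionally verify the reject reason.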
def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
self.send_message(tx_message)
self.sync_with_ping()
assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted)
        if reason is not None and not accepted:
# Check the rejection reason as well.
with mininode_lock:
assert_equal(self.last_reject.reason, reason)
# Test whether a witness block had the correct effect on the tip
def test_witness_block(self, block, accepted, with_witness=True):
if with_witness:
self.send_message(msg_witness_block(block))
else:
self.send_message(msg_block(block))
self.sync_with_ping()
assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted)
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO(object):
def __init__(self, sha256, n, nValue):
self.sha256 = sha256
self.n = n
self.nValue = nValue
# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
txTo.rehash()
class SegWitTest(nealcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.nodes = []
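        # Node0 is the primary segwit-capable node used for mining throughout the tests.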
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1"]))
# Start a node for testing IsStandard rules.
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-whitelist=127.0.0.1", "-acceptnonstdtxn=0"]))
connect_nodes(self.nodes[0], 1)
# Disable segwit's bip9 parameter to simulate upgrading after activation.
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-bip9params=segwit:0:0"]))
connect_nodes(self.nodes[0], 2)
''' Helpers '''
# Build a block on top of node0's tip.
def build_next_block(self, nVersion=4):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = nVersion
block.rehash()
return block
# Adds list of transactions to block, adds witness commitment, then solves.
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
return
''' Individual tests '''
def test_witness_services(self):
print("\tVerifying NODE_WITNESS service bit")
assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
# to use in later tests.
def test_non_witness_transaction(self):
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
print("\tTesting non-witness transaction")
block = self.build_next_block(nVersion=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
self.nodes[0].generate(1)
# Verify that blocks with witnesses are rejected before activation.
def test_unnecessary_witness_before_segwit_activation(self):
print("\tTesting behavior of unnecessary witnesses")
# For now, rely on earlier tests to have created at least one utxo for
# us to use
assert(len(self.utxo) > 0)
assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
self.test_node.test_witness_block(block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, nealcoind delays sending reject messages for blocks
# until the future, making synchronization here difficult.
#assert_equal(self.test_node.last_reject.reason, "unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
sync_blocks(self.nodes)
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
tx2.rehash()
self.test_node.test_transaction_acceptance(tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness before
# segwit activation doesn't blind a node to a transaction. Transactions
# rejected for having a witness before segwit activation shouldn't be added
# to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
self.std_node.test_transaction_acceptance(tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, CScript([OP_TRUE])))
tx4.rehash()
self.test_node.test_transaction_acceptance(tx3, False, True)
self.test_node.test_transaction_acceptance(tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
# Mine enough blocks for segwit's vb state to be 'started'.
def advance_to_segwit_started(self):
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Genesis block is 'defined'.
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD-height-1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Mine enough blocks to lock in segwit, but don't activate.
# TODO: we could verify that lockin only happens at the right threshold of
# signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_lockin(self):
height = self.nodes[0].getblockcount()
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD-1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
# Mine enough blocks to activate segwit.
# TODO: we could verify that activation only happens at the right threshold
# of signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_active(self):
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
# This test can only be run after segwit has activated
def test_witness_commitments(self):
print("\tTesting witness commitments")
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
self.test_node.test_witness_block(block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
self.test_node.test_witness_block(block_2, accepted=True)
# Now test commitments with actual transactions
assert (len(self.utxo) > 0)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
        # tx2 will spend tx, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_block_malleability(self):
print("\tTesting witness block malleability")
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness nonce doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
self.test_node.test_witness_block(block, accepted=False)
# Changing the witness nonce doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
self.test_node.test_witness_block(block, accepted=True)
def test_witness_block_size(self):
print("\tTesting witness block size limit")
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
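        # Fan the utxo out into NUM_OUTPUTS identical P2WSH outputs, reserving some value for fees.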
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value/NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
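        # The child spends every parent output; its large witness stacks provide most of the block's weight.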
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes+1, 55)
block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2*1024*1024)
self.test_node.test_witness_block(block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
self.test_node.test_witness_block(block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
# submitblock will try to add the nonce automatically, so that mining
# software doesn't need to worry about doing so itself.
def test_submit_block(self):
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
        # This can't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let nealcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
# Consensus tests of extra witness data in a transaction.
def test_extra_witness_data(self):
print("\tTesting extra witness data in tx")
assert(len(self.utxo) > 0)
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-2000, scriptPubKey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
self.test_node.test_witness_block(block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_push_length(self):
''' Should only allow up to 520 byte pushes in witness stack '''
print("\tTesting maximum witness push size")
MAX_SCRIPT_ELEMENT_SIZE = 520
assert(len(self.utxo))
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_program_length(self):
# Can create witness outputs that are long, but can't be greater than
# 10k bytes to successfully spend
print("\tTesting maximum witness program length")
assert(len(self.utxo))
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
long_witness_hash = sha256(long_witness_program)
long_scriptPubKey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, long_scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_input_length(self):
''' Ensure that vin length must match vtxinwit length '''
print("\tTesting witness input length")
assert(len(self.utxo))
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
nValue = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_tx_relay_before_segwit_activation(self):
print("\tTesting relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_getdata.inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
try:
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
print("Error: duplicate tx getdata!")
assert(False)
except AssertionError as e:
pass
        # Delivering this transaction with witness should fail (no matter who
        # it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
# After segwit activates, verify that mempool:
# - rejects transactions with unnecessary/extra witnesses
# - accepts transactions with valid witnesses
# and that witness transactions are relayed to non-upgraded peers.
def test_tx_relay_after_segwit_activation(self):
print("\tTesting relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add too-large for IsStandard witness and check that it does not enter reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a'*400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue-1000, CScript([OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv(CInv(1, tx2.sha256)) # wait until tx2 was inv'ed
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv(CInv(1, tx3.sha256))
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
        vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) // 4
assert_equal(raw_tx["vsize"], vsize)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
# Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
# This is true regardless of segwit activation.
# Also test that we don't ask for blocks from unupgraded peers
def test_block_relay(self, segwit_activated):
print("\tTesting block relay")
blocktype = 2|MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block1, True)
block2 = self.build_next_block(nVersion=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block2, True)
block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_getdata.inv[0].type == blocktype)
self.test_node.test_witness_block(block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
        if not segwit_activated:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height+1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
self.test_node.test_witness_block(block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(nVersion=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
self.old_node.announce_block(block4, use_header=False)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
# V0 segwit outputs should be standard after activation, but not before.
def test_standardness_v0(self, segwit_activated):
print("\tTesting standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-1000, p2sh_scriptPubKey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-10000, scriptPubKey)]
tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
# Now create something that looks like a P2PKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
if segwit_activated:
# if tx was accepted, then we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
else:
# if tx wasn't accepted, we just re-spend the p2sh output we started with.
tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)]
tx2.rehash()
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
# Now update self.utxo for later tests.
tx3 = CTransaction()
if segwit_activated:
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
else:
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, witness_program)]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
def test_segwit_versions(self):
print("\tTesting standardness/consensus for segwit versions (0-16)")
assert(len(self.utxo))
        NUM_TESTS = 17 # will test OP_0, OP_1, ..., OP_16
if (len(self.utxo) < NUM_TESTS):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_TESTS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
count = 0
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
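        # Cycle through witness versions OP_1..OP_16 and finally OP_0, creating one output of each.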
for version in list(range(OP_1, OP_16+1)) + [OP_0]:
count += 1
# First try to spend to a future version segwit scriptPubKey.
scriptPubKey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue-1000, scriptPubKey)]
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue-1000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked with fRequireStandard
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_reject.reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_premature_coinbase_witness_spend(self):
print("\tTesting premature coinbase witness spend")
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = scriptPubKey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
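        # After these 98 blocks the witness coinbase has 99 confirmations, one short of maturity.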
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=True)
sync_blocks(self.nodes)
def test_signature_version_1(self):
print("\tTesting segwit signature hash version 1")
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Too-small input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Now try correct value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
NUM_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
                if (hashtype & ~SIGHASH_ANYONECANPAY) == SIGHASH_SINGLE and i >= num_outputs:
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
print("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests. Just spend everything in
# temp_utxos to a corresponding entry in self.utxos
tx = CTransaction()
index = 0
for i in temp_utxos:
# Just spend to our usual anyone-can-spend output
# Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
# Test P2SH wrapped witness programs.
def test_p2sh_witness(self, segwit_activated):
print("\tTesting P2SH witness transactions")
assert(len(self.utxo))
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# Verify mempool acceptance and block validity
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
        # segwit activation. Note that older nealcoind nodes that are not
# segwit-aware would also reject this for failing CLEANSTACK.
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = scriptSig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're before activation, then sending this without witnesses
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
self.test_node.test_witness_block(block, accepted=True)
else:
self.test_node.test_witness_block(block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
# Test the behavior of starting up a segwit-aware node after the softfork
# has activated. As segwit requires different block data than pre-segwit
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-nealcoind> to
# the test.
def test_upgrade_after_activation(self, node, node_id):
print("\tTesting software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active nealcoind
# Make sure the nodes are all up
sync_blocks(self.nodes)
# Restart with the new binary
stop_node(node, node_id)
self.nodes[node_id] = start_node(node_id, self.options.tmpdir, ["-debug"])
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(node, 'segwit')['status'] == "active")
        # Make sure this peer's blocks match those of node0.
height = node.getblockcount()
while height >= 0:
block_hash = node.getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), node.getblock(block_hash))
height -= 1
def test_witness_sigops(self):
'''Ensure sigop counting is correct inside witnesses.'''
print("\tTesting sigops limit")
assert(len(self.utxo))
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
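        # Each OP_CHECKMULTISIG here counts as 20 sigops (it is not preceded by a
        # small-integer push) and each OP_CHECKSIG counts as 1.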
sigops_per_script = 20*5 + 193*1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.vout[-2].scriptPubKey = scriptPubKey_toomany
tx.vout[-1].scriptPubKey = scriptPubKey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
self.test_node.test_witness_block(block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs-1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
self.test_node.test_witness_block(block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
self.test_node.test_witness_block(block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
self.test_node.test_witness_block(block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 so that it spends the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
self.test_node.test_witness_block(block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_getblocktemplate_before_lockin(self):
print("\tTesting getblocktemplate setting of segwit versionbit (before lockin)")
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# If we're not indicating segwit support, we should not be signalling
# for segwit activation, nor should we get a witness commitment.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
self.nodes[2].setmocktime(int(time.time())+10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# TODO: this duplicates some code from blocktools.py, would be nice
# to refactor.
# Check that default_witness_commitment is present.
block = CBlock()
witness_root = block.get_merkle_root([ser_uint256(0), ser_uint256(txid)])
check_commitment = uint256_from_str(hash256(ser_uint256(witness_root)+ser_uint256(0)))
from test_framework.blocktools import WITNESS_COMMITMENT_HEADER
output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(check_commitment)
script = CScript([OP_RETURN, output_data])
assert_equal(witness_commitment, bytes_to_hex_str(script))
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
def test_uncompressed_pubkey(self):
print("\tTesting uncompressed pubkeys")
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
assert(len(self.utxo) > 0)
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue-1000, scriptPKH))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptWSH = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptWSH))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
tx2.rehash()
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(scriptWSH)
scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([scriptWSH])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptP2SH))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
self.test_node.test_witness_block(block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
scriptPubKey = GetP2PKHScript(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, scriptPubKey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
self.test_node.test_witness_block(block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue-1000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
self.test_node.test_transaction_acceptance(tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
def test_non_standard_witness(self):
print("\tTesting detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
self.old_node = TestNode() # only NODE_NETWORK
self.std_node = TestNode() # for testing node1 (fRequireStandard=true)
self.p2p_connections = [self.test_node, self.old_node]
self.connections = []
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
self.test_node.add_connection(self.connections[0])
self.old_node.add_connection(self.connections[1])
self.std_node.add_connection(self.connections[2])
NetworkThread().start() # Start up network handling in another thread
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Test logic begins here
self.test_node.wait_for_verack()
print("\nStarting tests before segwit lock in:")
self.test_witness_services() # Verifies NODE_WITNESS
self.test_non_witness_transaction() # non-witness txs are accepted
self.test_unnecessary_witness_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
# Advance to segwit being 'started'
self.advance_to_segwit_started()
sync_blocks(self.nodes)
self.test_getblocktemplate_before_lockin()
sync_blocks(self.nodes)
# At lockin, nothing should change.
print("\nTesting behavior post lockin, pre-activation")
self.advance_to_segwit_lockin()
# Retest unnecessary witnesses
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
self.test_p2sh_witness(segwit_activated=False)
self.test_standardness_v0(segwit_activated=False)
sync_blocks(self.nodes)
# Now activate segwit
print("\nTesting behavior after segwit activation")
self.advance_to_segwit_active()
sync_blocks(self.nodes)
# Test P2SH witness handling again
self.test_p2sh_witness(segwit_activated=True)
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay(segwit_activated=True)
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0(segwit_activated=True)
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(self.nodes[2], 2)
self.test_witness_sigops()
if __name__ == '__main__':
SegWitTest().main()
| appop/bitcoin | qa/rpc-tests/p2p-segwit.py | Python | mit | 92,763 |
import os, json, shutil
import subprocess as sp, sys
import hashlib
import bibtexparser
import six
from six.moves import input as raw_input
from papers import logger
# GIT = False
DRYRUN = False
# config directory location
HOME = os.environ.get('HOME',os.path.expanduser('~'))
CONFIG_HOME = os.environ.get('XDG_CONFIG_HOME', os.path.join(HOME, '.config'))
CACHE_HOME = os.environ.get('XDG_CACHE_HOME', os.path.join(HOME, '.cache'))
DATA_HOME = os.environ.get('XDG_DATA_HOME', os.path.join(HOME, '.local','share'))
CONFIG_FILE = os.path.join(CONFIG_HOME, 'papersconfig.json')
DATA_DIR = os.path.join(DATA_HOME, 'papers')
CACHE_DIR = os.path.join(CACHE_HOME, 'papers')
# utils
# -----
class bcolors:
# https://stackoverflow.com/a/287944/2192272
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def check_filesdir(folder):
folder_size = 0
file_count = 0
for (path, dirs, files) in os.walk(folder):
for file in files:
filename = os.path.join(path, file)
if filename.endswith('.pdf'):
folder_size += os.path.getsize(filename)
file_count += 1
return file_count, folder_size
class Config(object):
"""configuration class to specify system-wide collections and files-dir
"""
def __init__(self, file=CONFIG_FILE, data=DATA_DIR, cache=CACHE_DIR,
bibtex=None, filesdir=None, gitdir=None, git=False):
self.file = file
self.data = data
self.cache = cache
self.filesdir = filesdir or os.path.join(data, 'files')
self.bibtex = bibtex or os.path.join(data, 'papers.bib')
self.gitdir = gitdir or data
self.git = git
def collections(self):
files = []
for root, dirs, files in os.walk(os.path.dirname(self.bibtex)):
break
# return sorted(f[:-4] for f in files if f.endswith('.bib'))
return sorted(f for f in files if f.endswith('.bib'))
def save(self):
json.dump({
"filesdir":self.filesdir,
"bibtex":self.bibtex,
"git":self.git,
"gitdir":self.gitdir,
}, open(self.file, 'w'), sort_keys=True, indent=2, separators=(',', ': '))
def load(self):
js = json.load(open(self.file))
self.bibtex = js.get('bibtex', self.bibtex)
self.filesdir = js.get('filesdir', self.filesdir)
self.git = js.get('git', self.git)
self.gitdir = js.get('gitdir', self.gitdir)
def reset(self):
cfg = type(self)()
self.bibtex = cfg.bibtex
self.filesdir = cfg.filesdir
def check_install(self):
if not os.path.exists(self.cache):
logger.info('make cache directory for DOI requests: '+self.cache)
os.makedirs(self.cache)
# make a git commit?
@property
def _gitdir(self):
return os.path.join(self.gitdir, '.git')
def gitinit(self, branch=None):
if not os.path.exists(self._gitdir):
# with open(os.devnull, 'w') as shutup:
sp.check_call(['git','init'], cwd=self.gitdir)
else:
raise ValueError('git is already initialized in '+self.gitdir)
def gitcommit(self, branch=None, message=None):
if os.path.exists(self._gitdir):
target = os.path.join(self.gitdir, os.path.basename(self.bibtex))
if not os.path.samefile(self.bibtex, target):
shutil.copy(self.bibtex, target)
message = message or 'save '+self.bibtex+' after command:\n\n papers ' +' '.join(sys.argv[1:])
with open(os.devnull, 'w') as shutup:
if branch is not None:
sp.check_call(['git','checkout',branch], stdout=shutup, stderr=shutup, cwd=self.gitdir)
sp.check_call(['git','add',target], stdout=shutup, stderr=shutup, cwd=self.gitdir)
res = sp.call(['git','commit','-m', message], stdout=shutup, stderr=shutup, cwd=self.gitdir)
if res == 0:
logger.info('git commit')
else:
raise ValueError('git is not initialized in '+self.gitdir)
def status(self, check_files=False, verbose=False):
lines = []
lines.append(bcolors.BOLD+'papers configuration'+bcolors.ENDC)
if verbose:
lines.append('* configuration file: '+self.file)
lines.append('* cache directory: '+self.cache)
# lines.append('* app data directory: '+self.data)
lines.append('* git-tracked: '+str(self.git))
if self.git:
lines.append('* git directory : '+self.gitdir)
if not os.path.exists(self.filesdir):
status = bcolors.WARNING+' (missing)'+bcolors.ENDC
elif not os.listdir(self.filesdir):
status = bcolors.WARNING+' (empty)'+bcolors.ENDC
elif check_files:
file_count, folder_size = check_filesdir(self.filesdir)
status = bcolors.OKBLUE+" ({} files, {:.1f} MB)".format(file_count, folder_size/(1024*1024.0))+bcolors.ENDC
else:
status = ''
files = self.filesdir
lines.append('* files directory: '+files+status)
if not os.path.exists(self.bibtex):
status = bcolors.WARNING+' (missing)'+bcolors.ENDC
elif check_files:
try:
bibtexstring = open(self.bibtex).read()
db = bibtexparser.loads(bibtexstring)
if len(db.entries):
status = bcolors.OKBLUE+' ({} entries)'.format(len(db.entries))+bcolors.ENDC
else:
status = bcolors.WARNING+' (empty)'+bcolors.ENDC
except:
status = bcolors.FAIL+' (corrupted)'+bcolors.ENDC
elif os.path.getsize(self.bibtex) == 0:
status = bcolors.WARNING+' (empty)'+bcolors.ENDC
else:
status = ''
lines.append('* bibtex: '+self.bibtex+status)
# if verbose:
# collections = self.collections()
# status = bcolors.WARNING+' none'+bcolors.ENDC if not collections else ''
# lines.append('* other collections:'+status)
# for i, nm in enumerate(collections):
# if i > 10:
# lines.append(' '+'({} more collections...)'.format(len(collections)-10))
# break
# status = ' (*)' if nm == self.collection else ''
# lines.append(' '+nm+status)
return '\n'.join(lines)
config = Config()
config.check_install()
def cached(file, hashed_key=False):
file = os.path.join(config.cache, file)
def decorator(fun):
if os.path.exists(file):
cache = json.load(open(file))
else:
cache = {}
def decorated(doi):
if hashed_key: # use hashed parameter as key (for full text query)
if six.PY3:
key = hashlib.sha256(doi.encode('utf-8')).hexdigest()[:6]
else:
key = hashlib.sha256(doi).hexdigest()[:6]
else:
key = doi
if key in cache:
logger.debug('load from cache: '+repr((file, key)))
return cache[key]
else:
res = cache[key] = fun(doi)
if not DRYRUN:
json.dump(cache, open(file,'w'))
return res
return decorated
return decorator
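# Usage sketch for the decorator above (the cache file name and the wrapped
# function are illustrative, not part of this module):
#
#   @cached('crossref-bibtex.json')
#   def fetch_bibtex_by_doi(doi):
#       ...  # network lookup returning a JSON-serializable value
#
# The first call for a given DOI performs the lookup; later calls are served
# from the JSON file stored under config.cache (by default ~/.cache/papers/).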
def hash_bytestr_iter(bytesiter, hasher, ashexstr=False):
for block in bytesiter:
hasher.update(block)
return (hasher.hexdigest() if ashexstr else hasher.digest())
def file_as_blockiter(afile, blocksize=65536):
with afile:
block = afile.read(blocksize)
while len(block) > 0:
yield block
block = afile.read(blocksize)
def checksum(fname):
"""memory-efficient check sum (sha256)
source: https://stackoverflow.com/a/3431835/2192272
"""
return hash_bytestr_iter(file_as_blockiter(open(fname, 'rb')), hashlib.sha256())
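# Illustrative use: checksum('/tmp/example.pdf') returns the raw sha256 digest
# bytes, which can be compared against a stored digest to detect duplicate files.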
# move / copy
def move(f1, f2, copy=False, interactive=True):
dirname = os.path.dirname(f2)
if dirname and not os.path.exists(dirname):
logger.info('create directory: '+dirname)
os.makedirs(dirname)
if f1 == f2:
logger.info('dest is identical to src: '+f1)
return
if os.path.exists(f2):
ans = raw_input('dest file already exists: '+f2+'. Replace? (y/n) ')
if ans != 'y':
return
if copy:
cmd = u'cp {} {}'.format(f1, f2)
logger.info(cmd)
if not DRYRUN:
shutil.copy(f1, f2)
else:
cmd = u'mv {} {}'.format(f1, f2)
logger.info(cmd)
if not DRYRUN:
shutil.move(f1, f2)
| perrette/myref | papers/config.py | Python | mit | 8,951 |
import os
import sys
from numerapi.numerapi import NumerAPI
import luigi
class FetchAndExtractData(luigi.Task):
"""
Fetches the most recent dataset and extracts the contents to the given
path if not yet done (default path is ``./data``).
:param output_path:
(relative) path where the data should be written to. Defaults to
``./data``. Default signature is
``FetchAndExtractData(output_path='./data')``.
::
data
├── numerai_dataset_95
│ ├── example_model.py
│ ├── example_model.r
│ ├── example_predictions.csv
│ ├── numerai_tournament_data.csv
│ └── numerai_training_data.csv
└── numerai_dataset_95.zip
"""
output_path = luigi.Parameter(default='./data/')
def output(self):
"""
Manages the files to be written and determines their existence.
This is determined by checking all the listed files below. If any
of them does not exist, :py:func:`run` is invoked.
:returns:
A ``dict`` with the following keys:
* ``zipfile``: original file as downloaded
(``numerai_dataset_xxx.zip``)
* ``training_data.csv``: the training data
(``numerai_training_data.csv``)
* ``tournament_data.csv``: the tournament data
(``numerai_tournament_data.csv``)
* ``example_predictions.csv``: example predictions
(``example_predictions.csv``)
Note that ``example_model.py`` and ``example_model.r`` are not referenced,
as these are of no use to us.
"""
self.apc = NumerAPI()
current_round = self.apc.get_current_round()
dataset_name = "numerai_dataset_{0}.zip".format(current_round)
dataset_dir = "numerai_dataset_{0}".format(current_round)
assert self.apc.download_current_dataset(dest_path=self.output_path,
dest_filename=dataset_name,
unzip=True)
# see numerapi download_current_dataset
dataset_path = os.path.join(self.output_path, dataset_dir)
test_data_path = os.path.join(dataset_path, 'numerai_training_data.csv')
tournament_data_path = os.path.join(dataset_path,
'numerai_tournament_data.csv')
example_data_path = os.path.join(dataset_path,
'example_predictions.csv')
out = {
'zipfile': luigi.LocalTarget(os.path.join(self.output_path, dataset_name)),
'training_data.csv': luigi.LocalTarget(test_data_path),
'tournament_data.csv': luigi.LocalTarget(tournament_data_path),
'example_predictions.csv': luigi.LocalTarget(example_data_path)
}
print(out)
return out
def run(self):
out = self.output()
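# Illustrative command-line invocation (module path assumed from this file's
# location in the repository):
#
#   luigi --module tasks.numerai_fetch_training_data FetchAndExtractData \
#       --output-path ./data/ --local-scheduler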
| ChristianSch/numerflow | tasks/numerai_fetch_training_data.py | Python | mit | 3,019 |
from pipeline import Pipeline
from phase import simpleAsyncPhase
from component import CountComponent, PassComponent
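# Wire a two-phase pipeline: a counting source feeding a pass-through sink,
# then run it; the same wiring is repeated below with a different count.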
source = CountComponent(conf={'n':20})
dest = PassComponent()
ph1 = simpleAsyncPhase(source)
ph2 = simpleAsyncPhase(dest)
ph1.set_children([ph2])
pipeline = Pipeline(ph1)
pipeline.run()
import ipdb; ipdb.set_trace() # XXX BREAKPOINT
source = CountComponent(conf={'n':30})
dest = PassComponent()
ph1 = simpleAsyncPhase(source)
ph2 = simpleAsyncPhase(dest)
ph1.set_children([ph2])
pipeline = Pipeline(ph1)
print pipeline._ioloop._stopped
pipeline.run()
| sreedom/rabbit-pipeline | test_driver.py | Python | mit | 573 |
"""
Extract LIWC counts from sliced dialogue file.
"""
from clean_extracted_text import clean_text
from get_LIWC_counts import get_LIWC_counts
from nltk.tokenize import WordPunctTokenizer
import os, re
import pandas as pd
import argparse
from stopwords import get_stopwords
N_SLICES=60
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--sub_file', default='../data/subtitles/subtitlesInTSV/finding_nemo_clean.tsv')
parser.add_argument('--LIWC_dir', default='/hg191/corpora/LIWC/resources/liwc_lexicons/')
args = parser.parse_args()
sub_file = args.sub_file
LIWC_dir = args.LIWC_dir
LIWC_categories = ['positive_affect', 'negative_affect', 'anger', 'death',
'family', 'home', 'humans', 'social',
'percept', 'insight']
stopwords = get_stopwords('en')
LIWC_category_wordlists = {c : [re.compile('^' + l.strip() + '$')
for l in open(os.path.join(LIWC_dir, '%s'%(c)), 'r')
if l.strip() not in stopwords]
for c in LIWC_categories}
# replace positive/negative affect
LIWC_categories += ['positive', 'negative']
LIWC_categories.remove('positive_affect')
LIWC_categories.remove('negative_affect')
LIWC_category_wordlists['positive'] = LIWC_category_wordlists.pop('positive_affect')
LIWC_category_wordlists['negative'] = LIWC_category_wordlists.pop('negative_affect')
TKNZR = WordPunctTokenizer()
full_slice_list = set(range(N_SLICES))
# we count either the total number of tokens
# or the number of unique tokens
# count_option = 'total'
count_option = 'unique'
data = pd.read_csv(sub_file, sep='\t', index_col=False)
data = data.sort_values('slice', ascending=True)
fname = os.path.basename(sub_file).replace('.tsv', '')
out_dir = os.path.dirname(sub_file)
empty_slices = full_slice_list - set(data['slice'].unique())
if(len(empty_slices) > 0):
print('filling %s with empty slices %s'%
(fname, empty_slices))
empty_slice_rows = pd.DataFrame(
[{'slice' : c, 'dialogue' : ''}
for c in empty_slices]
)
data = pd.concat([data, empty_slice_rows],
axis=0)
slice_iter = data.groupby('slice')
slice_text = [clean_text(' '.join(map(str, c[1]['dialogue'].tolist())))
for c in slice_iter]
slice_LIWC_counts = {c : [] for c in LIWC_categories}
# also store words cuz yolo
slice_LIWC_words = {c : [] for c in LIWC_categories}
slice_LIWC_count_dicts = {c : [] for c in LIWC_categories}
for t in slice_text:
tokens = TKNZR.tokenize(t)
for c in LIWC_categories:
counts = get_LIWC_counts(tokens, LIWC_words=LIWC_category_wordlists[c])
if(count_option == 'total'):
total_counts = sum(counts.values())
elif(count_option == 'unique'):
total_counts = len(counts)
# TODO: store individual words as well as aggregate counts
slice_LIWC_counts[c].append(total_counts)
slice_words = sorted(counts.keys())
slice_LIWC_words[c].append(' '.join(slice_words))
slice_LIWC_count_dicts[c].append(counts)
slice_LIWC_counts = pd.DataFrame(slice_LIWC_counts)
slice_LIWC_counts['time'] = slice_LIWC_counts.index
counts_fname = os.path.join(out_dir, '%s_LIWC_slice_counts.tsv'%(fname))
slice_LIWC_counts.to_csv(counts_fname, sep='\t', index=None)
slice_LIWC_words = pd.DataFrame(slice_LIWC_words)
slice_LIWC_words['time'] = slice_LIWC_words.index
word_fname = os.path.join(out_dir, '%s_LIWC_slice_words.tsv'%(fname))
slice_LIWC_words.to_csv(word_fname, sep='\t', index=None)
slice_LIWC_count_dicts = pd.DataFrame(slice_LIWC_count_dicts)
slice_LIWC_count_dicts['time'] = slice_LIWC_count_dicts.index
counts_fname = os.path.join(out_dir, '%s_LIWC_slice_token_counts.tsv'%(fname))
# convert dictionaries to string values
def dict_to_str(d):
if(len(d) == 0):
return ''
else:
return ','.join(['%s:%d'%(k,v) for k,v in d.iteritems()])
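# e.g. dict_to_str({'home': 2, 'family': 1}) -> 'home:2,family:1'
# (key order follows dict iteration order and is not guaranteed)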
slice_LIWC_count_dicts[LIWC_categories] = slice_LIWC_count_dicts[LIWC_categories].applymap(dict_to_str)
slice_LIWC_count_dicts.to_csv(counts_fname, sep='\t', index=None)
| fredhohman/a-viz-of-ice-and-fire | scripts/extract_LIWC_from_slices_general.py | Python | mit | 4,435 |
from setuptools import setup, find_packages
setup(
name = "pymuxinator",
version = "0.0.6",
author = "Caleb Mingle",
author_email = "[email protected]",
description = "Tmux session manager",
url = "http://mingle.cm",
packages = find_packages(),
include_package_data=True,
entry_points={
'console_scripts': [
'pymuxinator = pymuxinator.cli:main',
'mux = pymuxinator.cli:start',
],
},
tests_require=[
"nose>=1.0",
"mock>=1.0",
],
install_requires=[
"pyyaml>=1.0",
"Jinja2>=1.0",
],
test_suite = "nose.collector"
)
| dentafrice/pymuxinator | setup.py | Python | mit | 643 |
import re
rex = re.compile('(\w+)(\d+)\((\w+)\)')
rex_commit = re.compile('c(\d+)')
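# Illustrative matches (assumed token shapes):
#   rex.match('op12(arg)').groups() -> ('op1', '2', 'arg')   # greedy \w+ leaves only the last digit to \d+
#   rex_commit.match('c42').group(1) -> '42'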
| saraivaufc/TS-Basico | expressions.py | Python | mit | 85 |
"""
@author: Bryan Silverthorn <[email protected]>
"""
import qy
import qy.llvm as llvm
class Function(qy.Value):
"""
Function in the wrapper language.
"""
def __call__(self, *arguments):
"""
Emit IR for a function call.
"""
# sanity
if len(arguments) != len(self.argument_types):
raise TypeError(
"function %s expects %i arguments but received %i" % (
self._value.name,
len(self.argument_types),
len(arguments),
),
)
# emit the call
arguments = map(qy.value_from_any, arguments)
coerced = [v.cast_to(a) for (v, a) in zip(arguments, self.argument_types)]
return \
qy.Value.from_low(
qy.get().builder.call(
self._value,
[c.low for c in coerced],
),
)
@property
def argument_values(self):
"""
Return the function argument values.
Meaningful only inside the body of this function.
"""
return map(qy.value_from_any, self._value.args)
@property
def argument_types(self):
"""
Return the function argument types.
"""
if self.type_.kind == llvm.TYPE_POINTER:
return self.type_.pointee.args
else:
return self.type_.args
@staticmethod
def named(name, return_type = llvm.Type.void(), argument_types = ()):
"""
Look up or create a named function.
"""
type_ = \
llvm.Type.function(
qy.type_from_any(return_type),
map(qy.type_from_any, argument_types),
)
return Function(qy.get().module.get_or_insert_function(type_, name))
@staticmethod
def get_named(name):
"""
Look up a named function.
"""
return Function(qy.get().module.get_function_named(name))
@staticmethod
def new_named(name, return_type = llvm.Type.void(), argument_types = (), internal = True):
"""
Create a named function.
"""
type_ = \
llvm.Type.function(
qy.type_from_any(return_type),
map(qy.type_from_any, argument_types),
)
function = qy.get().module.add_function(type_, name)
if internal:
function.linkage = llvm.LINKAGE_INTERNAL
return Function(function)
@staticmethod
def define(return_type = llvm.Type.void(), argument_types = (), name = None, internal = True):
"""
Create a named function.
"""
def decorator(emit):
"""
Emit the body of the function.
"""
if name is None:
if emit.__name__ == "_":
function_name = "function"
else:
function_name = emit.__name__
else:
function_name = name
function = Function.new_named(function_name, return_type, argument_types, internal = internal)
entry = function._value.append_basic_block("entry")
with qy.this_builder(llvm.Builder.new(entry)) as builder:
emit(*function.argument_values)
return function
return decorator
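# Usage sketch for the decorator above (the types are illustrative; the decorated
# body is expected to emit IR via the builder qy sets up for the entry block):
#
#   @Function.define(llvm.Type.int(32), [llvm.Type.int(32), llvm.Type.int(32)])
#   def add(x, y):
#       ...  # x and y arrive as qy Values wrapping the LLVM arguments
#
# Unless internal=False is passed, the emitted function gets internal linkage.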
@staticmethod
def define_once(return_type = llvm.Type.void(), argument_types = (), name = None, internal = True):
"""
Look up or create a named function.
"""
def decorator(emit):
"""
Look up or emit the function.
"""
if name is None:
if emit.__name__ == "_":
function_name = "function"
else:
function_name = emit.__name__
else:
function_name = name
if function_name in qy.get().module.global_variables:
return Function.get_named(function_name)
else:
define_decorator = \
Function.define(
return_type = return_type,
argument_types = argument_types,
name = name,
internal = internal,
)
return define_decorator(emit)
return decorator
@staticmethod
def pointed(address, return_type, argument_types):
"""
Return a function from a function pointer.
"""
type_ = \
llvm.Type.function(
qy.type_from_any(return_type),
map(qy.type_from_any, argument_types),
)
return Function(llvm.Constant.int(iptr_type, address).inttoptr(llvm.Type.pointer(type_)))
@staticmethod
def intrinsic(intrinsic_id, qualifiers = ()):
"""
Return an intrinsic function.
"""
qualifiers = map(qy.type_from_any, qualifiers)
return Function(llvm.Function.intrinsic(qy.get().module, intrinsic_id, qualifiers))
| bsilverthorn/qy | src/qy/values/function.py | Python | mit | 5,308 |
# coding: utf-8
# data fixtures for functional tests
from datetime import date, time
import pytest
import marcotti.models.common.enums as enums
import marcotti.models.common.overview as mco
import marcotti.models.common.personnel as mcp
import marcotti.models.club as mc
@pytest.fixture
def comp_data():
return {
'domestic': {
'name': u"English Premier League",
'level': 1,
'country': mco.Countries(name=u"England", confederation=enums.ConfederationType.europe)
},
'international': {
'name': u"FIFA Club World Cup",
'level': 1,
'confederation': enums.ConfederationType.europe
}
}
@pytest.fixture
def season_data():
return {
'start_year': {
'yr': 2012
},
'end_year': {
'yr': 2013
}
}
@pytest.fixture
def venue_data():
england = mco.Countries(name=u"England", confederation=enums.ConfederationType.europe)
tz_london = mco.Timezones(name=u"Europe/London", offset=0.0, confederation=enums.ConfederationType.europe)
return {
"name": u"Emirates Stadium",
"city": u"London",
"country": england,
"timezone": tz_london,
"latitude": 51.555000,
"longitude": -0.108611,
"altitude": 41
}
@pytest.fixture
def venue_config():
return {
"date": date(2006, 7, 22),
"length": 105,
"width": 68,
"capacity": 60361,
"seats": 60361,
"surface": mco.Surfaces(description=u"Desso GrassMaster", type=enums.SurfaceType.hybrid)
}
@pytest.fixture
def person_data():
return {
'generic': {
'first_name': u"John",
'last_name': u"Doe",
'birth_date': date(1980, 1, 1),
'country': mco.Countries(name=u"Portlandia", confederation=enums.ConfederationType.north_america)
},
'manager': [
{
'first_name': u"Arsène",
'last_name': u"Wenger",
'birth_date': date(1949, 10, 22),
'country': mco.Countries(name=u"France", confederation=enums.ConfederationType.europe)
},
{
'first_name': u"Arthur",
'middle_name': u"Antunes",
'last_name': u"Coimbra",
'nick_name': u"Zico",
'birth_date': date(1953, 3, 3),
'country': mco.Countries(name=u"Brazil", confederation=enums.ConfederationType.south_america)
}
],
'player': [
{
'first_name': u'Miguel',
'middle_name': u'Ángel',
'last_name': u'Ponce',
'second_last_name': u'Briseño',
'birth_date': date(1989, 4, 12),
'country': mco.Countries(name=u"Mexico", confederation=enums.ConfederationType.north_america),
'order': enums.NameOrderType.middle
},
{
'first_name': u"Cristiano",
'middle_name': u"Ronaldo",
'last_name': u"Aveiro",
'second_last_name': u"dos Santos",
'nick_name': u"Cristiano Ronaldo",
'birth_date': date(1985, 2, 5),
'country': mco.Countries(name=u"Portugal", confederation=enums.ConfederationType.europe),
'order': enums.NameOrderType.western
},
{
'first_name': u'Heung-Min',
'last_name': u'Son',
'birth_date': date(1992, 7, 8),
'country': mco.Countries(name=u"Korea Republic", confederation=enums.ConfederationType.asia),
'order': enums.NameOrderType.eastern
}
],
'referee': [
{
'first_name': u"Christopher",
'middle_name': u"J",
'last_name': u"Foy",
'birth_date': date(1962, 11, 20),
'country': mco.Countries(name=u"England", confederation=enums.ConfederationType.europe)
},
{
'first_name': u"Cüneyt",
'last_name': u"Çakır",
'birth_date': date(1976, 11, 23),
'country': mco.Countries(name=u"Turkey", confederation=enums.ConfederationType.europe)
}
]
}
@pytest.fixture
def position_data():
return [
mcp.Positions(name=u"Left back", type=enums.PositionType.defender),
mcp.Positions(name=u"Forward", type=enums.PositionType.forward),
mcp.Positions(name=u"Second striker", type=enums.PositionType.forward)
]
@pytest.fixture
def player_history_data():
return [
{
'date': date(1996, 1, 1),
'height': 1.70,
'weight': 70
},
{
'date': date(1998, 7, 15),
'height': 1.74,
'weight': 76
},
{
'date': date(2001, 3, 11),
'height': 1.76,
'weight': 80
}
]
@pytest.fixture
def match_condition_data():
return {
'kickoff_time': time(19, 30),
'kickoff_temp': 15.0,
'kickoff_humidity': 68.0,
'kickoff_weather': enums.WeatherConditionType.partly_cloudy,
'halftime_weather': enums.WeatherConditionType.clear,
'fulltime_weather': enums.WeatherConditionType.windy_clear
}
@pytest.fixture
def match_data(comp_data, season_data, venue_data, person_data):
return {
"date": date(2012, 12, 12),
"competition": mco.DomesticCompetitions(**comp_data['domestic']),
"season": mco.Seasons(**{k: mco.Years(**v) for k, v in season_data.items()}),
"venue": mco.Venues(**venue_data),
"home_manager": mcp.Managers(**person_data['manager'][0]),
"away_manager": mcp.Managers(**person_data['manager'][1]),
"referee": mcp.Referees(**person_data['referee'][0])
}
@pytest.fixture
def club_data():
england = mco.Countries(name=u"England", confederation=enums.ConfederationType.europe)
france = mco.Countries(name=u"France", confederation=enums.ConfederationType.europe)
tz_london = mco.Timezones(name=u"Europe/London", offset=0.0, confederation=enums.ConfederationType.europe)
return {
'date': date(2015, 1, 1),
'competition': mco.DomesticCompetitions(name=u'Test Competition', level=1, country=england),
'season': mco.Seasons(start_year=mco.Years(yr=2014), end_year=mco.Years(yr=2015)),
'venue': mco.Venues(name=u"Emirates Stadium", city=u"London", country=england, timezone=tz_london),
'home_team': mc.Clubs(name=u"Arsenal FC", country=england),
'away_team': mc.Clubs(name=u"Lincoln City FC", country=england),
'home_manager': mcp.Managers(first_name=u"Arsène", last_name=u"Wenger",
birth_date=date(1949, 10, 22), country=france),
'away_manager': mcp.Managers(first_name=u"Gary", last_name=u"Simpson",
birth_date=date(1961, 4, 11), country=england),
'referee': mcp.Referees(first_name=u"Mark", last_name=u"Clattenburg",
birth_date=date(1975, 3, 13), country=england)
}
@pytest.fixture
def national_data():
mexico = mco.Countries(name=u"Mexico", confederation=enums.ConfederationType.north_america)
england = mco.Countries(name=u"England", confederation=enums.ConfederationType.europe)
france = mco.Countries(name=u"France", confederation=enums.ConfederationType.europe)
italy = mco.Countries(name=u"Italy", confederation=enums.ConfederationType.europe)
tz_london = mco.Timezones(name=u"Europe/London", offset=0.0, confederation=enums.ConfederationType.europe)
return {
'date': date(1997, 11, 12),
'competition': mco.InternationalCompetitions(name=u"International Cup", level=1,
confederation=enums.ConfederationType.fifa),
'season': mco.Seasons(start_year=mco.Years(yr=1997), end_year=mco.Years(yr=1998)),
'venue': mco.Venues(name=u"Emirates Stadium", city=u"London", country=england, timezone=tz_london),
'home_team': france,
'away_team': mexico,
'home_manager': mcp.Managers(first_name=u"Arsène", last_name=u"Wenger",
birth_date=date(1949, 10, 22), country=france),
'away_manager': mcp.Managers(first_name=u"Gary", last_name=u"Simpson",
birth_date=date(1961, 4, 11), country=england),
'referee': mcp.Referees(first_name=u"Pierluigi", last_name=u"Collina",
birth_date=date(1960, 2, 13), country=italy)
}
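# Illustrative use of these fixtures in a test module (the `session` fixture is
# assumed to be provided elsewhere in the suite):
#
#   def test_insert_venue(session, venue_data):
#       session.add(mco.Venues(**venue_data))
#       session.commit()
#       assert session.query(mco.Venues).filter_by(city=u"London").count() == 1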
| soccermetrics/marcotti | tests/conftest.py | Python | mit | 8,790 |
import sys
import recipes
from ui.qt.init import start_application
if __name__ == "__main__":
recipes.read()
start_application()
| zachcheatham/Minecraft-Resource-Calculator | python/MRC_QT.py | Python | mit | 147 |
# package
#-*- coding: UTF-8 -*-
from sqlalchemy.orm import (
scoped_session,
sessionmaker,
)
from zope.sqlalchemy import ZopeTransactionExtension
DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension()))
import infosys.models.base
#import .fias
import infosys.models.organization
#import .classifiers
| StasMerzlyakov/InfoSys | infosys/models/__init__.py | Python | mit | 347 |
# -------------------------------------------------------------
# Panoptes Marking Export Script
#
# This script extracts individual markings from Zooniverse
# Panoptes classification data export CSV. This script is
# configured to export circular marker info for classifications
# collected only for the latest workflow version.
#
# Customizations are set for use with the following project:
# planet-9-rogue-worlds
#
# Column names, annotation info, and marking task ID may need
# be altered for this script to work for data exports from
# other projects.
#
# Written by: Cliff Johnson ([email protected])
# Last Edited: 10 January 2017
# Based on scripts by Brooke Simmons
# -------------------------------------------------------------
#Python 3.5.1
import sys
try:
classfile_in = sys.argv[1]
markfile_out = sys.argv[2]
except:
print("\nUsage: "+sys.argv[0]+" classifications_infile markings_outfile")
print(" classifications_infile: a Zooniverse (Panoptes) classifications data export CSV.")
print(" markings_outfile: a CSV file with marking information from classifications.")
print("\nExample: "+sys.argv[0]+" ap-aas229-test-classifications.csv ap-aas229-test-markings.csv")
sys.exit(0)
#classfile_in = 'ap-aas229-test-classifications.csv'
#markfile_out = 'ap-aas229-test-markings.csv'
import pandas as pd
import json
# Read in classification CSV and expand JSON fields
classifications = pd.read_csv(classfile_in)
classifications['metadata_json'] = [json.loads(q) for q in classifications.metadata]
classifications['annotations_json'] = [json.loads(q) for q in classifications.annotations]
classifications['subject_data_json'] = [json.loads(q) for q in classifications.subject_data]
# Calculate number of markings per classification
# Note: index of annotations_json ("q" here) corresponds to task number (i.e., 0)
classifications['n_markings'] = [ len(q[0]['value']) for q in classifications.annotations_json ]
### Classification Selection / CURRENT SETTING: most recent workflow version
# OPTION 1: Select only classifications from most recent workflow version
iclass = classifications[classifications.workflow_version == classifications['workflow_version'].max()]
# OPTION 2: Select most/all valid classifications using workflow_id and workflow_version
#iclass = classifications[(classifications['workflow_id'] == 1687) & (classifications['workflow_version'] > 40)]
# Output markings from classifications in iclass to new list of dictionaries (prep for pandas dataframe)
# Applicable for workflows with marking task as first task, and outputs data for circular markers (x,y,r)
clist=[]
for index, c in iclass.iterrows():
if c['n_markings'] > 0:
# Note: index of annotations_json corresponds to task number (i.e., 0)
for q in c.annotations_json[0]['value']:
# OPTIONAL EXPANSION: could use if statement here to split marker types
clist.append({'classification_id':c.classification_id, 'user_name':c.user_name, 'user_id':c.user_id,
'created_at':c.created_at, 'subject_ids':c.subject_ids, 'tool':q['tool'],
'tool_label':q['tool_label'], 'x':q['x'], 'y':q['y'], 'frame':q['frame']})
# Output list of dictionaries to pandas dataframe and export to CSV.
col_order=['classification_id','user_name','user_id','created_at','subject_ids',
'tool','tool_label','x','y','frame']
out=pd.DataFrame(clist)[col_order]
out.to_csv(markfile_out,index_label='mark_id')
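# Optional sanity check (illustrative): summarize how many markings were
# extracted per subject from the file just written.
#   marks = pd.read_csv(markfile_out, index_col='mark_id')
#   print(marks.groupby('subject_ids').size().describe())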
| lcjohnso/Panoptes_MarkingExport | zoomarks_csvextract_planet9.py | Python | mit | 3,537 |
import unittest
from biokbase.narrative.exception_util import (
NarrativeException,
transform_job_exception,
)
from biokbase.execution_engine2.baseclient import ServerError as EEServerError
from biokbase.userandjobstate.baseclient import ServerError as UJSServerError
from requests.exceptions import HTTPError
import requests
class ExceptionUtilTestCase(unittest.TestCase):
def test_transform_njs_err(self):
code = 1000
message = "some error message"
name = "EEError"
njs_err = EEServerError(name, code, message)
nar_err = transform_job_exception(njs_err)
self.assertEqual(nar_err.code, code)
self.assertEqual(nar_err.message, message)
self.assertEqual(nar_err.name, name)
self.assertEqual(nar_err.source, "njs")
def test_transform_ujs_err(self):
code = 1000
message = "some error message"
name = "UJSError"
ujs_err = UJSServerError(name, code, message)
nar_err = transform_job_exception(ujs_err)
self.assertEqual(nar_err.code, code)
self.assertEqual(nar_err.message, message)
self.assertEqual(nar_err.name, name)
self.assertEqual(nar_err.source, "ujs")
def test_transform_http_err_unavailable(self):
codes = [404, 502, 503]
message = "A KBase service is currently unavailable."
name = "HTTPError"
for c in codes:
res = requests.Response()
res.status_code = c
err = HTTPError("http error", response=res)
nar_err = transform_job_exception(err)
self.assertEqual(nar_err.code, c)
self.assertEqual(nar_err.message, message)
self.assertEqual(nar_err.name, name)
self.assertEqual(nar_err.source, "network")
def test_transform_http_err_timeout(self):
codes = [504, 598, 599]
message = "There was a temporary network connection error."
name = "HTTPError"
for c in codes:
res = requests.Response()
res.status_code = c
err = HTTPError("http error", response=res)
nar_err = transform_job_exception(err)
self.assertEqual(nar_err.code, c)
self.assertEqual(nar_err.message, message)
self.assertEqual(nar_err.name, name)
self.assertEqual(nar_err.source, "network")
def test_transform_http_err_internal(self):
code = 500
message = "An internal error occurred in the KBase service."
name = "HTTPError"
res = requests.Response()
res.status_code = code
err = HTTPError("http error", response=res)
nar_err = transform_job_exception(err)
self.assertEqual(nar_err.code, code)
self.assertEqual(nar_err.message, message)
self.assertEqual(nar_err.name, name)
self.assertEqual(nar_err.source, "network")
def test_transform_http_err_unknown(self):
code = 666
message = "An untracked error occurred."
name = "HTTPError"
res = requests.Response()
res.status_code = code
err = HTTPError("http error", response=res)
nar_err = transform_job_exception(err)
self.assertEqual(nar_err.code, code)
self.assertEqual(nar_err.message, message)
self.assertEqual(nar_err.name, name)
self.assertEqual(nar_err.source, "network")
| briehl/narrative | src/biokbase/narrative/tests/test_exception_util.py | Python | mit | 3,400 |